mirror of
https://github.com/huggingface/peft.git
synced 2025-10-20 23:43:47 +08:00
Compare commits
355 Commits
Author | SHA1 | Date | |
---|---|---|---|
30889ef260 | |||
67918efb49 | |||
189a9a666d | |||
bfc102c0c0 | |||
1c1c7fdaa6 | |||
4a15595822 | |||
bb2471d926 | |||
54ca31153d | |||
ebbff4023a | |||
62237dc9b1 | |||
eaa5eef28e | |||
bf54136a79 | |||
a43ec59762 | |||
0089ebd272 | |||
fe01d6de85 | |||
f9b673ea37 | |||
dc28a61e82 | |||
71585d611f | |||
c6bcf91ca1 | |||
4354a7d496 | |||
f36f50acb4 | |||
777c0b6ad7 | |||
6451cbd70c | |||
7d28536b18 | |||
eb2c12d99a | |||
c6b28a22b8 | |||
e96eef9ea1 | |||
54ee2fb1af | |||
cbd783b4df | |||
26504a0119 | |||
4186c9b104 | |||
8665e2b571 | |||
cbf346d962 | |||
2a0fb71f4f | |||
c4cf9e7d3b | |||
cf04d0353f | |||
4023da904f | |||
6fe1aac65d | |||
799420aef1 | |||
993836ff90 | |||
1c9679ac71 | |||
e745ffd7d0 | |||
029dcd5a1c | |||
482a2a6d9a | |||
119de1715c | |||
a0a46c06db | |||
3708793ba9 | |||
46a84bd395 | |||
bd544bb2ce | |||
55c37e9c0b | |||
997e6ec5ab | |||
ddb114af0a | |||
4b02148af2 | |||
0f1e9091cc | |||
88e2e75cc3 | |||
c9df262d69 | |||
67a08009ff | |||
971dd6e815 | |||
ee6f6dcee7 | |||
21c304f6f6 | |||
e73967edea | |||
b08e6faf2b | |||
5c13ea3b12 | |||
00b820061e | |||
504d3c8329 | |||
fc9f4b3176 | |||
895513c465 | |||
c893394808 | |||
86562eec49 | |||
b467e3de5c | |||
2ab005f3ab | |||
b482391b80 | |||
d56df7fc64 | |||
a87ff4c744 | |||
2665f80a17 | |||
9fd788bedb | |||
2336780f9e | |||
c22a8e5d47 | |||
1a7433b136 | |||
70d559d029 | |||
bffbbbf76a | |||
9c70468a3c | |||
f7cf460f7c | |||
1b1091c158 | |||
c456d55216 | |||
e05b2670c5 | |||
5ed46e4f04 | |||
5bad88ba04 | |||
6a57472665 | |||
da17ac0f48 | |||
2674f5ea66 | |||
2b901ee572 | |||
8298f1a366 | |||
f0fb9516d8 | |||
04c411010b | |||
da29ae62d4 | |||
64c8d1da85 | |||
e586f96740 | |||
e35d46de19 | |||
b4faffea8a | |||
19145bba8a | |||
c0dd27bc97 | |||
fb607d00ad | |||
a634f6a13e | |||
dd4771b2f4 | |||
043238578f | |||
b4ac2d840b | |||
0ae52fece1 | |||
8351331d78 | |||
f1ecfa6ae6 | |||
b5a8a294ed | |||
9cdaed2769 | |||
18a0910113 | |||
99e1a55f54 | |||
21df968fd1 | |||
5a3a5acff2 | |||
70302d7b4f | |||
3ff90626b6 | |||
1877329093 | |||
98429b8184 | |||
d350a00ece | |||
ad756173f1 | |||
94877b5008 | |||
f020404ee6 | |||
79298c7c24 | |||
b25ce8a0cd | |||
5d84484079 | |||
49ddefa834 | |||
3af469eeea | |||
5e7e5ad836 | |||
9d8287f3e3 | |||
2efd02769b | |||
669dd4edeb | |||
b5641cc744 | |||
c5d94855cd | |||
face67dfeb | |||
d9094cebea | |||
493ae58beb | |||
ed4ce9fc94 | |||
4c48970cb0 | |||
46e03602ed | |||
45343a4ccc | |||
276c91b143 | |||
cfe35a7878 | |||
d47d23aa0e | |||
02f0a4ca59 | |||
23cfbf22eb | |||
9da72d25ed | |||
0ad95fa361 | |||
6960076699 | |||
bdeb06b16c | |||
884b1ac3a8 | |||
207229ad5e | |||
2464c572eb | |||
8b21a4e5ab | |||
894e68a408 | |||
7594903444 | |||
1d0535e255 | |||
56556faa17 | |||
15a013af5f | |||
45565f4357 | |||
aaa7e9f44a | |||
07f2b82dae | |||
eced2edff8 | |||
e98df91906 | |||
0c16918c34 | |||
c2c544dc9f | |||
d7f520a320 | |||
d17266d599 | |||
dfd99f61f8 | |||
dbd40d96a1 | |||
99f792e8a3 | |||
a7fb9fb090 | |||
a977ce69a5 | |||
3d0edccc4a | |||
763511dc28 | |||
1367bc6f0d | |||
88dfc5d2a8 | |||
7a5f17f39e | |||
52ff0cde9f | |||
cacee957e6 | |||
bedcaa4f82 | |||
f66c3859b0 | |||
69665f24e9 | |||
08b6665167 | |||
d54a23d30e | |||
9856f79cf9 | |||
634bd197f2 | |||
1af8ca484b | |||
1c0654b9a5 | |||
1dc4a6761e | |||
f3d4fef6e6 | |||
39264a0141 | |||
ba0477f298 | |||
139624750a | |||
1bbde1bfe0 | |||
6b4554e643 | |||
c8c936eddf | |||
93d0c03d5b | |||
5bdbf2bcd6 | |||
4c611f40b4 | |||
8bdd4848f4 | |||
b786b884f6 | |||
0fa63fb4a2 | |||
f5aae1b47d | |||
6d140bad39 | |||
1f55957402 | |||
08368a1fba | |||
20d9c175e2 | |||
d4dbf684e0 | |||
0c9354bda9 | |||
f113af0b9e | |||
43381008d6 | |||
7d99466446 | |||
ecaaae8719 | |||
0b2f950cc2 | |||
85013987aa | |||
a23b9213f4 | |||
140a69bb90 | |||
8c17d556a8 | |||
0e37b85609 | |||
6e783780ca | |||
fd1c0f66eb | |||
a4ca8fa3b6 | |||
3d9ceb5162 | |||
bbaafc2fef | |||
573cb35036 | |||
6c44096c7b | |||
4b371b489b | |||
87c1d2410e | |||
2439203eff | |||
312d294fdd | |||
369a0fba85 | |||
438b16b8c9 | |||
dbe7e644f1 | |||
a916465ad0 | |||
412d7bc985 | |||
7d44026dea | |||
ba90047d70 | |||
10cf3a4fa3 | |||
aac7722b9e | |||
ed396a69ed | |||
ec267c644a | |||
9b5808938f | |||
b10a8cedf6 | |||
bfb264ad96 | |||
702f9377e3 | |||
0e33ac1efe | |||
e27e883443 | |||
ffbb6bcf9c | |||
8541b60acb | |||
96c0277a1b | |||
b15c185939 | |||
a955ef1088 | |||
e06d94ddeb | |||
1681cebf60 | |||
a09f66c8cd | |||
1869fe6e05 | |||
1c27e24d50 | |||
30fd5a4c88 | |||
3040782e04 | |||
1b8b17de86 | |||
029f416fce | |||
a1953baef6 | |||
e90dcc4be4 | |||
71b326db68 | |||
42ab10699b | |||
5a0e19dda1 | |||
86ad5ce55c | |||
61a8e3a3bd | |||
0675541154 | |||
fa5957f7ca | |||
5265eb7ebd | |||
878a8bc990 | |||
b1bafca333 | |||
92d38b50af | |||
5de5c24a8a | |||
062d95a09e | |||
c33c42f158 | |||
c46d76ae3a | |||
4f542e319f | |||
b5e341bb8a | |||
06fd06a4d2 | |||
7d1d959879 | |||
39ef2546d5 | |||
9f7492577f | |||
bef8e3584c | |||
032fff92fb | |||
6c8659f8f9 | |||
5884bdbea4 | |||
86290e9660 | |||
563acf0832 | |||
f4526d57fc | |||
d9b0a118af | |||
f5352f08c5 | |||
48ffd07276 | |||
eb01b5ee1d | |||
a7ea02a709 | |||
66fd087205 | |||
0e8932f1cb | |||
e2b8e3260d | |||
c476c1e348 | |||
18544647ac | |||
8af8dbd2ec | |||
39fc09ec1b | |||
016722addd | |||
fd10faedfa | |||
702d06098e | |||
0b62b4378b | |||
b8b84cb6ce | |||
08cb3dde57 | |||
03eb378eb9 | |||
6b81d7179f | |||
0270b7c780 | |||
38e9c650ba | |||
9320373c12 | |||
019b7ff9d6 | |||
b519e3f9e1 | |||
e48dfc331c | |||
4d51464045 | |||
8563a63af2 | |||
eb75374fb1 | |||
1cbc985018 | |||
58f4dee67a | |||
a8d11b36a3 | |||
189a6b8e35 | |||
e45529b149 | |||
ba7b1011b8 | |||
c23be52881 | |||
7fb5f90a38 | |||
fcff23f005 | |||
42a184f742 | |||
7add756923 | |||
9914e76d5b | |||
668f045972 | |||
38f48dd769 | |||
db55fb34b8 | |||
76d4ecd40d | |||
27f956a73b | |||
dd1c0d87fe | |||
207d290865 | |||
5e8ee44091 | |||
662ebe593e | |||
c42968617b | |||
3714aa2fff | |||
0fcc30dd43 | |||
d6015bc11f | |||
4fd374e80d | |||
3d7770bfd5 | |||
f173f97e9d | |||
ef8523b5a4 | |||
63c5c9a2c0 | |||
5ed95f49d0 | |||
8a3fcd060d | |||
b1059b73aa |
71
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
71
.github/ISSUE_TEMPLATE/bug-report.yml
vendored
Normal file
@ -0,0 +1,71 @@
|
||||
name: "\U0001F41B Bug Report"
|
||||
description: Submit a bug report to help us improve the library
|
||||
body:
|
||||
- type: textarea
|
||||
id: system-info
|
||||
attributes:
|
||||
label: System Info
|
||||
description: Please share your relevant system information with us
|
||||
placeholder: peft & accelerate & transformers version, platform, python version, ...
|
||||
validations:
|
||||
required: true
|
||||
|
||||
- type: textarea
|
||||
id: who-can-help
|
||||
attributes:
|
||||
label: Who can help?
|
||||
description: |
|
||||
Your issue will be replied to more quickly if you can figure out the right person to tag with @
|
||||
If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
|
||||
|
||||
All issues are read by one of the core maintainers, so if you don't know who to tag, just leave this blank and
|
||||
a core maintainer will ping the right person.
|
||||
|
||||
Please tag fewer than 3 people.
|
||||
|
||||
Library: @pacman100 @younesbelkada @sayakpaul
|
||||
|
||||
Documentation: @stevhliu and @MKhalusova
|
||||
|
||||
placeholder: "@Username ..."
|
||||
|
||||
- type: checkboxes
|
||||
id: information-scripts-examples
|
||||
attributes:
|
||||
label: Information
|
||||
description: 'The problem arises when using:'
|
||||
options:
|
||||
- label: "The official example scripts"
|
||||
- label: "My own modified scripts"
|
||||
|
||||
- type: checkboxes
|
||||
id: information-tasks
|
||||
attributes:
|
||||
label: Tasks
|
||||
description: "The tasks I am working on are:"
|
||||
options:
|
||||
- label: "An officially supported task in the `examples` folder"
|
||||
- label: "My own task or dataset (give details below)"
|
||||
|
||||
- type: textarea
|
||||
id: reproduction
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Reproduction
|
||||
description: |
|
||||
Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
|
||||
Please provide the simplest reproducer as possible so that we can quickly fix the issue.
|
||||
|
||||
placeholder: |
|
||||
Reproducer:
|
||||
|
||||
|
||||
|
||||
- type: textarea
|
||||
id: expected-behavior
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Expected behavior
|
||||
description: "A clear and concise description of what you would expect to happen."
|
30
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
Normal file
30
.github/ISSUE_TEMPLATE/feature-request.yml
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
name: "\U0001F680 Feature request"
|
||||
description: Submit a proposal/request for a new feature
|
||||
labels: [ "feature" ]
|
||||
body:
|
||||
- type: textarea
|
||||
id: feature-request
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Feature request
|
||||
description: |
|
||||
A clear and concise description of the feature proposal. Please provide a link to the paper and code in case they exist.
|
||||
|
||||
- type: textarea
|
||||
id: motivation
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Motivation
|
||||
description: |
|
||||
Please outline the motivation for the proposal. Is your feature request related to a problem?
|
||||
|
||||
- type: textarea
|
||||
id: contribution
|
||||
validations:
|
||||
required: true
|
||||
attributes:
|
||||
label: Your contribution
|
||||
description: |
|
||||
Is there any way that you could help, e.g. by submitting a PR?
|
91
.github/workflows/build_docker_images.yml
vendored
91
.github/workflows/build_docker_images.yml
vendored
@ -15,10 +15,20 @@ jobs:
|
||||
name: "Latest Peft CPU [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v2
|
||||
with:
|
||||
@ -36,10 +46,20 @@ jobs:
|
||||
name: "Latest Peft GPU [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
@ -47,8 +67,71 @@ jobs:
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v2
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/peft-gpu
|
||||
push: true
|
||||
tags: huggingface/peft-gpu
|
||||
tags: huggingface/peft-gpu
|
||||
|
||||
latest-cuda-bnb-source:
|
||||
name: "Latest Peft GPU + bnb source [dev]"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v3
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/peft-gpu-bnb-source
|
||||
push: true
|
||||
tags: huggingface/peft-gpu-bnb-source
|
||||
|
||||
|
||||
latest-cuda-bnb-source-latest:
|
||||
name: "Latest Peft GPU + bnb source [accelerate / peft / transformers latest]"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Cleanup disk
|
||||
run: |
|
||||
sudo ls -l /usr/local/lib/
|
||||
sudo ls -l /usr/share/
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
sudo rm -rf /usr/local/lib/android
|
||||
sudo rm -rf /usr/share/dotnet
|
||||
sudo du -sh /usr/local/lib/
|
||||
sudo du -sh /usr/share/
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
- name: Check out code
|
||||
uses: actions/checkout@v3
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_PASSWORD }}
|
||||
|
||||
- name: Build and Push GPU
|
||||
uses: docker/build-push-action@v4
|
||||
with:
|
||||
context: ./docker/peft-gpu-bnb-latest
|
||||
push: true
|
||||
tags: huggingface/peft-gpu-bnb-latest
|
||||
|
3
.github/workflows/build_documentation.yml
vendored
3
.github/workflows/build_documentation.yml
vendored
@ -15,4 +15,5 @@ jobs:
|
||||
package: peft
|
||||
notebook_folder: peft_docs
|
||||
secrets:
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
token: ${{ secrets.HUGGINGFACE_PUSH }}
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
13
.github/workflows/delete_doc_comment.yml
vendored
13
.github/workflows/delete_doc_comment.yml
vendored
@ -1,13 +0,0 @@
|
||||
name: Delete dev documentation
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
types: [ closed ]
|
||||
|
||||
|
||||
jobs:
|
||||
delete:
|
||||
uses: huggingface/doc-builder/.github/workflows/delete_doc_comment.yml@main
|
||||
with:
|
||||
pr_number: ${{ github.event.number }}
|
||||
package: peft
|
82
.github/workflows/integrations_tests.yml
vendored
Normal file
82
.github/workflows/integrations_tests.yml
vendored
Normal file
@ -0,0 +1,82 @@
|
||||
name: integration tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
branch:
|
||||
description: 'Branch to test on'
|
||||
required: true
|
||||
|
||||
jobs:
|
||||
run_transformers_integration_tests:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
transformers-version: ['main', 'latest']
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.inputs.branch }}
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.10"
|
||||
cache: "pip"
|
||||
cache-dependency-path: "setup.py"
|
||||
- name: print environment variables
|
||||
run: |
|
||||
echo "env.CI_BRANCH = ${{ env.CI_BRANCH }}"
|
||||
echo "env.CI_SHA = ${{ env.CI_SHA }}"
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install .[test]
|
||||
if [ "${{ matrix.transformers-version }}" == "main" ]; then
|
||||
pip install -U git+https://github.com/huggingface/transformers.git
|
||||
else
|
||||
echo "Nothing to do as transformers latest already installed"
|
||||
fi
|
||||
|
||||
- name: Test transformers integration
|
||||
run: |
|
||||
cd .. && git clone https://github.com/huggingface/transformers.git && cd transformers/ && git rev-parse HEAD
|
||||
RUN_SLOW=1 pytest tests/peft_integration/test_peft_integration.py
|
||||
run_diffusers_integration_tests:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
# For now diffusers integration is not on PyPI
|
||||
diffusers-version: ['main']
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.inputs.branch }}
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.10"
|
||||
cache: "pip"
|
||||
cache-dependency-path: "setup.py"
|
||||
- name: print environment variables
|
||||
run: |
|
||||
echo "env.CI_BRANCH = ${{ env.CI_BRANCH }}"
|
||||
echo "env.CI_SHA = ${{ env.CI_SHA }}"
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install .[test]
|
||||
|
||||
if [ "${{ matrix.diffusers-version }}" == "main" ]; then
|
||||
pip install -U git+https://github.com/huggingface/diffusers.git
|
||||
else
|
||||
echo "Nothing to do as diffusers latest already installed"
|
||||
fi
|
||||
|
||||
- name: Test diffusers integration
|
||||
run: |
|
||||
cd .. && git clone https://github.com/huggingface/diffusers.git && cd diffusers/ && git rev-parse HEAD
|
||||
pytest tests/lora/test_lora_layers_peft.py
|
131
.github/workflows/nightly-bnb.yml
vendored
Normal file
131
.github/workflows/nightly-bnb.yml
vendored
Normal file
@ -0,0 +1,131 @@
|
||||
name: BNB from source self-hosted runner with slow tests (scheduled)
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
schedule:
|
||||
- cron: "0 2 * * *"
|
||||
|
||||
env:
|
||||
RUN_SLOW: "yes"
|
||||
IS_GITHUB_CI: "1"
|
||||
# To be able to run tests on CUDA 12.2
|
||||
NVIDIA_DISABLE_REQUIRE: "1"
|
||||
SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
|
||||
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest"]
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu_${{ matrix.docker-image-name }}"
|
||||
container:
|
||||
image: ${{ matrix.docker-image-name }}
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Pip install
|
||||
run: |
|
||||
source activate peft
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog pytest-cov parameterized datasets scipy einops
|
||||
mkdir transformers-clone && git clone https://github.com/huggingface/transformers.git transformers-clone # rename to transformers clone to avoid modules conflict
|
||||
if [ "${{ matrix.docker-image-name }}" == "huggingface/peft-gpu-bnb-latest:latest" ]; then
|
||||
cd transformers-clone
|
||||
transformers_version=$(pip show transformers | grep '^Version:' | cut -d ' ' -f2 | sed 's/\.dev0//')
|
||||
echo "Checking out tag for Transformers version: v$transformers_version"
|
||||
git fetch --tags
|
||||
git checkout tags/v$transformers_version
|
||||
cd ..
|
||||
fi
|
||||
- name: Run examples on single GPU
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_examples_single_gpu_bnb
|
||||
|
||||
- name: Run core tests on single GPU
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_core_single_gpu_bnb
|
||||
|
||||
- name: Run transformers tests on single GPU
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make transformers_tests
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
python scripts/log_reports.py --slack_channel_name bnb-daily-ci >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest"]
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu_${{ matrix.docker-image-name }}"
|
||||
container:
|
||||
image: ${{ matrix.docker-image-name }}
|
||||
options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Pip install
|
||||
run: |
|
||||
source activate peft
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog pytest-cov parameterized datasets scipy einops
|
||||
mkdir transformers-clone && git clone https://github.com/huggingface/transformers.git transformers-clone
|
||||
if [ "${{ matrix.docker-image-name }}" == "huggingface/peft-gpu-bnb-latest:latest" ]; then
|
||||
cd transformers-clone
|
||||
transformers_version=$(pip show transformers | grep '^Version:' | cut -d ' ' -f2 | sed 's/\.dev0//')
|
||||
echo "Checking out tag for Transformers version: v$transformers_version"
|
||||
git fetch --tags
|
||||
git checkout tags/v$transformers_version
|
||||
cd ..
|
||||
fi
|
||||
|
||||
- name: Run core GPU tests on multi-gpu
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
|
||||
- name: Run examples on multi GPU
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_examples_multi_gpu_bnb
|
||||
|
||||
- name: Run core tests on multi GPU
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_core_multi_gpu_bnb
|
||||
|
||||
- name: Run transformers tests on multi GPU
|
||||
if: always()
|
||||
run: |
|
||||
source activate peft
|
||||
make transformers_tests
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
run: |
|
||||
pip install slack_sdk tabulate
|
||||
python scripts/log_reports.py --slack_channel_name bnb-daily-ci >> $GITHUB_STEP_SUMMARY
|
31
.github/workflows/nightly.yml
vendored
31
.github/workflows/nightly.yml
vendored
@ -8,28 +8,30 @@ on:
|
||||
env:
|
||||
RUN_SLOW: "yes"
|
||||
IS_GITHUB_CI: "1"
|
||||
# To be able to run tests on CUDA 12.2
|
||||
NVIDIA_DISABLE_REQUIRE: "1"
|
||||
SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
|
||||
|
||||
|
||||
jobs:
|
||||
run_all_tests_single_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0"
|
||||
TEST_TYPE: "single_gpu"
|
||||
container:
|
||||
image: huggingface/peft-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
options: --gpus all --shm-size "16gb" -e NVIDIA_DISABLE_REQUIRE=true
|
||||
defaults:
|
||||
run:
|
||||
working-directory: peft/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone & pip install
|
||||
- uses: actions/checkout@v3
|
||||
- name: Pip install
|
||||
run: |
|
||||
source activate peft
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog
|
||||
|
||||
@ -47,6 +49,11 @@ jobs:
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_core_single_gpu
|
||||
|
||||
- name: Run regression tests on single GPU
|
||||
run: |
|
||||
source activate peft
|
||||
make tests_regression
|
||||
|
||||
- name: Generate Report
|
||||
if: always()
|
||||
@ -55,23 +62,23 @@ jobs:
|
||||
python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
|
||||
|
||||
run_all_tests_multi_gpu:
|
||||
runs-on: [self-hosted, docker-gpu, multi-gpu]
|
||||
strategy:
|
||||
fail-fast: false
|
||||
runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
|
||||
env:
|
||||
CUDA_VISIBLE_DEVICES: "0,1"
|
||||
TEST_TYPE: "multi_gpu"
|
||||
container:
|
||||
image: huggingface/peft-gpu:latest
|
||||
options: --gpus all --shm-size "16gb"
|
||||
options: --gpus all --shm-size "16gb" -e NVIDIA_DISABLE_REQUIRE=true
|
||||
defaults:
|
||||
run:
|
||||
working-directory: peft/
|
||||
shell: bash
|
||||
steps:
|
||||
- name: Update clone
|
||||
- uses: actions/checkout@v3
|
||||
- name: Pip install
|
||||
run: |
|
||||
source activate peft
|
||||
git config --global --add safe.directory '*'
|
||||
git fetch && git checkout ${{ github.sha }}
|
||||
pip install -e . --no-deps
|
||||
pip install pytest-reportlog
|
||||
|
||||
|
4
.github/workflows/tests.yml
vendored
4
.github/workflows/tests.yml
vendored
@ -28,7 +28,7 @@ jobs:
|
||||
needs: check_code_quality
|
||||
strategy:
|
||||
matrix:
|
||||
python-version: ["3.8", "3.9", "3.10"]
|
||||
python-version: ["3.8", "3.9", "3.10", "3.11"]
|
||||
os: ["ubuntu-latest", "macos-latest", "windows-latest"]
|
||||
runs-on: ${{ matrix.os }}
|
||||
steps:
|
||||
@ -43,7 +43,7 @@ jobs:
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
# cpu version of pytorch
|
||||
pip install .[test]
|
||||
pip install -e .[test]
|
||||
- name: Test with pytest
|
||||
run: |
|
||||
make test
|
||||
|
43
.github/workflows/torch_compile_tests.yml
vendored
Normal file
43
.github/workflows/torch_compile_tests.yml
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
name: torch compile tests
|
||||
|
||||
# see peft/tests/__init__.py
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
branch:
|
||||
description: 'Branch to test on'
|
||||
required: true
|
||||
pytorch_nightly:
|
||||
description: 'Whether to use PyTorch nightly (true/false)'
|
||||
required: false
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
run_tests_with_compile:
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
PEFT_DEBUG_WITH_TORCH_COMPILE: 1
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
ref: ${{ github.event.inputs.branch }}
|
||||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: "3.10"
|
||||
cache: "pip"
|
||||
cache-dependency-path: "setup.py"
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install .[test]
|
||||
if [ "${{ github.event.inputs.pytorch_nightly }}" = "true" ]; then
|
||||
python -m pip install --upgrade --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
|
||||
fi
|
||||
- name: Test compile with pytest
|
||||
run: |
|
||||
echo "PEFT_DEBUG_WITH_TORCH_COMPILE=$PEFT_DEBUG_WITH_TORCH_COMPILE"
|
||||
git status
|
||||
make test
|
16
.github/workflows/upload_pr_documentation.yml
vendored
Normal file
16
.github/workflows/upload_pr_documentation.yml
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
name: Upload PR Documentation
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Build PR Documentation"]
|
||||
types:
|
||||
- completed
|
||||
|
||||
jobs:
|
||||
build:
|
||||
uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
|
||||
with:
|
||||
package_name: peft
|
||||
secrets:
|
||||
hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
|
||||
comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
|
19
Makefile
19
Makefile
@ -34,3 +34,22 @@ tests_core_single_gpu:
|
||||
tests_common_gpu:
|
||||
python -m pytest tests/test_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_decoder.log",)
|
||||
python -m pytest tests/test_encoder_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_encoder_decoder.log",)
|
||||
|
||||
tests_examples_multi_gpu_bnb:
|
||||
python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)
|
||||
|
||||
tests_examples_single_gpu_bnb:
|
||||
python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)
|
||||
|
||||
tests_core_multi_gpu_bnb:
|
||||
python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)
|
||||
|
||||
tests_core_single_gpu_bnb:
|
||||
python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)
|
||||
|
||||
# For testing transformers tests for bnb runners
|
||||
transformers_tests:
|
||||
RUN_SLOW=1 python -m pytest transformers-clone/tests/quantization/bnb $(if $(IS_GITHUB_CI),--report-log "transformers_tests.log",)
|
||||
|
||||
tests_regression:
|
||||
python -m pytest -s --regression tests/regression/ $(if $(IS_GITHUB_CI),--report-log "regression_tests.log",)
|
||||
|
206
README.md
206
README.md
@ -30,6 +30,12 @@ Supported methods:
|
||||
3. P-Tuning: [GPT Understands, Too](https://arxiv.org/abs/2103.10385)
|
||||
4. Prompt Tuning: [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691)
|
||||
5. AdaLoRA: [Adaptive Budget Allocation for Parameter-Efficient Fine-Tuning](https://arxiv.org/abs/2303.10512)
|
||||
6. $(IA)^3$: [Few-Shot Parameter-Efficient Fine-Tuning is Better and Cheaper than In-Context Learning](https://arxiv.org/abs/2205.05638)
|
||||
7. MultiTask Prompt Tuning: [Multitask Prompt Tuning Enables Parameter-Efficient Transfer Learning](https://arxiv.org/abs/2303.02861)
|
||||
8. LoHa: [FedPara: Low-Rank Hadamard Product for Communication-Efficient Federated Learning](https://arxiv.org/abs/2108.06098)
|
||||
9. LoKr: [KronA: Parameter Efficient Tuning with Kronecker Adapter](https://arxiv.org/abs/2212.10650) based on [Navigating Text-To-Image Customization:From LyCORIS Fine-Tuning to Model Evaluation](https://arxiv.org/abs/2309.14859) implementation
|
||||
10. LoftQ: [LoftQ: LoRA-Fine-Tuning-aware Quantization for Large Language Models](https://arxiv.org/abs/2310.08659)
|
||||
11. OFT: [Controlling Text-to-Image Diffusion by Orthogonal Finetuning](https://arxiv.org/abs/2306.07280)
|
||||
|
||||
## Getting started
|
||||
|
||||
@ -54,7 +60,7 @@ model.print_trainable_parameters()
|
||||
### Get comparable performance to full finetuning by adapting LLMs to downstream tasks using consumer hardware
|
||||
|
||||
GPU memory required for adapting LLMs on the few-shot dataset [`ought/raft/twitter_complaints`](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints). Here, settings considered
|
||||
are full finetuning, PEFT-LoRA using plain PyTorch and PEFT-LoRA using DeepSpeed with CPU Offloading.
|
||||
are full finetuning, PEFT-LoRA using plain PyTorch and PEFT-LoRA using DeepSpeed with CPU Offloading.
|
||||
|
||||
Hardware: Single A100 80GB GPU with CPU RAM above 64GB
|
||||
|
||||
@ -66,7 +72,7 @@ Hardware: Single A100 80GB GPU with CPU RAM above 64GB
|
||||
|
||||
Performance of PEFT-LoRA tuned [`bigscience/T0_3B`](https://huggingface.co/bigscience/T0_3B) on [`ought/raft/twitter_complaints`](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) leaderboard.
|
||||
A point to note is that we didn't try to squeeze performance by playing around with input instruction templates, LoRA hyperparams and other training related hyperparams. Also, we didn't use the larger 13B [mt0-xxl](https://huggingface.co/bigscience/mt0-xxl) model.
|
||||
So, we are already seeing comparable performance to SoTA with parameter efficient tuning. Also, the final checkpoint size is just `19MB` in comparison to `11GB` size of the backbone [`bigscience/T0_3B`](https://huggingface.co/bigscience/T0_3B) model.
|
||||
So, we are already seeing comparable performance to SoTA with parameter efficient tuning. Also, the final additional checkpoint size is just `19MB` in comparison to `11GB` size of the backbone [`bigscience/T0_3B`](https://huggingface.co/bigscience/T0_3B) model, but one still has to load the original full size model.
|
||||
|
||||
| Submission Name | Accuracy |
|
||||
| --------- | ---- |
|
||||
@ -131,15 +137,17 @@ Try out the 🤗 Gradio Space which should run seamlessly on a T4 instance:
|
||||
**NEW** ✨ Multi Adapter support and combining multiple LoRA adapters in a weighted combination
|
||||

|
||||
|
||||
**NEW** ✨ Dreambooth training for Stable Diffusion using LoHa and LoKr adapters [`examples/stable_diffusion/train_dreambooth.py`](examples/stable_diffusion/train_dreambooth.py)
|
||||
|
||||
### Parameter Efficient Tuning of LLMs for RLHF components such as Ranker and Policy
|
||||
- Here is an example in [trl](https://github.com/lvwerra/trl) library using PEFT+INT8 for tuning policy model: [gpt2-sentiment_peft.py](https://github.com/lvwerra/trl/blob/main/examples/sentiment/scripts/gpt2-sentiment_peft.py) and corresponding [Blog](https://huggingface.co/blog/trl-peft)
|
||||
- Example using PEFT for Instrction finetuning, reward model and policy : [stack_llama](https://github.com/lvwerra/trl/tree/main/examples/stack_llama/scripts) and corresponding [Blog](https://huggingface.co/blog/stackllama)
|
||||
- Example using PEFT for Instruction finetuning, reward model and policy : [stack_llama](https://github.com/lvwerra/trl/tree/main/examples/research_projects/stack_llama/scripts) and corresponding [Blog](https://huggingface.co/blog/stackllama)
|
||||
|
||||
### INT8 training of large models in Colab using PEFT LoRA and bits_and_bytes
|
||||
### INT8 training of large models in Colab using PEFT LoRA and bitsandbytes
|
||||
|
||||
- Here is now a demo on how to fine tune [OPT-6.7b](https://huggingface.co/facebook/opt-6.7b) (14GB in fp16) in a Google Colab: [](https://colab.research.google.com/drive/1jCkpikz0J2o20FBQmYmAGdiKmJGOMo-o?usp=sharing)
|
||||
|
||||
- Here is now a demo on how to fine tune [whishper-large](openai/whisper-large-v2) (1.5B params) (14GB in fp16) in a Google Colab: [](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) and [](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing)
|
||||
- Here is now a demo on how to fine tune [whisper-large](https://huggingface.co/openai/whisper-large-v2) (1.5B params) (14GB in fp16) in a Google Colab: [](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) and [](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing)
|
||||
|
||||
### Save compute and storage even for medium and small models
|
||||
|
||||
@ -147,7 +155,7 @@ Save storage by avoiding full finetuning of models on each of the downstream tas
|
||||
With PEFT methods, users only need to store tiny checkpoints in the order of `MBs` all the while retaining
|
||||
performance comparable to full finetuning.
|
||||
|
||||
An example of using LoRA for the task of adapting `LayoutLMForTokenClassification` on `FUNSD` dataset is given in `~examples/token_classification/PEFT_LoRA_LayoutLMForTokenClassification_on_FUNSD.py`. We can observe that with only `0.62 %` of parameters being trainable, we achieve performance (F1 0.777) comparable to full finetuning (F1 0.786) (without any hyerparam tuning runs for extracting more performance), and the checkpoint of this is only `2.8MB`. Now, if there are `N` such datasets, just have these PEFT models one for each dataset and save a lot of storage without having to worry about the problem of catastrophic forgetting or overfitting of backbone/base model.
|
||||
An example of using LoRA for the task of adapting `LayoutLMForTokenClassification` on `FUNSD` dataset is given in `~examples/token_classification/PEFT_LoRA_LayoutLMForTokenClassification_on_FUNSD.py`. We can observe that with only `0.62 %` of parameters being trainable, we achieve performance (F1 0.777) comparable to full finetuning (F1 0.786) (without any hyperparam tuning runs for extracting more performance), and the checkpoint of this is only `2.8MB`. Now, if there are `N` such datasets, just have these PEFT models one for each dataset and save a lot of storage without having to worry about the problem of catastrophic forgetting or overfitting of backbone/base model.
|
||||
|
||||
Another example is fine-tuning [`roberta-large`](https://huggingface.co/roberta-large) on [`MRPC` GLUE](https://huggingface.co/datasets/glue/viewer/mrpc) dataset using different PEFT methods. The notebooks are given in `~examples/sequence_classification`.
|
||||
|
||||
@ -217,74 +225,77 @@ DeepSpeed version required `v0.8.0`. An example is provided in `~examples/condit
|
||||
```
|
||||
|
||||
### Example of PEFT model inference using 🤗 Accelerate's Big Model Inferencing capabilities
|
||||
An example is provided in `~examples/causal_language_modeling/peft_lora_clm_accelerate_big_model_inference.ipynb`.
|
||||
An example is provided in [this notebook](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_accelerate_big_model_inference.ipynb).
|
||||
|
||||
|
||||
## Models support matrix
|
||||
|
||||
Find models that are supported out of the box below. Note that PEFT works with almost all models -- if it is not listed, you just need to [do some manual configuration](https://huggingface.co/docs/peft/developer_guides/custom_models).
|
||||
|
||||
### Causal Language Modeling
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
|--------------| ---- | ---- | ---- | ---- |
|
||||
| GPT-2 | ✅ | ✅ | ✅ | ✅ |
|
||||
| Bloom | ✅ | ✅ | ✅ | ✅ |
|
||||
| OPT | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-Neo | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-J | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-NeoX-20B | ✅ | ✅ | ✅ | ✅ |
|
||||
| LLaMA | ✅ | ✅ | ✅ | ✅ |
|
||||
| ChatGLM | ✅ | ✅ | ✅ | ✅ |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
|--------------| ---- | ---- | ---- | ---- | ---- |
|
||||
| GPT-2 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Bloom | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| OPT | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-Neo | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-J | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-NeoX-20B | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| LLaMA | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| ChatGLM | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| Mistral | ✅ | | | | |
|
||||
|
||||
### Conditional Generation
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ |
|
||||
| BART | ✅ | ✅ | ✅ | ✅ |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- |
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| BART | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
|
||||
### Sequence Classification
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| BERT | ✅ | ✅ | ✅ | ✅ |
|
||||
| RoBERTa | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-2 | ✅ | ✅ | ✅ | ✅ |
|
||||
| Bloom | ✅ | ✅ | ✅ | ✅ |
|
||||
| OPT | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-Neo | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-J | ✅ | ✅ | ✅ | ✅ |
|
||||
| Deberta | ✅ | | ✅ | ✅ |
|
||||
| Deberta-v2 | ✅ | | ✅ | ✅ |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- |
|
||||
| BERT | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| RoBERTa | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-2 | ✅ | ✅ | ✅ | ✅ | |
|
||||
| Bloom | ✅ | ✅ | ✅ | ✅ | |
|
||||
| OPT | ✅ | ✅ | ✅ | ✅ | |
|
||||
| GPT-Neo | ✅ | ✅ | ✅ | ✅ | |
|
||||
| GPT-J | ✅ | ✅ | ✅ | ✅ | |
|
||||
| Deberta | ✅ | | ✅ | ✅ | |
|
||||
| Deberta-v2 | ✅ | | ✅ | ✅ | |
|
||||
|
||||
### Token Classification
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| BERT | ✅ | ✅ | | |
|
||||
| RoBERTa | ✅ | ✅ | | |
|
||||
| GPT-2 | ✅ | ✅ | | |
|
||||
| Bloom | ✅ | ✅ | | |
|
||||
| OPT | ✅ | ✅ | | |
|
||||
| GPT-Neo | ✅ | ✅ | | |
|
||||
| GPT-J | ✅ | ✅ | | |
|
||||
| Deberta | ✅ | | | |
|
||||
| Deberta-v2 | ✅ | | | |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- |
|
||||
| BERT | ✅ | ✅ | | | |
|
||||
| RoBERTa | ✅ | ✅ | | | |
|
||||
| GPT-2 | ✅ | ✅ | | | |
|
||||
| Bloom | ✅ | ✅ | | | |
|
||||
| OPT | ✅ | ✅ | | | |
|
||||
| GPT-Neo | ✅ | ✅ | | | |
|
||||
| GPT-J | ✅ | ✅ | | | |
|
||||
| Deberta | ✅ | | | | |
|
||||
| Deberta-v2 | ✅ | | | | |
|
||||
|
||||
### Text-to-Image Generation
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| Stable Diffusion | ✅ | | | |
|
||||
| Model | LoRA | LoHa | LoKr | OFT | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- | ---- | ---- | ---- |
|
||||
| Stable Diffusion | ✅ | ✅ | ✅ | ✅ | | | |
|
||||
|
||||
|
||||
### Image Classification
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| ViT | ✅ | | | |
|
||||
| Swin | ✅ | | | |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- |
|
||||
| ViT | ✅ | | | | |
|
||||
| Swin | ✅ | | | | |
|
||||
|
||||
### Image to text (Multi-modal models)
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| Blip-2 | ✅ | | | |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- |
|
||||
| Blip-2 | ✅ | | | | |
|
||||
|
||||
___Note that we have tested LoRA for [ViT](https://huggingface.co/docs/transformers/model_doc/vit) and [Swin](https://huggingface.co/docs/transformers/model_doc/swin) for fine-tuning on image classification. However, it should be possible to use LoRA for any compatible model [provided](https://huggingface.co/models?pipeline_tag=image-classification&sort=downloads&search=vit) by 🤗 Transformers. Check out the respective
|
||||
examples to learn more. If you run into problems, please open an issue.___
|
||||
@ -293,9 +304,9 @@ The same principle applies to our [segmentation models](https://huggingface.co/m
|
||||
|
||||
### Semantic Segmentation
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| SegFormer | ✅ | | | |
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning | IA3 |
|
||||
| --------- | ---- | ---- | ---- | ---- | ---- |
|
||||
| SegFormer | ✅ | | | | |
|
||||
|
||||
|
||||
## Caveats:
|
||||
@ -352,19 +363,80 @@ any GPU memory savings. Please refer issue [[FSDP] FSDP with CPU offload consume
|
||||
accelerate launch --config_file fsdp_config.yaml examples/peft_lora_seq2seq_accelerate_fsdp.py
|
||||
```
|
||||
|
||||
2. When using `P_TUNING` or `PROMPT_TUNING` with `SEQ_2_SEQ` task, remember to remove the `num_virtual_token` virtual prompt predictions from the left side of the model outputs during evaluations.
|
||||
2. When using ZeRO3 with zero3_init_flag=True, if you find the gpu memory increase with training steps. we might need to update deepspeed after [deepspeed commit 42858a9891422abc](https://github.com/microsoft/DeepSpeed/commit/42858a9891422abcecaa12c1bd432d28d33eb0d4) . The related issue is [[BUG] Peft Training with Zero.Init() and Zero3 will increase GPU memory every forward step ](https://github.com/microsoft/DeepSpeed/issues/3002)
|
||||
|
||||
3. For encoder-decoder models, `P_TUNING` or `PROMPT_TUNING` doesn't support `generate` functionality of transformers because `generate` strictly requires `decoder_input_ids` but
|
||||
`P_TUNING`/`PROMPT_TUNING` appends soft prompt embeddings to `input_embeds` to create
|
||||
new `input_embeds` to be given to the model. Therefore, `generate` doesn't support this yet.
|
||||
## 🤗 PEFT as a utility library
|
||||
|
||||
4. When using ZeRO3 with zero3_init_flag=True, if you find the gpu memory increase with training steps. we might need to set zero3_init_flag=false in accelerate config.yaml. The related issue is [[BUG] memory leak under zero.Init](https://github.com/microsoft/DeepSpeed/issues/2637)
|
||||
### Injecting adapters directly into the model
|
||||
|
||||
## Backlog:
|
||||
- [x] Add tests
|
||||
- [x] Multi Adapter training and inference support
|
||||
- [x] Add more use cases and examples
|
||||
- [ ] Explore and possibly integrate `Bottleneck Adapters`, `(IA)^3`, `AdaptionPrompt` ...
|
||||
Inject trainable adapters on any `torch` model using `inject_adapter_in_model` method. Note the method will make no further change to the model.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from peft import inject_adapter_in_model, LoraConfig
|
||||
|
||||
class DummyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.embedding = torch.nn.Embedding(10, 10)
|
||||
self.linear = torch.nn.Linear(10, 10)
|
||||
self.lm_head = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, input_ids):
|
||||
x = self.embedding(input_ids)
|
||||
x = self.linear(x)
|
||||
x = self.lm_head(x)
|
||||
return x
|
||||
|
||||
lora_config = LoraConfig(
|
||||
lora_alpha=16,
|
||||
lora_dropout=0.1,
|
||||
r=64,
|
||||
bias="none",
|
||||
target_modules=["linear"],
|
||||
)
|
||||
|
||||
model = DummyModel()
|
||||
model = inject_adapter_in_model(lora_config, model)
|
||||
|
||||
dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])
|
||||
dummy_outputs = model(dummy_inputs)
|
||||
```
|
||||
|
||||
Learn more about the [low level API in the docs](https://huggingface.co/docs/peft/developer_guides/low_level_api).
|
||||
|
||||
### Mixing different adapter types
|
||||
|
||||
Ususally, it is not possible to combine different adapter types in the same model, e.g. combining LoRA with AdaLoRA, LoHa, or LoKr. Using a mixed model, this can, however, be achieved:
|
||||
|
||||
```python
|
||||
from peft import PeftMixedModel
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM").eval()
|
||||
peft_model = PeftMixedModel.from_pretrained(model, <path-to-adapter-0>, "adapter0")
|
||||
peft_model.load_adapter(<path-to-adapter-1>, "adapter1")
|
||||
peft_model.set_adapter(["adapter0", "adapter1"])
|
||||
result = peft_model(**inputs)
|
||||
```
|
||||
|
||||
The main intent is to load already trained adapters and use this only for inference. However, it is also possible to create a PEFT model for training by passing `mixed=True` to `get_peft_model`:
|
||||
|
||||
```python
|
||||
from peft import get_peft_model, LoraConfig, LoKrConfig
|
||||
|
||||
base_model = ...
|
||||
config0 = LoraConfig(...)
|
||||
config1 = LoKrConfig(...)
|
||||
peft_model = get_peft_model(base_model, config0, "adapter0", mixed=True)
|
||||
peft_model.add_adapter(config1, "adapter1")
|
||||
peft_model.set_adapter(["adapter0", "adapter1"])
|
||||
for batch in dataloader:
|
||||
...
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
If you would like to contribute to PEFT, please check out our [contributing guide](https://huggingface.co/docs/peft/developer_guides/contributing).
|
||||
|
||||
## Citing 🤗 PEFT
|
||||
|
||||
@ -373,7 +445,7 @@ If you use 🤗 PEFT in your publication, please cite it by using the following
|
||||
```bibtex
|
||||
@Misc{peft,
|
||||
title = {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},
|
||||
author = {Sourab Mangrulkar, Sylvain Gugger, Lysandre Debut, Younes Belkada, Sayak Paul},
|
||||
author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan},
|
||||
howpublished = {\url{https://github.com/huggingface/peft}},
|
||||
year = {2022}
|
||||
}
|
||||
|
@ -15,6 +15,7 @@ RUN apt-get update && \
|
||||
RUN apt-get update && \
|
||||
apt install -y ffmpeg
|
||||
|
||||
RUN apt install -y libsndfile1-dev
|
||||
RUN git lfs install
|
||||
|
||||
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
@ -31,9 +32,12 @@ SHELL ["/bin/bash", "-c"]
|
||||
# Activate the conda env and install transformers + accelerate from source
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
librosa \
|
||||
"soundfile>=0.12.1" \
|
||||
scipy \
|
||||
git+https://github.com/huggingface/transformers \
|
||||
git+https://github.com/huggingface/accelerate \
|
||||
git+https://github.com/huggingface/peft#egg=peft[test]
|
||||
peft[test]@git+https://github.com/huggingface/peft
|
||||
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
|
67
docker/peft-gpu-bnb-latest/Dockerfile
Normal file
67
docker/peft-gpu-bnb-latest/Dockerfile
Normal file
@ -0,0 +1,67 @@
|
||||
# Builds GPU docker image of PyTorch
|
||||
# Uses multi-staged approach to reduce size
|
||||
# Stage 1
|
||||
# Use base conda image to reduce time
|
||||
FROM continuumio/miniconda3:latest AS compile-image
|
||||
# Specify py version
|
||||
ENV PYTHON_VERSION=3.8
|
||||
# Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget software-properties-common git-lfs && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Install audio-related libraries
|
||||
RUN apt-get update && \
|
||||
apt install -y ffmpeg
|
||||
|
||||
RUN apt install -y libsndfile1-dev
|
||||
RUN git lfs install
|
||||
|
||||
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
RUN conda create --name peft python=${PYTHON_VERSION} ipython jupyter pip
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip
|
||||
|
||||
# Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
# We don't install pytorch here yet since CUDA isn't available
|
||||
# instead we use the direct torch wheel
|
||||
ENV PATH /opt/conda/envs/peft/bin:$PATH
|
||||
# Activate our bash shell
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Activate the conda env and install transformers + accelerate from latest pypi
|
||||
# Also clone BNB and build it from source.
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install -U --no-cache-dir \
|
||||
librosa \
|
||||
"soundfile>=0.12.1" \
|
||||
scipy \
|
||||
transformers \
|
||||
accelerate \
|
||||
peft \
|
||||
optimum \
|
||||
auto-gptq && \
|
||||
git clone https://github.com/TimDettmers/bitsandbytes && cd bitsandbytes && \
|
||||
CUDA_VERSION=121 make cuda12x && \
|
||||
python setup.py develop && \
|
||||
pip freeze | grep bitsandbytes
|
||||
|
||||
RUN echo "source activate peft" >> ~/.profile
|
||||
|
||||
# Activate the virtualenv
|
||||
CMD ["/bin/bash"]
|
67
docker/peft-gpu-bnb-source/Dockerfile
Normal file
67
docker/peft-gpu-bnb-source/Dockerfile
Normal file
@ -0,0 +1,67 @@
|
||||
# Builds GPU docker image of PyTorch
|
||||
# Uses multi-staged approach to reduce size
|
||||
# Stage 1
|
||||
# Use base conda image to reduce time
|
||||
FROM continuumio/miniconda3:latest AS compile-image
|
||||
# Specify py version
|
||||
ENV PYTHON_VERSION=3.8
|
||||
# Install apt libs - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget software-properties-common git-lfs && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Install audio-related libraries
|
||||
RUN apt-get update && \
|
||||
apt install -y ffmpeg
|
||||
|
||||
RUN apt install -y libsndfile1-dev
|
||||
RUN git lfs install
|
||||
|
||||
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
RUN conda create --name peft python=${PYTHON_VERSION} ipython jupyter pip
|
||||
RUN python3 -m pip install --no-cache-dir --upgrade pip
|
||||
|
||||
# Below is copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
# We don't install pytorch here yet since CUDA isn't available
|
||||
# instead we use the direct torch wheel
|
||||
ENV PATH /opt/conda/envs/peft/bin:$PATH
|
||||
# Activate our bash shell
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Activate the conda env and install transformers + accelerate from source
|
||||
# Also clone BNB and build it from source.
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install -U --no-cache-dir \
|
||||
librosa \
|
||||
"soundfile>=0.12.1" \
|
||||
scipy \
|
||||
git+https://github.com/huggingface/transformers \
|
||||
git+https://github.com/huggingface/accelerate \
|
||||
peft[test]@git+https://github.com/huggingface/peft \
|
||||
optimum \
|
||||
auto-gptq && \
|
||||
git clone https://github.com/TimDettmers/bitsandbytes && cd bitsandbytes && \
|
||||
CUDA_VERSION=121 make cuda12x && \
|
||||
python setup.py develop && \
|
||||
pip freeze | grep bitsandbytes
|
||||
|
||||
RUN echo "source activate peft" >> ~/.profile
|
||||
|
||||
# Activate the virtualenv
|
||||
CMD ["/bin/bash"]
|
@ -15,6 +15,7 @@ RUN apt-get update && \
|
||||
RUN apt-get update && \
|
||||
apt install -y ffmpeg
|
||||
|
||||
RUN apt install -y libsndfile1-dev
|
||||
RUN git lfs install
|
||||
|
||||
# Create our conda env - copied from https://github.com/huggingface/accelerate/blob/main/docker/accelerate-gpu/Dockerfile
|
||||
@ -28,27 +29,37 @@ ENV PATH /opt/conda/envs/peft/bin:$PATH
|
||||
# Activate our bash shell
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
# Activate the conda env and install transformers + accelerate from source
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install --no-cache-dir \
|
||||
git+https://github.com/huggingface/transformers \
|
||||
git+https://github.com/huggingface/accelerate \
|
||||
git+https://github.com/huggingface/peft#egg=peft[test]
|
||||
|
||||
RUN python3 -m pip install --no-cache-dir bitsandbytes
|
||||
|
||||
# Stage 2
|
||||
FROM nvidia/cuda:11.2.2-cudnn8-devel-ubuntu20.04 AS build-image
|
||||
FROM nvidia/cuda:12.2.2-devel-ubuntu22.04 AS build-image
|
||||
COPY --from=compile-image /opt/conda /opt/conda
|
||||
ENV PATH /opt/conda/bin:$PATH
|
||||
|
||||
RUN chsh -s /bin/bash
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install --no-cache-dir bitsandbytes optimum auto-gptq
|
||||
|
||||
# Install apt libs
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl git wget && \
|
||||
apt-get clean && \
|
||||
rm -rf /var/lib/apt/lists*
|
||||
|
||||
# Activate the conda env and install transformers + accelerate from source
|
||||
RUN source activate peft && \
|
||||
python3 -m pip install -U --no-cache-dir \
|
||||
librosa \
|
||||
"soundfile>=0.12.1" \
|
||||
scipy \
|
||||
git+https://github.com/huggingface/transformers \
|
||||
git+https://github.com/huggingface/accelerate \
|
||||
peft[test]@git+https://github.com/huggingface/peft
|
||||
|
||||
RUN source activate peft && \
|
||||
pip freeze | grep transformers
|
||||
|
||||
RUN echo "source activate peft" >> ~/.profile
|
||||
|
||||
# Activate the virtualenv
|
||||
CMD ["/bin/bash"]
|
||||
CMD ["/bin/bash"]
|
||||
|
@ -33,7 +33,7 @@ pip install git+https://github.com/huggingface/doc-builder
|
||||
**NOTE**
|
||||
|
||||
You only need to generate the documentation to inspect it locally (if you're planning changes and want to
|
||||
check how they look before committing for instance). You don't have to commit the built documentation.
|
||||
check how they look before committing for instance). You don't have to commit to the built documentation.
|
||||
|
||||
---
|
||||
|
||||
@ -46,7 +46,7 @@ typing the following command:
|
||||
doc-builder build peft docs/source/ --build_dir ~/tmp/test-build
|
||||
```
|
||||
|
||||
You can adapt the `--build_dir` to set any temporary folder that you prefer. This command will create it and generate
|
||||
You can adapt the `--build_dir` to set any temporary folder you prefer. This command will create it and generate
|
||||
the MDX files that will be rendered as the documentation on the main website. You can inspect them in your favorite
|
||||
Markdown editor.
|
||||
|
||||
@ -124,7 +124,7 @@ Adding a new tutorial or section is done in two steps:
|
||||
- Link that file in `./source/_toctree.yml` on the correct toc-tree.
|
||||
|
||||
Make sure to put your new file under the proper section. It's unlikely to go in the first section (*Get Started*), so
|
||||
depending on the intended targets (beginners, more advanced users, or researchers) it should go in sections two, three, or
|
||||
depending on the intended targets (beginners, more advanced users, or researchers) it should go into sections two, three, or
|
||||
four.
|
||||
|
||||
### Writing source documentation
|
||||
@ -188,7 +188,7 @@ then its documentation should look like this:
|
||||
```
|
||||
|
||||
Note that we always omit the "defaults to \`None\`" when None is the default for any argument. Also note that even
|
||||
if the first line describing your argument type and its default gets long, you can't break it on several lines. You can
|
||||
if the first line describing your argument type and its default gets long, you can't break it into several lines. You can
|
||||
however write as many lines as you want in the indented description (see the example above with `input_ids`).
|
||||
|
||||
#### Writing a multi-line code block
|
||||
@ -234,13 +234,13 @@ We have an automatic script running with the `make style` comment that will make
|
||||
- the docstrings fully take advantage of the line width
|
||||
- all code examples are formatted using black, like the code of the Transformers library
|
||||
|
||||
This script may have some weird failures if you made a syntax mistake or if you uncover a bug. Therefore, it's
|
||||
This script may have some weird failures if you make a syntax mistake or if you uncover a bug. Therefore, it's
|
||||
recommended to commit your changes before running `make style`, so you can revert the changes done by that script
|
||||
easily.
|
||||
|
||||
## Writing documentation examples
|
||||
|
||||
The syntax for Example docstrings can look as follows:
|
||||
The syntax, for example, docstrings can look as follows:
|
||||
|
||||
```
|
||||
Example:
|
||||
@ -264,4 +264,4 @@ is to be used in inference and also include the expected (ideally sensible)
|
||||
output.
|
||||
Often, readers will try out the example before even going through the function
|
||||
or class definitions. Therefore, it is of utmost importance that the example
|
||||
works as expected.
|
||||
works as expected.
|
||||
|
@ -7,24 +7,48 @@
|
||||
- local: install
|
||||
title: Installation
|
||||
|
||||
- title: Task guides
|
||||
- title: Tutorial
|
||||
sections:
|
||||
- local: task_guides/image_classification_lora
|
||||
title: Image classification using LoRA
|
||||
- local: task_guides/seq2seq-prefix-tuning
|
||||
title: Prefix tuning for conditional generation
|
||||
- local: task_guides/clm-prompt-tuning
|
||||
title: Prompt tuning for causal language modeling
|
||||
- local: task_guides/semantic_segmentation_lora
|
||||
title: Semantic segmentation using LoRA
|
||||
- local: task_guides/ptuning-seq-classification
|
||||
title: P-tuning for sequence classification
|
||||
- local: task_guides/dreambooth_lora
|
||||
title: Dreambooth fine-tuning with LoRA
|
||||
- local: task_guides/token-classification-lora
|
||||
title: LoRA for token classification
|
||||
- local: task_guides/int8-asr
|
||||
title: int8 training for automatic speech recognition
|
||||
- local: tutorial/peft_model_config
|
||||
title: Configurations and models
|
||||
- local: tutorial/peft_integrations
|
||||
title: Integrations
|
||||
|
||||
- title: PEFT method guides
|
||||
sections:
|
||||
- local: task_guides/prompt_based_methods
|
||||
title: Prompt-based methods
|
||||
- title: LoRA
|
||||
sections:
|
||||
- local: task_guides/image_classification_lora
|
||||
title: Image classification
|
||||
- local: task_guides/semantic_segmentation_lora
|
||||
title: Semantic segmentation
|
||||
- local: task_guides/token-classification-lora
|
||||
title: Token classification
|
||||
- local: task_guides/semantic-similarity-lora
|
||||
title: Semantic similarity
|
||||
- local: task_guides/int8-asr
|
||||
title: int8 training for automatic speech recognition
|
||||
- local: task_guides/dreambooth_lora
|
||||
title: DreamBooth
|
||||
|
||||
- title: Developer guides
|
||||
sections:
|
||||
- local: developer_guides/quantization
|
||||
title: Quantization
|
||||
- local: developer_guides/lora
|
||||
title: LoRA
|
||||
- local: developer_guides/custom_models
|
||||
title: Working with custom models
|
||||
- local: developer_guides/low_level_api
|
||||
title: PEFT low level API
|
||||
- local: developer_guides/mixed_models
|
||||
title: Mixing different adapter types
|
||||
- local: developer_guides/contributing
|
||||
title: Contributing to PEFT
|
||||
- local: developer_guides/troubleshooting
|
||||
title: Troubleshooting
|
||||
|
||||
- title: 🤗 Accelerate integrations
|
||||
sections:
|
||||
@ -35,16 +59,51 @@
|
||||
|
||||
- title: Conceptual guides
|
||||
sections:
|
||||
- local: conceptual_guides/lora
|
||||
title: LoRA
|
||||
- local: conceptual_guides/adapter
|
||||
title: Adapters
|
||||
- local: conceptual_guides/prompting
|
||||
title: Prompting
|
||||
title: Soft prompts
|
||||
- local: conceptual_guides/ia3
|
||||
title: IA3
|
||||
|
||||
- sections:
|
||||
- sections:
|
||||
- local: package_reference/auto_class
|
||||
title: AutoPeftModel
|
||||
- local: package_reference/peft_model
|
||||
title: PEFT model
|
||||
- local: package_reference/peft_types
|
||||
title: PEFT types
|
||||
- local: package_reference/config
|
||||
title: Configuration
|
||||
- local: package_reference/tuners
|
||||
title: Tuner
|
||||
title: Main classes
|
||||
- sections:
|
||||
- local: package_reference/adalora
|
||||
title: AdaLoRA
|
||||
- local: package_reference/ia3
|
||||
title: IA3
|
||||
- local: package_reference/llama_adapter
|
||||
title: Llama-Adapter
|
||||
- local: package_reference/loha
|
||||
title: LoHa
|
||||
- local: package_reference/lokr
|
||||
title: LoKr
|
||||
- local: package_reference/lora
|
||||
title: LoRA
|
||||
- local: package_reference/adapter_utils
|
||||
title: LyCORIS
|
||||
- local: package_reference/multitask_prompt_tuning
|
||||
title: Multitask Prompt Tuning
|
||||
- local: package_reference/oft
|
||||
title: OFT
|
||||
- local: package_reference/p_tuning
|
||||
title: P-tuning
|
||||
- local: package_reference/prefix_tuning
|
||||
title: Prefix tuning
|
||||
- local: package_reference/prompt_tuning
|
||||
title: Prompt tuning
|
||||
title: Adapters
|
||||
title: API reference
|
||||
|
||||
- title: Reference
|
||||
sections:
|
||||
- local: package_reference/peft_model
|
||||
title: PEFT model
|
||||
- local: package_reference/config
|
||||
title: Configuration
|
||||
- local: package_reference/tuners
|
||||
title: Tuners
|
@ -1,3 +1,7 @@
|
||||
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# DeepSpeed
|
||||
|
||||
[DeepSpeed](https://www.deepspeed.ai/) is a library designed for speed and scale for distributed training of large models with billions of parameters. At its core is the Zero Redundancy Optimizer (ZeRO) that shards optimizer states (ZeRO-1), gradients (ZeRO-2), and parameters (ZeRO-3) across data parallel processes. This drastically reduces memory usage, allowing you to scale your training to billion parameter models. To unlock even more memory efficiency, ZeRO-Offload reduces GPU compute and memory by leveraging CPU resources during optimization.
|
@ -1,3 +1,7 @@
|
||||
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Fully Sharded Data Parallel
|
||||
|
||||
[Fully sharded data parallel](https://pytorch.org/docs/stable/fsdp.html) (FSDP) is developed for distributed training of large pretrained models up to 1T parameters. FSDP achieves this by sharding the model parameters, gradients, and optimizer states across data parallel processes and it can also offload sharded model parameters to a CPU. The memory efficiency afforded by FSDP allows you to scale training to larger batch or model sizes.
|
89  docs/source/conceptual_guides/adapter.md  (new file)
@ -0,0 +1,89 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Adapters
|
||||
|
||||
Adapter-based methods add extra trainable parameters after the attention and fully-connected layers of a frozen pretrained model to reduce memory usage and speed up training. The method varies depending on the adapter; it could simply be an extra added layer, or it could express the weight updates ∆W as a low-rank decomposition of the weight matrix. Either way, the adapters are typically small but demonstrate comparable performance to a fully finetuned model and enable training larger models with fewer resources.
|
||||
|
||||
This guide will give you a brief overview of the adapter methods supported by PEFT (if you're interested in learning more details about a specific method, take a look at the linked paper).
|
||||
|
||||
## Low-Rank Adaptation (LoRA)
|
||||
|
||||
<Tip>
|
||||
|
||||
LoRA is one of the most popular PEFT methods and a good starting point if you're just getting started with PEFT. It was originally developed for large language models but it is a tremendously popular training method for diffusion models because of its efficiency and effectiveness.
|
||||
|
||||
</Tip>
|
||||
|
||||
As mentioned briefly earlier, [LoRA](https://hf.co/papers/2106.09685) is a technique that accelerates finetuning large models while consuming less memory.
|
||||
|
||||
LoRA represents the weight updates ∆W with two smaller matrices (called *update matrices*) through low-rank decomposition. These new matrices can be trained to adapt to the new data while keeping the overall number of parameters low. The original weight matrix remains frozen and doesn't receive any further updates. To produce the final results, the original and extra adapted weights are combined. You could also merge the adapter weights with the base model to eliminate inference latency.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora_animated.gif"/>
|
||||
</div>
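
To make the decomposition concrete, here is a minimal sketch in plain PyTorch (not PEFT internals; the shapes and scaling are illustrative assumptions):

```py
import torch

# Hypothetical shapes: a frozen weight W and a rank-r update made of two small matrices.
d_out, d_in, r, lora_alpha = 512, 512, 8, 16
W = torch.randn(d_out, d_in)                 # frozen pretrained weight
A = torch.randn(r, d_in) * 0.01              # update matrix A (trainable)
B = torch.zeros(d_out, r)                    # update matrix B (trainable, starts at zero)

delta_W = B @ A                              # low-rank weight update
W_adapted = W + (lora_alpha / r) * delta_W   # combined weights used to produce the output

# The adapter adds r * (d_in + d_out) parameters instead of d_out * d_in.
print(A.numel() + B.numel(), W.numel())      # 8192 vs. 262144 parameters
```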
|
||||
|
||||
This approach has a number of advantages:
|
||||
|
||||
* LoRA makes finetuning more efficient by drastically reducing the number of trainable parameters.
|
||||
* The original pretrained weights are kept frozen, which means you can have multiple lightweight and portable LoRA models for various downstream tasks built on top of them.
|
||||
* LoRA is orthogonal to other parameter-efficient methods and can be combined with many of them.
|
||||
* Performance of models finetuned using LoRA is comparable to the performance of fully finetuned models.
|
||||
|
||||
In principle, LoRA can be applied to any subset of weight matrices in a neural network to reduce the number of trainable parameters. However, for simplicity and further parameter efficiency, LoRA is typically only applied to the attention blocks in Transformer models. The resulting number of trainable parameters in a LoRA model depends on the size of the update matrices, which is determined mainly by the rank `r` and the shape of the original weight matrix.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/lora.png"/>
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2103.10385">Navigating Text-To-Image Customization: From LyCORIS Fine-Tuning to Model Evaluation</a></small>
|
||||
|
||||
## Low-Rank Hadamard Product (LoHa)
|
||||
|
||||
Low-rank decomposition can impact performance because the weight updates are limited to the low-rank space, which can constrain a model's expressiveness. However, you don't necessarily want to use a larger rank because it increases the number of trainable parameters. To address this, [LoHa](https://huggingface.co/papers/2108.06098) (a method originally developed for computer vision) was applied to diffusion models where the ability to generate diverse images is an important consideration. LoHa should also work with general model types, but the embedding layers aren't currently implemented in PEFT.
|
||||
|
||||
LoHa uses the [Hadamard product](https://en.wikipedia.org/wiki/Hadamard_product_(matrices)) (element-wise product) instead of the matrix product. ∆W is represented by four smaller matrices instead of two - like in LoRA - and each pair of these low-rank matrices is combined with the Hadamard product. As a result, ∆W can have the same number of trainable parameters but a higher rank and expressivity.
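
As a rough sketch of the idea (plain PyTorch with illustrative shapes, not how PEFT implements LoHa internally):

```py
import torch

d_out, d_in, r = 64, 64, 4
B1, A1 = torch.randn(d_out, r), torch.randn(r, d_in)   # first low-rank pair
B2, A2 = torch.randn(d_out, r), torch.randn(r, d_in)   # second low-rank pair

# The two low-rank products are combined element-wise (Hadamard product).
delta_W = (B1 @ A1) * (B2 @ A2)

# A single low-rank product has rank at most r; the Hadamard product can reach up to r * r.
print(torch.linalg.matrix_rank(delta_W))
```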
|
||||
|
||||
## Low-Rank Kronecker Product (LoKr)
|
||||
|
||||
[LoKr](https://hf.co/papers/2309.14859) is very similar to LoRA and LoHa, and it is also mainly applied to diffusion models, though you could also use it with other model types. LoKr replaces the matrix product with the [Kronecker product](https://en.wikipedia.org/wiki/Kronecker_product) instead. The Kronecker product decomposition creates a block matrix which preserves the rank of the original weight matrix. Another benefit of the Kronecker product is that it can be vectorized by stacking the matrix columns. This can speed up the process because you're avoiding fully reconstructing ∆W.
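
A brief sketch of the Kronecker factorization (plain PyTorch, illustrative shapes only):

```py
import torch

# Two small factors expand into one large block matrix via the Kronecker product.
A = torch.randn(8, 8)
B = torch.randn(64, 64)
delta_W = torch.kron(A, B)                     # shape (8 * 64, 8 * 64) = (512, 512)

print(A.numel() + B.numel(), delta_W.numel())  # 4,160 parameters describe a 262,144-entry update
```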
|
||||
|
||||
## Orthogonal Finetuning (OFT)
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/oft.png"/>
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2306.07280">Controlling Text-to-Image Diffusion by Orthogonal Finetuning</a></small>
|
||||
|
||||
[OFT](https://hf.co/papers/2306.07280) is a method that primarily focuses on preserving a pretrained model's generative performance in the finetuned model. It tries to maintain the same cosine similarity (hyperspherical energy) between all pairwise neurons in a layer because this better captures the semantic information among neurons. This means OFT is more capable of preserving the subject, and it is better for controllable generation (similar to [ControlNet](https://huggingface.co/docs/diffusers/using-diffusers/controlnet)).
|
||||
|
||||
OFT preserves the hyperspherical energy by learning an orthogonal transformation for neurons to keep the cosine similarity between them unchanged. In practice, this means taking the matrix product of an orthogonal matrix with the pretrained weight matrix. However, to be parameter-efficient, the orthogonal matrix is represented as a block-diagonal matrix with rank `r` blocks. Whereas LoRA reduces the number of trainable parameters with low-rank structures, OFT reduces the number of trainable parameters with a sparse block-diagonal matrix structure.
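
The following sketch illustrates the block-diagonal orthogonal transformation (plain PyTorch with made-up sizes; PEFT's actual implementation differs in its details):

```py
import torch

d, num_blocks = 64, 8                      # 8 blocks of size 8 x 8
blocks = []
for _ in range(num_blocks):
    q, _ = torch.linalg.qr(torch.randn(d // num_blocks, d // num_blocks))
    blocks.append(q)                       # each block is orthogonal
R = torch.block_diag(*blocks)              # sparse block-diagonal orthogonal matrix

W = torch.randn(d, 128)                    # frozen pretrained weight
W_adapted = R @ W                          # orthogonal transform keeps pairwise angles unchanged

# Trainable parameters: num_blocks * (d / num_blocks) ** 2 instead of d * d for a dense R.
print(num_blocks * (d // num_blocks) ** 2, d * d)  # 512 vs. 4096
```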
|
||||
|
||||
## Adaptive Low-Rank Adaptation (AdaLoRA)
|
||||
|
||||
[AdaLoRA](https://hf.co/papers/2303.10512) manages the parameter budget introduced from LoRA by allocating more parameters - in other words, a higher rank `r` - for important weight matrices that are better adapted for a task and pruning less important ones. The rank is controlled by a method similar to singular value decomposition (SVD). The ∆W is parameterized with two orthogonal matrices and a diagonal matrix which contains singular values. This parametrization method avoids iteratively applying SVD which is computationally expensive. Based on this method, the rank of ∆W is adjusted according to an importance score. ∆W is divided into triplets and each triplet is scored according to its contribution to model performance. Triplets with low importance scores are pruned and triplets with high importance scores are kept for finetuning.
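
A minimal sketch of the SVD-style parametrization (plain PyTorch, made-up sizes; the actual importance scoring in PEFT is more involved):

```py
import torch

d_out, d_in, r = 64, 64, 8
P = torch.randn(d_out, r)                 # left singular-vector-like matrix
lam = torch.randn(r)                      # learnable singular values
Q = torch.randn(r, d_in)                  # right singular-vector-like matrix

# Pruning a triplet (column of P, entry of lam, row of Q) lowers the effective rank.
importance = lam.abs()                    # stand-in for the real importance score
mask = (importance >= importance.median()).float()
delta_W = P @ torch.diag(lam * mask) @ Q
print(int(mask.sum()), "of", r, "triplets kept")
```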
|
||||
|
||||
## Llama-Adapter
|
||||
|
||||
[Llama-Adapter](https://hf.co/papers/2303.16199) is a method for adapting Llama into an instruction-following model. To help adapt the model for instruction-following, the adapter is trained with a 52K instruction-output dataset.
|
||||
|
||||
A set of learnable adaption prompts is prefixed to the input instruction tokens. These are inserted into the upper layers of the model because it is better to learn with the higher-level semantics of the pretrained model. The instruction-output tokens prefixed to the input guide the adaption prompt to generate a contextual response.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/llama-adapter.png"/>
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2303.16199">LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention</a></small>
|
||||
|
||||
To avoid adding noise to the tokens, the adapter uses zero-initialized attention. On top of this, the adapter adds a learnable gating factor (initialized with zeros) to progressively add information to the model during training. This prevents overwhelming the model's pretrained knowledge with the newly learned instructions.
|
68  docs/source/conceptual_guides/ia3.md  (new file)
@ -0,0 +1,68 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# IA3
|
||||
|
||||
This conceptual guide gives a brief overview of [IA3](https://arxiv.org/abs/2205.05638), a parameter-efficient fine-tuning technique that is
|
||||
intended to improve over [LoRA](./lora).
|
||||
|
||||
To make fine-tuning more efficient, IA3 (Infused Adapter by Inhibiting and Amplifying Inner Activations)
|
||||
rescales inner activations with learned vectors. These learned vectors are injected in the attention and feedforward modules
|
||||
in a typical transformer-based architecture. These learned vectors are the only trainable parameters during fine-tuning, and thus the original
|
||||
weights remain frozen. Dealing with learned vectors (as opposed to learned low-rank updates to a weight matrix like LoRA)
|
||||
keeps the number of trainable parameters much smaller.
|
||||
|
||||
Being similar to LoRA, IA3 carries many of the same advantages:
|
||||
|
||||
* IA3 makes fine-tuning more efficient by drastically reducing the number of trainable parameters. (For T0, an IA3 model only has about 0.01% trainable parameters, while even LoRA has > 0.1%)
|
||||
* The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable IA3 models for various downstream tasks built on top of them.
|
||||
* Performance of models fine-tuned using IA3 is comparable to the performance of fully fine-tuned models.
|
||||
* IA3 does not add any inference latency because adapter weights can be merged with the base model.
|
||||
|
||||
In principle, IA3 can be applied to any subset of weight matrices in a neural network to reduce the number of trainable
|
||||
parameters. Following the authors' implementation, IA3 weights are added to the key, value and feedforward layers
|
||||
of a Transformer model. To be specific, for transformer models, IA3 weights are added to the outputs of key and value layers, and to the input of the second feedforward layer
|
||||
in each transformer block.
|
||||
|
||||
Given the target layers for injecting IA3 parameters, the number of trainable parameters
|
||||
can be determined based on the size of the weight matrices.
|
||||
|
||||
|
||||
## Common IA3 parameters in PEFT
|
||||
|
||||
As with other methods supported by PEFT, to fine-tune a model using IA3, you need to:
|
||||
|
||||
1. Instantiate a base model.
|
||||
2. Create a configuration (`IA3Config`) where you define IA3-specific parameters.
|
||||
3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`.
|
||||
4. Train the `PeftModel` as you normally would train the base model.
|
||||
|
||||
`IA3Config` allows you to control how IA3 is applied to the base model through the following parameters:
|
||||
|
||||
- `target_modules`: The modules (for example, attention blocks) to apply the IA3 vectors.
|
||||
- `feedforward_modules`: The list of modules to be treated as feedforward layers in `target_modules`. While learned vectors are multiplied with
|
||||
the output activation for attention blocks, the vectors are multiplied with the input for classic feedforward layers. Note that `feedforward_modules` must be a subset of `target_modules`.
|
||||
- `modules_to_save`: List of modules apart from IA3 layers to be set as trainable and saved in the final checkpoint. These typically include the model's custom head, which is randomly initialized for the fine-tuning task.
|
||||
|
||||
## Example Usage
|
||||
|
||||
For the task of sequence classification, one can initialize the IA3 config for a Llama model as follows:
|
||||
|
||||
```py
|
||||
peft_config = IA3Config(
|
||||
task_type=TaskType.SEQ_CLS, target_modules=["k_proj", "v_proj", "down_proj"], feedforward_modules=["down_proj"]
|
||||
)
|
||||
```
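
To put the config to work, a short end-to-end sketch could look as follows (the checkpoint name is only a placeholder; any model with matching module names works):

```py
from transformers import AutoModelForSequenceClassification
from peft import IA3Config, TaskType, get_peft_model

# Placeholder checkpoint - substitute the Llama variant you actually want to fine-tune.
model = AutoModelForSequenceClassification.from_pretrained("meta-llama/Llama-2-7b-hf", num_labels=2)

peft_config = IA3Config(
    task_type=TaskType.SEQ_CLS, target_modules=["k_proj", "v_proj", "down_proj"], feedforward_modules=["down_proj"]
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
```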
|
@ -1,61 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# LoRA
|
||||
|
||||
This conceptual guide gives a brief overview of [LoRA](https://arxiv.org/abs/2106.09685), a technique that accelerates
|
||||
the fine-tuning of large models while consuming less memory.
|
||||
|
||||
To make fine-tuning more efficient, LoRA's approach is to represent the weight updates with two smaller
|
||||
matrices (called **update matrices**) through low-rank decomposition. These new matrices can be trained to adapt to the
|
||||
new data while keeping the overall number of changes low. The original weight matrix remains frozen and doesn't receive
|
||||
any further adjustments. To produce the final results, both the original and the adapted weights are combined.
|
||||
|
||||
This approach has a number of advantages:
|
||||
|
||||
* LoRA makes fine-tuning more efficient by drastically reducing the number of trainable parameters.
|
||||
* The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable LoRA models for various downstream tasks built on top of them.
|
||||
* LoRA is orthogonal to many other parameter-efficient methods and can be combined with many of them.
|
||||
* Performance of models fine-tuned using LoRA is comparable to the performance of fully fine-tuned models.
|
||||
* LoRA does not add any inference latency because adapter weights can be merged with the base model.
|
||||
|
||||
In principle, LoRA can be applied to any subset of weight matrices in a neural network to reduce the number of trainable
|
||||
parameters. However, for simplicity and further parameter efficiency, in Transformer models LoRA is typically applied to
|
||||
attention blocks only. The resulting number of trainable parameters in a LoRA model depends on the size of the low-rank
|
||||
update matrices, which is determined mainly by the rank `r` and the shape of the original weight matrix.
|
||||
|
||||
## Common LoRA parameters in PEFT
|
||||
|
||||
As with other methods supported by PEFT, to fine-tune a model using LoRA, you need to:
|
||||
|
||||
1. Instantiate a base model.
|
||||
2. Create a configuration (`LoraConfig`) where you define LoRA-specific parameters.
|
||||
3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`.
|
||||
4. Train the `PeftModel` as you normally would train the base model.
|
||||
|
||||
`LoraConfig` allows you to control how LoRA is applied to the base model through the following parameters:
|
||||
|
||||
- `r`: the rank of the update matrices, expressed in `int`. Lower rank results in smaller update matrices with fewer trainable parameters.
|
||||
- `target_modules`: The modules (for example, attention blocks) to apply the LoRA update matrices.
|
||||
- `alpha`: LoRA scaling factor.
|
||||
- `bias`: Specifies if the `bias` parameters should be trained. Can be `'none'`, `'all'` or `'lora_only'`.
|
||||
- `modules_to_save`: List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. These typically include model's custom head that is randomly initialized for the fine-tuning task.
|
||||
|
||||
## LoRA examples
|
||||
|
||||
For an example of LoRA method application to various downstream tasks, please refer to the following guides:
|
||||
|
||||
* [Image classification using LoRA](../task_guides/image_classification_lora)
|
||||
* [Semantic segmentation](../task_guides/semantic_segmentation_lora)
|
||||
|
||||
While the original paper focuses on language models, the technique can be applied to any dense layers in deep learning
|
||||
models. As such, you can leverage this technique with diffusion models. See [Dreambooth fine-tuning with LoRA](../task_guides/task_guides/dreambooth_lora) task guide for an example.
|
@ -1,4 +1,8 @@
|
||||
# Prompting
|
||||
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Soft prompts
|
||||
|
||||
Training large pretrained language models is very time-consuming and compute-intensive. As they continue to grow in size, there is increasing interest in more efficient training methods such as *prompting*. Prompting primes a frozen pretrained model for a specific downstream task by including a text prompt that describes the task or even demonstrates an example of the task. With prompting, you can avoid fully training a separate model for each downstream task, and use the same frozen pretrained model instead. This is a lot easier because you can use the same model for several different tasks, and it is significantly more efficient to train and store a smaller set of prompt parameters than to train all the model's parameters.
|
||||
|
||||
@ -7,16 +11,16 @@ There are two categories of prompting methods:
|
||||
- hard prompts are manually handcrafted text prompts with discrete input tokens; the downside is that it requires a lot of effort to create a good prompt
|
||||
- soft prompts are learnable tensors concatenated with the input embeddings that can be optimized to a dataset; the downside is that they aren't human readable because you aren't matching these "virtual tokens" to the embeddings of a real word
|
||||
|
||||
This conceptual guide provides a brief overview of the soft prompt methods included in 🤗 PEFT: prompt tuning, prefix tuning, and P-tuning.
|
||||
This conceptual guide provides a brief overview of the soft prompt methods included in 🤗 PEFT: prompt tuning, prefix tuning, P-tuning, and multitask prompt tuning.
|
||||
|
||||
## Prompt tuning
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/prompt-tuning.png"/>
|
||||
</div>
|
||||
<small>Only train and store a significantly smaller set of task-specific prompt parameters <a href="https://arxiv.org/abs/2104.08691">(image source)</a>.</small>
|
||||
<small>Only train and store a significantly smaller set of task-specific prompt parameters <a href="https://hf.co/papers/2104.08691">(image source)</a>.</small>
|
||||
|
||||
Prompt tuning was developed for text classification tasks on T5 models, and all downstream tasks are cast as a text generation task. For example, sequence classification usually assigns a single class label to a sequence of text. By casting it as a text generation task, the tokens that make up the class label are *generated*. Prompts are added to the input as a series of tokens. Typically, the model parameters are fixed which means the prompt tokens are also fixed by the model parameters.
|
||||
[Prompt tuning](https://hf.co/papers/2104.08691) was developed for text classification tasks on T5 models, and all downstream tasks are cast as a text generation task. For example, sequence classification usually assigns a single class label to a sequence of text. By casting it as a text generation task, the tokens that make up the class label are *generated*. Prompts are added to the input as a series of tokens. Typically, the model parameters are fixed which means the prompt tokens are also fixed by the model parameters.
|
||||
|
||||
The key idea behind prompt tuning is that prompt tokens have their own parameters that are updated independently. This means you can keep the pretrained model's parameters frozen, and only update the gradients of the prompt token embeddings. The results are comparable to the traditional method of training the entire model, and prompt tuning performance scales as model size increases.
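
As a brief illustration of this idea in PEFT (a hedged sketch; the checkpoint and token count are arbitrary choices, not a recommendation):

```py
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, TaskType, get_peft_model

model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")  # placeholder model
config = PromptTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=8)
model = get_peft_model(model, config)
model.print_trainable_parameters()  # only the virtual prompt embeddings are trainable
```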
|
||||
|
||||
@ -27,9 +31,9 @@ Take a look at [Prompt tuning for causal language modeling](../task_guides/clm-p
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/prefix-tuning.png"/>
|
||||
</div>
|
||||
<small>Optimize the prefix parameters for each task <a href="https://arxiv.org/abs/2101.00190">(image source)</a>.</small>
|
||||
<small>Optimize the prefix parameters for each task <a href="https://hf.co/papers/2101.00190">(image source)</a>.</small>
|
||||
|
||||
Prefix tuning was designed for natural language generation (NLG) tasks on GPT models. It is very similar to prompt tuning; prefix tuning also prepends a sequence of task-specific vectors to the input that can be trained and updated while keeping the rest of the pretrained model's parameters frozen.
|
||||
[Prefix tuning](https://hf.co/papers/2101.00190) was designed for natural language generation (NLG) tasks on GPT models. It is very similar to prompt tuning; prefix tuning also prepends a sequence of task-specific vectors to the input that can be trained and updated while keeping the rest of the pretrained model's parameters frozen.
|
||||
|
||||
The main difference is that the prefix parameters are inserted in **all** of the model layers, whereas prompt tuning only adds the prompt parameters to the model input embeddings. The prefix parameters are also optimized by a separate feed-forward network (FFN) instead of training directly on the soft prompts because it causes instability and hurts performance. The FFN is discarded after updating the soft prompts.
|
||||
|
||||
@ -42,9 +46,9 @@ Take a look at [Prefix tuning for conditional generation](../task_guides/seq2seq
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/p-tuning.png"/>
|
||||
</div>
|
||||
<small>Prompt tokens can be inserted anywhere in the input sequence, and they are optimized by a prompt encoder <a href="https://arxiv.org/abs/2103.10385">(image source)</a>.</small>
|
||||
<small>Prompt tokens can be inserted anywhere in the input sequence, and they are optimized by a prompt encoder <a href="https://hf.co/papers/2103.10385">(image source)</a>.</small>
|
||||
|
||||
P-tuning is designed for natural language understanding (NLU) tasks and all language models.
|
||||
[P-tuning](https://hf.co/papers/2103.10385) is designed for natural language understanding (NLU) tasks and all language models.
|
||||
It is another variation of a soft prompt method; P-tuning also adds a trainable embedding tensor that can be optimized to find better prompts, and it uses a prompt encoder (a bidirectional long-short term memory network or LSTM) to optimize the prompt parameters. Unlike prefix tuning though:
|
||||
|
||||
- the prompt tokens can be inserted anywhere in the input sequence, and it isn't restricted to only the beginning
|
||||
@ -53,4 +57,21 @@ It is another variation of a soft prompt method; P-tuning also adds a trainable
|
||||
|
||||
The results suggest that P-tuning is more efficient than manually crafting prompts, and it enables GPT-like models to compete with BERT-like models on NLU tasks.
|
||||
|
||||
Take a look at [P-tuning for sequence classification](../task_guides/ptuning-seq-classification) for a step-by-step guide on how to train a model with P-tuning.
|
||||
Take a look at [P-tuning for sequence classification](../task_guides/ptuning-seq-classification) for a step-by-step guide on how to train a model with P-tuning.
|
||||
|
||||
## Multitask prompt tuning
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/mpt.png"/>
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2103.10385">Multitask prompt tuning enables parameter-efficient transfer learning</a>.</small>
|
||||
|
||||
[Multitask prompt tuning (MPT)](https://hf.co/papers/2103.10385) learns a single prompt from data for multiple task types that can be shared for different target tasks. Other existing approaches learn a separate soft prompt for each task that needs to be retrieved or aggregated for adaptation to target tasks. MPT consists of two stages:
|
||||
|
||||
1. source training - for each task, its soft prompt is decomposed into task-specific vectors. The task-specific vectors are multiplied together to form another matrix W, and the Hadamard product is used between W and a shared prompt matrix P to generate a task-specific prompt matrix. The task-specific prompts are distilled into a single prompt matrix that is shared across all tasks. This prompt is trained with multitask training.
|
||||
2. target adaptation - to adapt the single prompt for a target task, a target prompt is initialized and expressed as the Hadamard product of the shared prompt matrix and the task-specific low-rank prompt matrix.
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/mpt-decomposition.png"/>
|
||||
</div>
|
||||
<small><a href="https://hf.co/papers/2103.10385">Prompt decomposition</a>.</small>
|
93  docs/source/developer_guides/contributing.md  (new file)
@ -0,0 +1,93 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Contributing to PEFT
|
||||
|
||||
We are happy to accept contributions to PEFT. If you plan to contribute, please read this document to make the process as smooth as possible.
|
||||
|
||||
## Installation
|
||||
|
||||
The installation instructions can be found [here](https://huggingface.co/docs/peft/install). If you want to provide code contributions to PEFT, you should choose the "source" installation method.
|
||||
|
||||
If you are new to creating a pull request, follow [these instructions from GitHub](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request).
|
||||
|
||||
## Running tests and code quality checks
|
||||
|
||||
Regardless of the type of contribution (unless it’s only about the docs), you should run tests and code quality checks before creating a PR to ensure that your contribution doesn’t break anything and follows the standards of the project.
|
||||
|
||||
We provide a Makefile to facilitate those steps. Run the code below for the unit test:
|
||||
|
||||
```sh
|
||||
make test
|
||||
```
|
||||
|
||||
Run one of the following to either check or check and fix code quality and style:
|
||||
|
||||
```sh
|
||||
make quality # just check
|
||||
make style # check and fix
|
||||
```
|
||||
|
||||
|
||||
Running all the tests can take a couple of minutes. Therefore, during development, it can be useful to run only those tests specific to your change:
|
||||
|
||||
```sh
|
||||
pytest tests/ -k <name-of-test>
|
||||
```
|
||||
|
||||
This should finish much quicker and allow faster iteration. Before creating the PR, however, please still run the whole test suite, as some changes can inadvertently break tests that at first glance are unrelated.
|
||||
|
||||
If your change is specific to a hardware setting (e.g. it requires CUDA), take a look at `tests/test_gpu_examples.py` and `tests/test_common_gpu.py` – maybe it makes sense to add a test there. If your change could have an effect on saving and loading models, please run the tests with the `--regression` flag to trigger regression tests.
|
||||
|
||||
It can happen that while you’re working on your PR, the underlying code base changes due to other changes being merged. If that happens – especially when there is a merge conflict – please update your branch to be on the latest changes. This can be a merge or a rebase, whatever you prefer. We will squash and merge the PR once it’s ready.
|
||||
|
||||
## PR description
|
||||
|
||||
When opening the PR, please provide a clear description of the changes you are proposing. If it relates to other issues or PRs, please reference them. Providing a good description will not only help the reviewers review your code better and faster, but it can also later be used (as a basis) for the commit message, which helps with the long-term maintenance of the project.
|
||||
|
||||
If your code makes some non-trivial changes, it can also be a good idea to add comments to the code to explain those changes. For example, if you had to iterate on your implementation multiple times because the most obvious way didn’t work, it’s a good indication that a code comment is needed.
|
||||
|
||||
## Providing a bugfix
|
||||
|
||||
Please give a description of the circumstances that led to the bug. If there is an existing issue, please link to it (e.g. “Resolves #12345”).
|
||||
|
||||
Ideally, when a bugfix is provided, it should be accompanied by a test for this bug. The test should fail with the current code and pass with the bugfix. Add a comment to the test that references the issue or PR. Without such a test, it is difficult to prevent regressions in the future.
|
||||
|
||||
## Adding a new fine-tuning method
|
||||
|
||||
New parameter-efficient fine-tuning methods are developed all the time. If you would like to add a new, promising method to PEFT, please follow these steps.
|
||||
|
||||
**Requirements**
|
||||
|
||||
1. Please add a link to the source (usually a paper) of the method.
|
||||
2. Some evidence should be provided that there is general interest in using the method. We will not add new methods that are freshly published but without evidence that there is demand for it.
|
||||
3. Ideally, we want to not only add the implementation of the new method, but also examples (notebooks, scripts), documentation, and an extensive test suite that proves that the method works with a variety of tasks. However, this can be very daunting. Therefore, it is also acceptable to only provide the implementation and at least one working example. Documentation and tests can be added in follow up PRs.
|
||||
|
||||
**Steps**
|
||||
|
||||
Before you start to implement the new method, please open an issue on GitHub with your proposal. That way, the maintainers can give you some early feedback.
|
||||
|
||||
When implementing the method, it makes sense to use existing implementations as a guide. Moreover, when you structure your code, please take inspiration from the other PEFT methods. For example, if your method is similar to LoRA, it makes sense to structure your code similarly or even re-use some functions or classes where appropriate (but don’t overdo it, some code duplication is okay).
|
||||
|
||||
Once you have something that seems to be working, don’t hesitate to create a draft PR, even if it’s not in a mergeable state yet. The maintainers will be happy to give you feedback and guidance along the way.
|
||||
|
||||
## Adding other features
|
||||
|
||||
It is best if you first open an issue on GitHub with a proposal to add the new feature. That way, you can discuss with the maintainers if it makes sense to add the feature before spending too much time on implementing it.
|
||||
|
||||
New features should generally be accompanied by tests and documentation or examples. Without the latter, users will have a hard time discovering your cool new feature.
|
||||
|
||||
Changes to the code should be implemented in a backward-compatible way. For example, existing code should continue to work the same way after the feature is merged.
|
242  docs/source/developer_guides/custom_models.md  (new file)
@ -0,0 +1,242 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Working with custom models
|
||||
|
||||
Some fine-tuning techniques, such as prompt tuning, are specific to language models. That means in 🤗 PEFT, it is
|
||||
assumed a 🤗 Transformers model is being used. However, other fine-tuning techniques - like
|
||||
[LoRA](../conceptual_guides/lora) - are not restricted to specific model types.
|
||||
|
||||
In this guide, we will see how LoRA can be applied to a multilayer perceptron, a computer vision model from the [timm](https://huggingface.co/docs/timm/index) library, or new 🤗 Transformers architectures.
|
||||
|
||||
## Multilayer perceptron
|
||||
|
||||
Let's assume that we want to fine-tune a multilayer perceptron with LoRA. Here is the definition:
|
||||
|
||||
```python
|
||||
from torch import nn
|
||||
|
||||
|
||||
class MLP(nn.Module):
|
||||
def __init__(self, num_units_hidden=2000):
|
||||
super().__init__()
|
||||
self.seq = nn.Sequential(
|
||||
nn.Linear(20, num_units_hidden),
|
||||
nn.ReLU(),
|
||||
nn.Linear(num_units_hidden, num_units_hidden),
|
||||
nn.ReLU(),
|
||||
nn.Linear(num_units_hidden, 2),
|
||||
nn.LogSoftmax(dim=-1),
|
||||
)
|
||||
|
||||
def forward(self, X):
|
||||
return self.seq(X)
|
||||
```
|
||||
|
||||
This is a straightforward multilayer perceptron with an input layer, a hidden layer, and an output layer.
|
||||
|
||||
<Tip>
|
||||
|
||||
For this toy example, we choose an exceedingly large number of hidden units to highlight the efficiency gains
|
||||
from PEFT, but those gains are in line with more realistic examples.
|
||||
|
||||
</Tip>
|
||||
|
||||
There are a few linear layers in this model that could be tuned with LoRA. When working with common 🤗 Transformers
|
||||
models, PEFT will know which layers to apply LoRA to, but in this case, it is up to us as a user to choose the layers.
|
||||
To determine the names of the layers to tune:
|
||||
|
||||
```python
|
||||
print([(n, type(m)) for n, m in MLP().named_modules()])
|
||||
```
|
||||
|
||||
This should print:
|
||||
|
||||
```
|
||||
[('', __main__.MLP),
|
||||
('seq', torch.nn.modules.container.Sequential),
|
||||
('seq.0', torch.nn.modules.linear.Linear),
|
||||
('seq.1', torch.nn.modules.activation.ReLU),
|
||||
('seq.2', torch.nn.modules.linear.Linear),
|
||||
('seq.3', torch.nn.modules.activation.ReLU),
|
||||
('seq.4', torch.nn.modules.linear.Linear),
|
||||
('seq.5', torch.nn.modules.activation.LogSoftmax)]
|
||||
```
|
||||
|
||||
Let's say we want to apply LoRA to the input layer and to the hidden layer, those are `'seq.0'` and `'seq.2'`. Moreover,
|
||||
let's assume we want to update the output layer without LoRA, that would be `'seq.4'`. The corresponding config would
|
||||
be:
|
||||
|
||||
```python
|
||||
from peft import LoraConfig
|
||||
|
||||
config = LoraConfig(
|
||||
target_modules=["seq.0", "seq.2"],
|
||||
modules_to_save=["seq.4"],
|
||||
)
|
||||
```
|
||||
|
||||
With that, we can create our PEFT model and check the fraction of parameters trained:
|
||||
|
||||
```python
|
||||
from peft import get_peft_model
|
||||
|
||||
model = MLP()
|
||||
peft_model = get_peft_model(model, config)
|
||||
peft_model.print_trainable_parameters()
|
||||
# prints trainable params: 56,164 || all params: 4,100,164 || trainable%: 1.369798866581922
|
||||
```
|
||||
|
||||
Finally, we can use any training framework we like, or write our own fit loop, to train the `peft_model`.
|
||||
|
||||
For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/multilayer_perceptron/multilayer_perceptron_lora.ipynb).
|
||||
|
||||
## timm models
|
||||
|
||||
The [timm](https://huggingface.co/docs/timm/index) library contains a large number of pretrained computer vision models.
|
||||
Those can also be fine-tuned with PEFT. Let's check out how this works in practice.
|
||||
|
||||
To start, ensure that timm is installed in the Python environment:
|
||||
|
||||
```bash
|
||||
python -m pip install -U timm
|
||||
```
|
||||
|
||||
Next we load a timm model for an image classification task:
|
||||
|
||||
```python
|
||||
import timm
|
||||
|
||||
num_classes = ...
|
||||
model_id = "timm/poolformer_m36.sail_in1k"
|
||||
model = timm.create_model(model_id, pretrained=True, num_classes=num_classes)
|
||||
```
|
||||
|
||||
Again, we need to make a decision about what layers to apply LoRA to. Since LoRA supports 2D conv layers, and since
|
||||
those are a major building block of this model, we should apply LoRA to the 2D conv layers. To identify the names of
|
||||
those layers, let's look at all the layer names:
|
||||
|
||||
```python
|
||||
print([(n, type(m)) for n, m in model.named_modules()])
|
||||
```
|
||||
|
||||
This will print a very long list; we'll only show the first few entries:
|
||||
|
||||
```
|
||||
[('', timm.models.metaformer.MetaFormer),
|
||||
('stem', timm.models.metaformer.Stem),
|
||||
('stem.conv', torch.nn.modules.conv.Conv2d),
|
||||
('stem.norm', torch.nn.modules.linear.Identity),
|
||||
('stages', torch.nn.modules.container.Sequential),
|
||||
('stages.0', timm.models.metaformer.MetaFormerStage),
|
||||
('stages.0.downsample', torch.nn.modules.linear.Identity),
|
||||
('stages.0.blocks', torch.nn.modules.container.Sequential),
|
||||
('stages.0.blocks.0', timm.models.metaformer.MetaFormerBlock),
|
||||
('stages.0.blocks.0.norm1', timm.layers.norm.GroupNorm1),
|
||||
('stages.0.blocks.0.token_mixer', timm.models.metaformer.Pooling),
|
||||
('stages.0.blocks.0.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d),
|
||||
('stages.0.blocks.0.drop_path1', torch.nn.modules.linear.Identity),
|
||||
('stages.0.blocks.0.layer_scale1', timm.models.metaformer.Scale),
|
||||
('stages.0.blocks.0.res_scale1', torch.nn.modules.linear.Identity),
|
||||
('stages.0.blocks.0.norm2', timm.layers.norm.GroupNorm1),
|
||||
('stages.0.blocks.0.mlp', timm.layers.mlp.Mlp),
|
||||
('stages.0.blocks.0.mlp.fc1', torch.nn.modules.conv.Conv2d),
|
||||
('stages.0.blocks.0.mlp.act', torch.nn.modules.activation.GELU),
|
||||
('stages.0.blocks.0.mlp.drop1', torch.nn.modules.dropout.Dropout),
|
||||
('stages.0.blocks.0.mlp.norm', torch.nn.modules.linear.Identity),
|
||||
('stages.0.blocks.0.mlp.fc2', torch.nn.modules.conv.Conv2d),
|
||||
('stages.0.blocks.0.mlp.drop2', torch.nn.modules.dropout.Dropout),
|
||||
('stages.0.blocks.0.drop_path2', torch.nn.modules.linear.Identity),
|
||||
('stages.0.blocks.0.layer_scale2', timm.models.metaformer.Scale),
|
||||
('stages.0.blocks.0.res_scale2', torch.nn.modules.linear.Identity),
|
||||
('stages.0.blocks.1', timm.models.metaformer.MetaFormerBlock),
|
||||
('stages.0.blocks.1.norm1', timm.layers.norm.GroupNorm1),
|
||||
('stages.0.blocks.1.token_mixer', timm.models.metaformer.Pooling),
|
||||
('stages.0.blocks.1.token_mixer.pool', torch.nn.modules.pooling.AvgPool2d),
|
||||
...
|
||||
('head.global_pool.flatten', torch.nn.modules.linear.Identity),
|
||||
('head.norm', timm.layers.norm.LayerNorm2d),
|
||||
('head.flatten', torch.nn.modules.flatten.Flatten),
|
||||
('head.drop', torch.nn.modules.linear.Identity),
|
||||
('head.fc', torch.nn.modules.linear.Linear)]
|
||||
```
|
||||
|
||||
Upon closer inspection, we see that the 2D conv layers have names such as `"stages.0.blocks.0.mlp.fc1"` and
|
||||
`"stages.0.blocks.0.mlp.fc2"`. How can we match those layer names specifically? You can write a [regular
|
||||
expression](https://docs.python.org/3/library/re.html) to match the layer names. For our case, the regex
|
||||
`r".*\.mlp\.fc\d"` should do the job.
|
||||
|
||||
Furthermore, as in the first example, we should ensure that the output layer, in this case the classification head, is
|
||||
also updated. Looking at the end of the list printed above, we can see that it's named `'head.fc'`. With that in mind,
|
||||
here is our LoRA config:
|
||||
|
||||
```python
|
||||
config = LoraConfig(target_modules=r".*\.mlp\.fc\d", modules_to_save=["head.fc"])
|
||||
```
|
||||
|
||||
Then we only need to create the PEFT model by passing our base model and the config to `get_peft_model`:
|
||||
|
||||
```python
|
||||
peft_model = get_peft_model(model, config)
|
||||
peft_model.print_trainable_parameters()
|
||||
# prints trainable params: 1,064,454 || all params: 56,467,974 || trainable%: 1.88505789139876
|
||||
```
|
||||
|
||||
This shows us that we only need to train less than 2% of all parameters, which is a huge efficiency gain.
|
||||
|
||||
For a complete example, check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb).
|
||||
|
||||
## New transformers architectures
|
||||
|
||||
When new popular transformers architectures are released, we do our best to quickly add them to PEFT. If you come across a transformers model that is not supported out of the box, don't worry, it will most likely still work if the config is set correctly. Specifically, you have to identify the layers that should be adapted and set them correctly when initializing the corresponding config class, e.g. `LoraConfig`. Here are some tips to help with this.
|
||||
|
||||
As a first step, it is a good idea to check the existing models for inspiration. You can find them inside [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) in the PEFT repository. Often, you'll find a similar architecture that uses the same names. For example, if the new model architecture is a variation of the "mistral" model and you want to apply LoRA, you can see that the entry for "mistral" in `TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING` contains `["q_proj", "v_proj"]`. This tells you that for "mistral" models, the `target_modules` for LoRA should be `["q_proj", "v_proj"]`:
|
||||
|
||||
```python
|
||||
from peft import LoraConfig, get_peft_model
|
||||
|
||||
my_mistral_model = ...
|
||||
config = LoraConfig(
|
||||
target_modules=["q_proj", "v_proj"],
|
||||
..., # other LoRA arguments
|
||||
)
|
||||
peft_model = get_peft_model(my_mistral_model, config)
|
||||
```
|
||||
|
||||
If that doesn't help, check the existing modules in your model architecture with the `named_modules` method and try to identify the attention layers, especially the key, query, and value layers. Those will often have names such as `c_attn`, `query`, `q_proj`, etc. The key layer is not always adapted, and ideally, you should check whether including it results in better performance.
|
||||
|
||||
Additionally, linear layers are common targets to be adapted (e.g. in the [QLoRA paper](https://arxiv.org/abs/2305.14314), the authors suggest adapting them as well). Their names will often contain the strings `fc` or `dense`.
|
||||
|
||||
If you want to add a new model to PEFT, please create an entry in [constants.py](https://github.com/huggingface/peft/blob/main/src/peft/utils/constants.py) and open a pull request on the [repository](https://github.com/huggingface/peft/pulls). Don't forget to update the [README](https://github.com/huggingface/peft#models-support-matrix) as well.
|
||||
|
||||
## Checking the result
|
||||
|
||||
When you think that you have correctly specified the `target_modules` and called `get_peft_model`, you can check the fraction of parameters that will be trainable like this:
|
||||
|
||||
```python
|
||||
peft_model.print_trainable_parameters()
|
||||
```
|
||||
|
||||
If this number is too low or high, check the model `repr` by printing the model. This will show you the names and types of all the layers in the model. Ensure that the intended layers, and only those, are replaced by adapter layers. For instance, for LoRA applied to `nn.Linear` layers, you should see that `lora.Linear` layers are being used.
|
||||
|
||||
To get a quick overview of all layers that were adapted, you can also use the `targeted_module_names` attribute:
|
||||
|
||||
```python
|
||||
print(peft_model.targeted_module_names)
|
||||
```
|
||||
|
||||
This lists the names of each module that was actually adapted.
|
190  docs/source/developer_guides/lora.md  (new file)
@ -0,0 +1,190 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LoRA
|
||||
|
||||
LoRA is a low-rank decomposition method that reduces the number of trainable parameters, which speeds up finetuning large models and uses less memory. In PEFT, using LoRA is as easy as setting up a [`LoraConfig`] and wrapping it with [`get_peft_model`] to create a trainable [`PeftModel`].
|
||||
|
||||
This guide explores in more detail other options and features for using LoRA.
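
As a quick refresher, the basic workflow could look roughly like the sketch below; the model id and the LoRA hyperparameters are placeholders that you should adapt to your use case.

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# any transformers model works here; a small model is used only as an example
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"], lora_dropout=0.05)
peft_model = get_peft_model(base_model, config)
peft_model.print_trainable_parameters()
```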
|
||||
|
||||
## Initialization
|
||||
|
||||
The initialization of LoRA weights is controlled by the parameter `init_lora_weights` in [`LoraConfig`]. By default, PEFT initializes LoRA weights with Kaiming-uniform for weight A and zeros for weight B resulting in an identity transform (same as the reference [implementation](https://github.com/microsoft/LoRA)).
|
||||
|
||||
It is also possible to pass `init_lora_weights="gaussian"`. As the name suggests, this initializes weight A with a Gaussian distribution and zeros for weight B (this is how [Diffusers](https://huggingface.co/docs/diffusers/index) initializes LoRA weights).
|
||||
|
||||
```py
|
||||
from peft import LoraConfig
|
||||
|
||||
config = LoraConfig(init_lora_weights="gaussian", ...)
|
||||
```
|
||||
|
||||
There is also an option to set `init_lora_weights=False` which is useful for debugging and testing. This should be the only time you use this option. When choosing this option, the LoRA weights are initialized such that they do *not* result in an identity transform.
|
||||
|
||||
```py
|
||||
from peft import LoraConfig
|
||||
|
||||
config = LoraConfig(init_lora_weights=False, ...)
|
||||
```
|
||||
|
||||
### LoftQ
|
||||
|
||||
When quantizing the base model for QLoRA training, consider using the [LoftQ initialization](https://arxiv.org/abs/2310.08659), which has been shown to improve performance when training quantized models. The idea is that the LoRA weights are initialized such that the quantization error is minimized. If you're using LoftQ, *do not* quantize the base model. You should set up a [`LoftQConfig`] instead:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForCausalLM
from peft import LoftQConfig, LoraConfig, get_peft_model
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained(...) # don't quantize here
|
||||
loftq_config = LoftQConfig(loftq_bits=4, ...) # set 4bit quantization
|
||||
lora_config = LoraConfig(..., init_lora_weights="loftq", loftq_config=loftq_config)
|
||||
peft_model = get_peft_model(base_model, lora_config)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
Learn more about how PEFT works with quantization in the [Quantization](quantization) guide.
|
||||
|
||||
</Tip>
|
||||
|
||||
### Rank-stabilized LoRA
|
||||
|
||||
Another way to initialize [`LoraConfig`] is with the [rank-stabilized LoRA (rsLoRA)](https://huggingface.co/papers/2312.03732) method. The LoRA architecture scales each adapter during every forward pass by a fixed scalar which is set at initialization and depends on the rank `r`. The scalar is given by `lora_alpha/r` in the original implementation, but rsLoRA uses `lora_alpha/math.sqrt(r)` which stabilizes the adapters and increases the performance potential from using a higher `r`.
|
||||
|
||||
```py
|
||||
from peft import LoraConfig
|
||||
|
||||
config = LoraConfig(use_rslora=True, ...)
|
||||
```
|
||||
|
||||
## Merge adapters
|
||||
|
||||
While LoRA is significantly smaller and faster to train, you may encounter latency issues during inference due to separately loading the base model and the LoRA adapter. To eliminate latency, use the [`~LoraModel.merge_and_unload`] function to merge the adapter weights with the base model. This allows you to use the newly merged model as a standalone model. The [`~LoraModel.merge_and_unload`] function doesn't keep the adapter weights in memory.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
from peft import PeftModel
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
||||
peft_model_id = "alignment-handbook/zephyr-7b-sft-lora"
|
||||
model = PeftModel.from_pretrained(base_model, peft_model_id)
|
||||
model.merge_and_unload()
|
||||
```
|
||||
|
||||
If you need to keep a copy of the weights so you can unmerge the adapter later or delete and load different ones, you should use the [`~LoraModel.merge_adapter`] function instead. Now you have the option to use [`~LoraModel.unmerge_adapter`] to return the base model.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
from peft import PeftModel
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
||||
peft_model_id = "alignment-handbook/zephyr-7b-sft-lora"
|
||||
model = PeftModel.from_pretrained(base_model, peft_model_id)
|
||||
model.merge_adapter()
|
||||
|
||||
# unmerge the LoRA layers from the base model
|
||||
model.unmerge_adapter()
|
||||
```
|
||||
|
||||
The [`~LoraModel.add_weighted_adapter`] function is useful for merging multiple LoRAs into a new adapter based on a user provided weighting scheme in the `weights` parameter. Below is an end-to-end example.
|
||||
|
||||
First load the base model:
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForCausalLM
|
||||
from peft import PeftModel
|
||||
import torch
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained(
|
||||
"mistralai/Mistral-7B-v0.1", torch_dtype=torch.float16, device_map="auto"
|
||||
)
|
||||
```
|
||||
|
||||
Then we load the first adapter:
|
||||
|
||||
```python
|
||||
peft_model_id = "alignment-handbook/zephyr-7b-sft-lora"
|
||||
model = PeftModel.from_pretrained(base_model, peft_model_id, adapter_name="sft")
|
||||
```
|
||||
|
||||
Then load a different adapter and merge it with the first one:
|
||||
|
||||
```python
|
||||
model.load_adapter("alignment-handbook/zephyr-7b-dpo-lora", adapter_name="dpo")
|
||||
model.add_weighted_adapter(
|
||||
adapters=["sft", "dpo"],
|
||||
weights=[0.7, 0.3],
|
||||
adapter_name="sft-dpo",
|
||||
combination_type="linear"
|
||||
)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
There are several supported methods for `combination_type`. Refer to the [documentation](../package_reference/lora#peft.LoraModel.add_weighted_adapter) for more details. Note that "svd" as the `combination_type` is not supported when using `torch.float16` or `torch.bfloat16` as the datatype.
|
||||
|
||||
</Tip>
|
||||
|
||||
Now, perform inference:
|
||||
|
||||
```python
|
||||
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
|
||||
|
||||
prompt = "Hey, are you conscious? Can you talk to me?"
|
||||
inputs = tokenizer(prompt, return_tensors="pt")
|
||||
inputs = {k: v.to("cuda") for k, v in inputs.items()}
|
||||
|
||||
with torch.no_grad():
|
||||
generate_ids = model.generate(**inputs, max_length=30)
|
||||
outputs = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
|
||||
print(outputs)
|
||||
```
|
||||
|
||||
## Load adapters
|
||||
|
||||
Adapters can be loaded onto a pretrained model with [`~PeftModel.load_adapter`], which is useful for trying out different adapters whose weights aren't merged. Set the active adapter weights with the [`~LoraModel.set_adapter`] function.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
from peft import PeftModel
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
|
||||
peft_model_id = "alignment-handbook/zephyr-7b-sft-lora"
|
||||
model = PeftModel.from_pretrained(base_model, peft_model_id)
|
||||
|
||||
# load different adapter
|
||||
model.load_adapter("alignment-handbook/zephyr-7b-dpo-lora", adapter_name="dpo")
|
||||
|
||||
# set adapter as active
|
||||
model.set_adapter("dpo")
|
||||
```
|
||||
|
||||
To return the base model, you could use [`~LoraModel.unload`] to unload all of the LoRA modules or [`~LoraModel.delete_adapter`] to delete the adapter entirely.
|
||||
|
||||
```py
|
||||
# unload adapter
|
||||
model.unload()
|
||||
|
||||
# delete adapter
|
||||
model.delete_adapter("dpo")
|
||||
```
|
||||
|
||||
## QLoRA-style training
|
||||
|
||||
The default LoRA settings in 🤗 PEFT follow the [original paper](https://hf.co/papers/2106.09685) and add trainable weights to the query and value layers of each attention block. However, in [QLoRA](https://hf.co/papers/2305.14314), it was found that adding trainable weights to all the linear layers of a transformer model is beneficial to match full-finetuning performance. Since the list of modules to add will vary depending on the architecture, we provide a convenient shorthand: simply specify `target_modules='all-linear'` and let 🤗 PEFT handle the rest:
|
||||
|
||||
```py
|
||||
config = LoraConfig(target_modules="all-linear", ...) # adds LoRA to all linear layers like in QLoRA
|
||||
```
|
docs/source/developer_guides/low_level_api.md (new file, 107 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# PEFT as a utility library
|
||||
|
||||
This section covers how you can leverage PEFT's low-level API to inject trainable adapters into any `torch` module.
This API was developed so that power users don't have to rely on the modeling classes exposed in the PEFT library and can still use adapter methods such as LoRA, IA3, and AdaLoRA.
|
||||
|
||||
## Supported tuner types
|
||||
|
||||
Currently, the supported adapter types are the 'injectable' adapters, meaning adapters where an in-place modification of the model is sufficient to correctly perform the fine-tuning. As such, only [LoRA](../conceptual_guides/lora), AdaLoRA, and [IA3](../conceptual_guides/ia3) are currently supported in this API.
|
||||
|
||||
## `inject_adapter_in_model` method
|
||||
|
||||
To perform the adapter injection, use the `inject_adapter_in_model` method. It takes three arguments: the PEFT config, the model itself, and an optional adapter name. You can also attach multiple adapters to the model by calling `inject_adapter_in_model` multiple times with different adapter names.
|
||||
|
||||
Below is a basic example of how to inject LoRA adapters into the submodule `linear` of the module `DummyModel`.
|
||||
```python
|
||||
import torch
|
||||
from peft import inject_adapter_in_model, LoraConfig
|
||||
|
||||
|
||||
class DummyModel(torch.nn.Module):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.embedding = torch.nn.Embedding(10, 10)
|
||||
self.linear = torch.nn.Linear(10, 10)
|
||||
self.lm_head = torch.nn.Linear(10, 10)
|
||||
|
||||
def forward(self, input_ids):
|
||||
x = self.embedding(input_ids)
|
||||
x = self.linear(x)
|
||||
x = self.lm_head(x)
|
||||
return x
|
||||
|
||||
|
||||
lora_config = LoraConfig(
|
||||
lora_alpha=16,
|
||||
lora_dropout=0.1,
|
||||
r=64,
|
||||
bias="none",
|
||||
target_modules=["linear"],
|
||||
)
|
||||
|
||||
model = DummyModel()
|
||||
model = inject_adapter_in_model(lora_config, model)
|
||||
|
||||
dummy_inputs = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])
|
||||
dummy_outputs = model(dummy_inputs)
|
||||
```
|
||||
|
||||
If you print the model, you will see that the adapters have been correctly injected:
|
||||
|
||||
```bash
|
||||
DummyModel(
|
||||
(embedding): Embedding(10, 10)
|
||||
(linear): Linear(
|
||||
in_features=10, out_features=10, bias=True
|
||||
(lora_dropout): ModuleDict(
|
||||
(default): Dropout(p=0.1, inplace=False)
|
||||
)
|
||||
(lora_A): ModuleDict(
|
||||
(default): Linear(in_features=10, out_features=64, bias=False)
|
||||
)
|
||||
(lora_B): ModuleDict(
|
||||
(default): Linear(in_features=64, out_features=10, bias=False)
|
||||
)
|
||||
(lora_embedding_A): ParameterDict()
|
||||
(lora_embedding_B): ParameterDict()
|
||||
)
|
||||
(lm_head): Linear(in_features=10, out_features=10, bias=True)
|
||||
)
|
||||
```
|
||||
Note that it is up to the user to properly take care of saving the adapters (in case they want to save adapters only), since `model.state_dict()` will return the full state dict of the model.
If you want to extract the adapter state dict, you can use the `get_peft_model_state_dict` function:
|
||||
|
||||
```python
|
||||
from peft import get_peft_model_state_dict
|
||||
|
||||
peft_state_dict = get_peft_model_state_dict(model)
|
||||
print(peft_state_dict)
|
||||
```
|
||||
|
||||
## Pros and cons
|
||||
|
||||
When should you use this API, and when shouldn't you? Let's discuss the pros and cons.
|
||||
|
||||
Pros:
|
||||
- The model gets modified in-place, meaning the model will preserve all its original attributes and methods
|
||||
- Works for any torch module, and any modality (vision, text, multi-modal)
|
||||
|
||||
Cons:
|
||||
- You need to manually write the Hugging Face `from_pretrained` and `save_pretrained` utility methods if you want to easily save / load adapters from the Hugging Face Hub (a minimal saving sketch is shown after this list).
|
||||
- You cannot use any of the utility methods provided by `PeftModel`, such as disabling adapters, merging adapters, etc.
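
As a rough sketch of the saving point above, one possible approach is to store the extracted adapter state dict with plain `torch.save` and restore it with `set_peft_model_state_dict`. This assumes the `model` and `peft_state_dict` from the example above, and that you re-create the model and re-inject the adapter with the same config before loading.

```python
import torch
from peft import set_peft_model_state_dict

# save only the adapter weights
torch.save(peft_state_dict, "adapter_state_dict.pt")

# ... later: re-create DummyModel, call inject_adapter_in_model with the same LoraConfig, then restore the weights
loaded_state_dict = torch.load("adapter_state_dict.pt")
set_peft_model_state_dict(model, loaded_state_dict)
```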
|
docs/source/developer_guides/mixed_models.md (new file, 39 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Working with mixed adapter types
|
||||
|
||||
Normally, it is not possible to mix different adapter types in 🤗 PEFT. For example, even though it is possible to create a PEFT model that has two different LoRA adapters (that can have different config options), it is not possible to combine a LoRA adapter with a LoHa adapter. However, by using a mixed model, this works as long as the adapter types are compatible.
|
||||
|
||||
## Loading different adapter types into a PEFT model
|
||||
|
||||
To load different adapter types into a PEFT model, proceed the same as if you were loading two adapters of the same type, but use `PeftMixedModel` instead of `PeftModel`:
|
||||
|
||||
```py
|
||||
from peft import PeftMixedModel
|
||||
|
||||
base_model = ... # load the base model, e.g. from transformers
|
||||
# load first adapter, which will be called "default"
|
||||
peft_model = PeftMixedModel.from_pretrained(base_model, <path_to_adapter1>)
|
||||
peft_model.load_adapter(<path_to_adapter2>, adapter_name="other")
|
||||
peft_model.set_adapter(["default", "other"])
|
||||
```
|
||||
|
||||
The last line is necessary if you want to activate both adapters; otherwise, only the first adapter would be active. Of course, you can add more adapters by calling `add_adapter` repeatedly, as sketched below.
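
For example, adding a freshly initialized LoHa adapter on top could look roughly like this (the config values are placeholders and depend on your base model):

```py
from peft import LoHaConfig

loha_config = LoHaConfig(r=8, alpha=16, target_modules=["q_proj", "v_proj"])  # placeholder values
peft_model.add_adapter("third", loha_config)
peft_model.set_adapter(["default", "other", "third"])
```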
|
||||
|
||||
Currently, the main purpose of mixed adapter types is to combine trained adapters for inference. Although it is technically also possible to train a mixed adapter model, this has not been tested and is not recommended.
|
||||
|
||||
## Tips
|
||||
|
||||
- Not all adapter types can be combined. See `peft.tuners.mixed.COMPATIBLE_TUNER_TYPES` for a list of compatible types. An error will be raised if you are trying to combine incompatible adapter types.
|
||||
- It is possible to mix multiple adapters of the same type. This can be useful to combine adapters with very different configs.
|
||||
- If you want to combine a lot of different adapters, it is most performant to add the same types of adapters consecutively. E.g., add LoRA1, LoRA2, LoHa1, LoHa2 in this order, instead of LoRA1, LoHa1, LoRA2, LoHa2. The order can make a difference for the outcome, but since no order is better a priori, it is best to choose the fastest one.
|
docs/source/developer_guides/quantization.md (new file, 146 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Quantization
|
||||
|
||||
Quantization represents data with fewer bits, making it a useful technique for reducing memory usage and accelerating inference, especially when it comes to large language models (LLMs). There are several ways to quantize a model, including:
|
||||
|
||||
* optimizing which model weights are quantized with the [AWQ](https://hf.co/papers/2306.00978) algorithm
|
||||
* independently quantizing each row of a weight matrix with the [GPTQ](https://hf.co/papers/2210.17323) algorithm
|
||||
* quantizing to 8-bit and 4-bit precision with the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library
|
||||
|
||||
However, after a model is quantized it isn't typically further trained for downstream tasks because training can be unstable due to the lower precision of the weights and activations. But since PEFT methods only add *extra* trainable parameters, this allows you to train a quantized model with a PEFT adapter on top! Combining quantization with PEFT can be a good strategy for training even the largest models on a single GPU. For example, [QLoRA](https://hf.co/papers/2305.14314) is a method that quantizes a model to 4-bits and then trains it with LoRA. This method allows you to finetune a 65B parameter model on a single 48GB GPU!
|
||||
|
||||
In this guide, you'll see how to quantize a model to 4-bits and train it with LoRA.
|
||||
|
||||
## Quantize a model
|
||||
|
||||
[bitsandbytes](https://github.com/TimDettmers/bitsandbytes) is a quantization library with a Transformers integration. With this integration, you can quantize a model to 8 or 4-bits and enable many other options by configuring the [`~transformers.BitsAndBytesConfig`] class. For example, you can:
|
||||
|
||||
* set `load_in_4bit=True` to quantize the model to 4-bits when you load it
|
||||
* set `bnb_4bit_quant_type="nf4"` to use a special 4-bit data type for weights initialized from a normal distribution
|
||||
* set `bnb_4bit_use_double_quant=True` to use a nested quantization scheme to quantize the already quantized weights
|
||||
* set `bnb_4bit_compute_dtype=torch.bfloat16` to use bfloat16 for faster computation
|
||||
|
||||
```py
|
||||
import torch
|
||||
from transformers import BitsAndBytesConfig
|
||||
|
||||
config = BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
bnb_4bit_use_double_quant=True,
|
||||
bnb_4bit_compute_dtype=torch.bfloat16,
|
||||
)
|
||||
```
|
||||
|
||||
Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config)
|
||||
```
|
||||
|
||||
Next, you should call the [`~peft.utils.prepare_model_for_kbit_training`] function to preprocess the quantized model for training.
|
||||
|
||||
```py
|
||||
from peft import prepare_model_for_kbit_training
|
||||
|
||||
model = prepare_model_for_kbit_training(model)
|
||||
```
|
||||
|
||||
Now that the quantized model is ready, let's set up a configuration.
|
||||
|
||||
## LoraConfig
|
||||
|
||||
Create a [`LoraConfig`] with the following parameters (or choose your own):
|
||||
|
||||
```py
|
||||
from peft import LoraConfig
|
||||
|
||||
config = LoraConfig(
|
||||
r=16,
|
||||
lora_alpha=8,
|
||||
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
|
||||
lora_dropout=0.05,
|
||||
bias="none",
|
||||
task_type="CAUSAL_LM"
|
||||
)
|
||||
```
|
||||
|
||||
Then use the [`get_peft_model`] function to create a [`PeftModel`] from the quantized model and configuration.
|
||||
|
||||
```py
|
||||
from peft import get_peft_model
|
||||
|
||||
model = get_peft_model(model, config)
|
||||
```
|
||||
|
||||
You're all set for training with whichever training method you prefer!
|
||||
|
||||
### LoftQ initialization
|
||||
|
||||
[LoftQ](https://hf.co/papers/2310.08659) initializes LoRA weights such that the quantization error is minimized, and it can improve performance when training quantized models. To get started, create a [`LoftQConfig`] and set `loftq_bits=4` for 4-bit quantization.
|
||||
|
||||
<Tip warning={true}>
|
||||
|
||||
LoftQ initialization does not require quantizing the base model with the `load_in_4bit` parameter in the [`~transformers.AutoModelForCausalLM.from_pretrained`] method! Learn more about LoftQ initialization in the [Initialization options](../developer_guides/lora#initialization) section.
|
||||
|
||||
Note: You can only perform LoftQ initialization on a GPU.
|
||||
|
||||
</Tip>
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
from peft import LoftQConfig, LoraConfig, get_peft_model
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", device_map="auto")
|
||||
loftq_config = LoftQConfig(loftq_bits=4)
|
||||
```
|
||||
|
||||
Now pass the `loftq_config` to the [`LoraConfig`] to enable LoftQ initialization, and create a [`PeftModel`] for training.
|
||||
|
||||
```py
|
||||
lora_config = LoraConfig(
|
||||
init_lora_weights="loftq",
|
||||
loftq_config=loftq_config,
|
||||
r=16,
|
||||
lora_alpha=8,
|
||||
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
|
||||
lora_dropout=0.05,
|
||||
bias="none",
|
||||
task_type="CAUSAL_LM"
|
||||
)
|
||||
|
||||
model = get_peft_model(model, lora_config)
|
||||
```
|
||||
|
||||
|
||||
|
||||
### QLoRA-style training
|
||||
QLoRA adds trainable weights to all the linear layers in the transformer architecture. Since the attribute names for these linear layers can vary across architectures, we provide a convenient flag `'all-linear'` for this setting:
|
||||
|
||||
```py
|
||||
config = LoraConfig(target_modules="all-linear", ...) # adds LoRA to all linear layers like in QLoRA
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
If you're interested in learning more about quantization, the following may be helpful:
|
||||
|
||||
* Learn more about details about QLoRA and check out some benchmarks on its impact in the [Making LLMs even more accessible with bitsandbytes, 4-bit quantization and QLoRA](https://huggingface.co/blog/4bit-transformers-bitsandbytes) blog post.
|
||||
* Read more about different quantization schemes in the Transformers [Quantization](https://hf.co/docs/transformers/main/quantization) guide.
|
docs/source/developer_guides/troubleshooting.md (new file, 139 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Troubleshooting
|
||||
|
||||
If you encounter any issue when using PEFT, please check the following list of common issues and their solutions.
|
||||
|
||||
## Examples don't work
|
||||
|
||||
Examples often rely on the most recent package versions, so please ensure they're up-to-date. In particular, check the version of the following packages:
|
||||
|
||||
- `peft`
|
||||
- `transformers`
|
||||
- `accelerate`
|
||||
- `torch`
|
||||
|
||||
In general, you can update the package version by running this command inside your Python environment:
|
||||
|
||||
```bash
|
||||
python -m pip install -U <package_name>
|
||||
```
|
||||
|
||||
Installing PEFT from source is useful for keeping up with the latest developments:
|
||||
|
||||
```bash
|
||||
python -m pip install git+https://github.com/huggingface/peft
|
||||
```
|
||||
|
||||
## Training errors
|
||||
|
||||
### Getting: ValueError: Attempting to unscale FP16 gradients
|
||||
|
||||
This error probably occurred because the model was loaded with `torch_dtype=torch.float16` and then used in an automatic mixed precision (AMP) context, e.g. by setting `fp16=True` in the `Trainer` class from 🤗 Transformers. The reason is that when using AMP, trainable weights should never use fp16. To make this work without having to load the whole model in FP32, add the following snippet to your code:
|
||||
|
||||
```python
|
||||
peft_model = get_peft_model(...)
|
||||
|
||||
# add this:
|
||||
for param in peft_model.parameters():
|
||||
if param.requires_grad:
|
||||
param.data = param.data.float()
|
||||
|
||||
# proceed as usual
|
||||
trainer = Trainer(model=peft_model, fp16=True, ...)
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
Alternatively, you can use the utility function `cast_mixed_precision_params` from PEFT as shown below:
|
||||
```python
|
||||
from peft import cast_mixed_precision_params
|
||||
|
||||
peft_model = get_peft_model(...)
|
||||
cast_mixed_precision_params(peft_model, dtype=torch.float16)
|
||||
|
||||
# proceed as usual
|
||||
trainer = Trainer(model=peft_model, fp16=True, ...)
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
|
||||
## Bad results from a loaded PEFT model
|
||||
|
||||
There can be several reasons for getting a poor result from a loaded PEFT model, which are listed below. If you're still unable to troubleshoot the problem, see if anyone else had a similar [issue](https://github.com/huggingface/peft/issues) on GitHub, and if you can't find any, open a new issue.
|
||||
|
||||
When opening an issue, it helps a lot if you provide a minimal code example that reproduces the issue. Also, please report if the loaded model performs at the same level as the model did before fine-tuning, if it performs at a random level, or if it is only slightly worse than expected. This information helps us identify the problem more quickly.
|
||||
|
||||
### Random deviations
|
||||
|
||||
If your model outputs are not exactly the same as previous runs, there could be an issue with random elements. For example:
|
||||
|
||||
1. ensure the model is in `.eval()` mode, which is important if, for instance, the model uses dropout
|
||||
2. if you use [`~transformers.GenerationMixin.generate`] on a language model, there could be random sampling, so obtaining the same result requires setting a random seed (see the sketch after this list)
|
||||
3. if you used quantization and merged the weights, small deviations are expected due to rounding errors
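
For the random sampling case, a minimal sketch for fixing the seed before calling `generate` could look like this (the seed value itself is arbitrary):

```python
import torch
from transformers import set_seed

set_seed(0)           # seeds Python, NumPy and PyTorch
torch.manual_seed(0)  # not strictly needed after set_seed, shown for non-transformers code paths
```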
|
||||
|
||||
### Incorrectly loaded model
|
||||
|
||||
Please ensure that you load the model correctly. A common error is trying to load a _trained_ model with `get_peft_model`, which is incorrect. Instead, the loading code should look like this:
|
||||
|
||||
```python
|
||||
from peft import PeftModel, PeftConfig
|
||||
|
||||
base_model = ... # to load the base model, use the same code as when you trained it
|
||||
config = PeftConfig.from_pretrained(peft_model_id)
|
||||
peft_model = PeftModel.from_pretrained(base_model, peft_model_id)
|
||||
```
|
||||
|
||||
### Randomly initialized layers
|
||||
|
||||
For some tasks, it is important to correctly configure `modules_to_save` in the config to account for randomly initialized layers.
|
||||
|
||||
As an example, this is necessary if you use LoRA to fine-tune a language model for sequence classification because 🤗 Transformers adds a randomly initialized classification head on top of the model. If you do not add this layer to `modules_to_save`, the classification head won't be saved. The next time you load the model, you'll get a _different_ randomly initialized classification head, resulting in completely different results.
|
||||
|
||||
In PEFT, we try to correctly guess the `modules_to_save` if you provide the `task_type` argument in the config. This should work for transformers models that follow the standard naming scheme. It is always a good idea to double check though because we can't guarantee all models follow the naming scheme.
|
||||
|
||||
When you load a transformers model that has randomly initialized layers, you should see a warning along the lines of:
|
||||
|
||||
```
|
||||
Some weights of <MODEL> were not initialized from the model checkpoint at <ID> and are newly initialized: [<LAYER_NAMES>].
|
||||
You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
|
||||
```
|
||||
|
||||
The mentioned layers should be added to `modules_to_save` in the config to avoid the described problem.
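
As an illustration for the sequence classification example above, the config could look roughly like this; the exact head name (e.g. `score` or `classifier`) depends on the architecture, so check the warning or the model `repr` first:

```python
from peft import LoraConfig

config = LoraConfig(
    task_type="SEQ_CLS",
    target_modules=["q_proj", "v_proj"],  # placeholder, architecture dependent
    modules_to_save=["score"],            # name of the randomly initialized classification head, architecture dependent
)
```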
|
||||
|
||||
### Extending the vocabulary
|
||||
|
||||
For many language fine-tuning tasks, extending the model's vocabulary is necessary since new tokens are being introduced. This requires extending the embedding layer to account for the new tokens and also storing the embedding layer in addition to the adapter weights when saving the adapter.
|
||||
|
||||
Save the embedding layer by adding it to the `target_modules` of the config. The embedding layer name must follow the standard naming scheme from Transformers. For example, the Mistral config could look like this:
|
||||
|
||||
```python
|
||||
config = LoraConfig(..., target_modules=["embed_tokens", "lm_head", "q_proj", "v_proj"])
|
||||
```
|
||||
|
||||
Once added to `target_modules`, PEFT automatically stores the embedding layer when saving the adapter if the model has the [`~transformers.PreTrainedModel.get_input_embeddings`] and [`~transformers.PreTrainedModel.get_output_embeddings`] methods. This is generally the case for Transformers models.
|
||||
|
||||
If the model's embedding layer doesn't follow the Transformers naming scheme, you can still save it by manually passing `save_embedding_layers=True` when saving the adapter:
|
||||
|
||||
```python
|
||||
model = get_peft_model(...)
|
||||
# train the model
|
||||
model.save_pretrained("my_adapter", save_embedding_layers=True)
|
||||
```
|
||||
|
||||
For inference, load the base model first and resize it the same way you did before you trained the model. After you've resized the base model, you can load the PEFT checkpoint.
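
A rough sketch of that inference flow, assuming the extended tokenizer was saved alongside the adapter in `path/to/my_adapter`, could look like this:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")  # placeholder base model
tokenizer = AutoTokenizer.from_pretrained("path/to/my_adapter")

# resize the embeddings to account for the added tokens, just like before training
base_model.resize_token_embeddings(len(tokenizer))

# only now load the PEFT checkpoint
model = PeftModel.from_pretrained(base_model, "path/to/my_adapter")
```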
|
||||
|
||||
For a complete example, please check out [this notebook](https://github.com/huggingface/peft/blob/main/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb).
|
docs/source/index.md (new file, 49 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# PEFT
|
||||
|
||||
🤗 PEFT (Parameter-Efficient Fine-Tuning) is a library for efficiently adapting large pretrained models to various downstream applications without fine-tuning all of a model's parameters because it is prohibitively costly. PEFT methods only fine-tune a small number of (extra) model parameters - significantly decreasing computational and storage costs - while yielding performance comparable to a fully fine-tuned model. This makes it more accessible to train and store large language models (LLMs) on consumer hardware.
|
||||
|
||||
PEFT is integrated with the Transformers, Diffusers, and Accelerate libraries to provide a faster and easier way to load, train, and use large models for inference.
|
||||
|
||||
<div class="mt-10">
|
||||
<div class="w-full flex flex-col space-y-4 md:space-y-0 md:grid md:grid-cols-2 md:gap-y-4 md:gap-x-5">
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="quicktour"
|
||||
><div class="w-full text-center bg-gradient-to-br from-blue-400 to-blue-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Get started</div>
|
||||
<p class="text-gray-700">Start here if you're new to 🤗 PEFT to get an overview of the library's main features, and how to train a model with a PEFT method.</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./task_guides/image_classification_lora"
|
||||
><div class="w-full text-center bg-gradient-to-br from-indigo-400 to-indigo-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">How-to guides</div>
|
||||
<p class="text-gray-700">Practical guides demonstrating how to apply various PEFT methods across different types of tasks like image classification, causal language modeling, automatic speech recognition, and more. Learn how to use 🤗 PEFT with the DeepSpeed and Fully Sharded Data Parallel scripts.</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./conceptual_guides/lora"
|
||||
><div class="w-full text-center bg-gradient-to-br from-pink-400 to-pink-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Conceptual guides</div>
|
||||
<p class="text-gray-700">Get a better theoretical understanding of how LoRA and various soft prompting methods help reduce the number of trainable parameters to make training more efficient.</p>
|
||||
</a>
|
||||
<a class="!no-underline border dark:border-gray-700 p-5 rounded-lg shadow hover:shadow-lg" href="./package_reference/config"
|
||||
><div class="w-full text-center bg-gradient-to-br from-purple-400 to-purple-500 rounded-lg py-1.5 font-semibold mb-5 text-white text-lg leading-relaxed">Reference</div>
|
||||
<p class="text-gray-700">Technical descriptions of how 🤗 PEFT classes and methods work.</p>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<iframe
|
||||
src="https://stevhliu-peft-methods.hf.space"
|
||||
frameborder="0"
|
||||
width="850"
|
||||
height="620"
|
||||
></iframe>
|
@@ -1,117 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# PEFT
|
||||
|
||||
🤗 PEFT, or Parameter-Efficient Fine-Tuning (PEFT), is a library for efficiently adapting pre-trained language models (PLMs) to various downstream applications without fine-tuning all the model's parameters.
|
||||
PEFT methods only fine-tune a small number of (extra) model parameters, significantly decreasing computational and storage costs because fine-tuning large-scale PLMs is prohibitively costly.
|
||||
Recent state-of-the-art PEFT techniques achieve performance comparable to that of full fine-tuning.
|
||||
|
||||
PEFT is seamlessly integrated with 🤗 Accelerate for large-scale models leveraging DeepSpeed and [Big Model Inference](https://huggingface.co/docs/accelerate/usage_guides/big_modeling).
|
||||
|
||||
If you are new to PEFT, get started by reading the [Quicktour](quicktour) guide and conceptual guides for [LoRA](/conceptual_guides/lora) and [Prompting](/conceptual_guides/prompting) methods.
|
||||
|
||||
## Supported methods
|
||||
|
||||
1. LoRA: [LORA: LOW-RANK ADAPTATION OF LARGE LANGUAGE MODELS](https://arxiv.org/pdf/2106.09685.pdf)
|
||||
2. Prefix Tuning: [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://aclanthology.org/2021.acl-long.353/), [P-Tuning v2: Prompt Tuning Can Be Comparable to Fine-tuning Universally Across Scales and Tasks](https://arxiv.org/pdf/2110.07602.pdf)
|
||||
3. P-Tuning: [GPT Understands, Too](https://arxiv.org/pdf/2103.10385.pdf)
|
||||
4. Prompt Tuning: [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/pdf/2104.08691.pdf)
|
||||
5. AdaLoRA: [Adaptive Budget Allocation for Parameter-Efficient Fine-Tuning](https://arxiv.org/abs/2303.10512)
|
||||
6. [LLaMA-Adapter: Efficient Fine-tuning of Language Models with Zero-init Attention](https://github.com/ZrrSkywalker/LLaMA-Adapter)
|
||||
## Supported models
|
||||
|
||||
The tables provided below list the PEFT methods and models supported for each task. To apply a particular PEFT method for
|
||||
a task, please refer to the corresponding Task guides.
|
||||
|
||||
### Causal Language Modeling
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
|--------------| ---- | ---- | ---- | ---- |
|
||||
| GPT-2 | ✅ | ✅ | ✅ | ✅ |
|
||||
| Bloom | ✅ | ✅ | ✅ | ✅ |
|
||||
| OPT | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-Neo | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-J | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-NeoX-20B | ✅ | ✅ | ✅ | ✅ |
|
||||
| LLaMA | ✅ | ✅ | ✅ | ✅ |
|
||||
| ChatGLM | ✅ | ✅ | ✅ | ✅ |
|
||||
|
||||
### Conditional Generation
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| T5 | ✅ | ✅ | ✅ | ✅ |
|
||||
| BART | ✅ | ✅ | ✅ | ✅ |
|
||||
|
||||
### Sequence Classification
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| BERT | ✅ | ✅ | ✅ | ✅ |
|
||||
| RoBERTa | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-2 | ✅ | ✅ | ✅ | ✅ |
|
||||
| Bloom | ✅ | ✅ | ✅ | ✅ |
|
||||
| OPT | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-Neo | ✅ | ✅ | ✅ | ✅ |
|
||||
| GPT-J | ✅ | ✅ | ✅ | ✅ |
|
||||
| Deberta | ✅ | | ✅ | ✅ |
|
||||
| Deberta-v2 | ✅ | | ✅ | ✅ |
|
||||
|
||||
### Token Classification
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| BERT | ✅ | ✅ | | |
|
||||
| RoBERTa | ✅ | ✅ | | |
|
||||
| GPT-2 | ✅ | ✅ | | |
|
||||
| Bloom | ✅ | ✅ | | |
|
||||
| OPT | ✅ | ✅ | | |
|
||||
| GPT-Neo | ✅ | ✅ | | |
|
||||
| GPT-J | ✅ | ✅ | | |
|
||||
| Deberta | ✅ | | | |
|
||||
| Deberta-v2 | ✅ | | | |
|
||||
|
||||
### Text-to-Image Generation
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| Stable Diffusion | ✅ | | | |
|
||||
|
||||
|
||||
### Image Classification
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| ViT | ✅ | | | |
|
||||
| Swin | ✅ | | | |
|
||||
|
||||
### Image to text (Multi-modal models)
|
||||
|
||||
We have tested LoRA for [ViT](https://huggingface.co/docs/transformers/model_doc/vit) and [Swin](https://huggingface.co/docs/transformers/model_doc/swin) for fine-tuning on image classification.
|
||||
However, it should be possible to use LoRA for any [ViT-based model](https://huggingface.co/models?pipeline_tag=image-classification&sort=downloads&search=vit) from 🤗 Transformers.
|
||||
Check out the [Image classification](/task_guides/image_classification_lora) task guide to learn more. If you run into problems, please open an issue.
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| Blip-2 | ✅ | | | |
|
||||
|
||||
|
||||
### Semantic Segmentation
|
||||
|
||||
As with image-to-text models, you should be able to apply LoRA to any of the [segmentation models](https://huggingface.co/models?pipeline_tag=image-segmentation&sort=downloads).
|
||||
It's worth noting that we haven't tested this with every architecture yet. Therefore, if you come across any issues, kindly create an issue report.
|
||||
|
||||
| Model | LoRA | Prefix Tuning | P-Tuning | Prompt Tuning |
|
||||
| --------- | ---- | ---- | ---- | ---- |
|
||||
| SegFormer | ✅ | | | |
|
||||
|
@@ -8,17 +8,21 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Installation
|
||||
|
||||
-Before you start, you will need to setup your environment, install the appropriate packages, and configure 🤗 PEFT. 🤗 PEFT is tested on **Python 3.7+**.
+Before you start, you will need to setup your environment, install the appropriate packages, and configure 🤗 PEFT. 🤗 PEFT is tested on **Python 3.8+**.
|
||||
|
||||
-🤗 PEFT is available on pypi, as well as GitHub:
+🤗 PEFT is available on PyPI, as well as GitHub:
|
||||
|
||||
-## pip
+## PyPI
|
||||
|
||||
-To install 🤗 PEFT from pypi:
+To install 🤗 PEFT from PyPI:
|
||||
|
||||
```bash
|
||||
pip install peft
|
docs/source/package_reference/adalora.md (new file, 31 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# AdaLoRA
|
||||
|
||||
[AdaLoRA](https://hf.co/papers/2303.10512) is a method for optimizing the number of trainable parameters to assign to weight matrices and layers, unlike LoRA, which distributes parameters evenly across all modules. More parameters are budgeted for important weight matrices and layers while less important ones receive fewer parameters.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*Fine-tuning large pre-trained language models on downstream tasks has become an important paradigm in NLP. However, common practice fine-tunes all of the parameters in a pre-trained model, which becomes prohibitive when a large number of downstream tasks are present. Therefore, many fine-tuning methods are proposed to learn incremental updates of pre-trained weights in a parameter efficient way, e.g., low-rank increments. These methods often evenly distribute the budget of incremental updates across all pre-trained weight matrices, and overlook the varying importance of different weight parameters. As a consequence, the fine-tuning performance is suboptimal. To bridge this gap, we propose AdaLoRA, which adaptively allocates the parameter budget among weight matrices according to their importance score. In particular, AdaLoRA parameterizes the incremental updates in the form of singular value decomposition. Such a novel approach allows us to effectively prune the singular values of unimportant updates, which is essentially to reduce their parameter budget but circumvent intensive exact SVD computations. We conduct extensive experiments with several pre-trained models on natural language processing, question answering, and natural language generation to validate the effectiveness of AdaLoRA. Results demonstrate that AdaLoRA manifests notable improvement over baselines, especially in the low budget settings. Our code is publicly available at https://github.com/QingruZhang/AdaLoRA*.
|
||||
|
||||
## AdaLoraConfig
|
||||
|
||||
[[autodoc]] tuners.adalora.config.AdaLoraConfig
|
||||
|
||||
## AdaLoraModel
|
||||
|
||||
[[autodoc]] tuners.adalora.model.AdaLoraModel
|
docs/source/package_reference/adapter_utils.md (new file, 31 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LyCORIS
|
||||
|
||||
[LyCORIS](https://hf.co/papers/2309.14859) (Lora beYond Conventional methods, Other Rank adaptation Implementations for Stable diffusion) are LoRA-like matrix decomposition adapters that modify the cross-attention layer of the UNet. The [LoHa](loha) and [LoKr](lokr) methods inherit from the `Lycoris` classes here.
|
||||
|
||||
## LycorisConfig
|
||||
|
||||
[[autodoc]] tuners.lycoris_utils.LycorisConfig
|
||||
|
||||
## LycorisLayer
|
||||
|
||||
[[autodoc]] tuners.lycoris_utils.LycorisLayer
|
||||
|
||||
## LycorisTuner
|
||||
|
||||
[[autodoc]] tuners.lycoris_utils.LycorisTuner
|
docs/source/package_reference/auto_class.md (new file, 48 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# AutoPeftModels
|
||||
|
||||
The `AutoPeftModel` classes load the appropriate PEFT model for the task type by automatically inferring it from the configuration file. They are designed to quickly and easily load a PEFT model in a single line of code without having to worry about which exact model class you need or manually loading a [`PeftConfig`].
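
For example, loading a causal language modeling adapter in a single line could look like this (the adapter id below is a placeholder for your own adapter repository):

```py
from peft import AutoPeftModelForCausalLM

model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")  # placeholder adapter id
```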
|
||||
|
||||
## AutoPeftModel
|
||||
|
||||
[[autodoc]] auto.AutoPeftModel
|
||||
- from_pretrained
|
||||
|
||||
## AutoPeftModelForCausalLM
|
||||
|
||||
[[autodoc]] auto.AutoPeftModelForCausalLM
|
||||
|
||||
## AutoPeftModelForSeq2SeqLM
|
||||
|
||||
[[autodoc]] auto.AutoPeftModelForSeq2SeqLM
|
||||
|
||||
## AutoPeftModelForSequenceClassification
|
||||
|
||||
[[autodoc]] auto.AutoPeftModelForSequenceClassification
|
||||
|
||||
## AutoPeftModelForTokenClassification
|
||||
|
||||
[[autodoc]] auto.AutoPeftModelForTokenClassification
|
||||
|
||||
## AutoPeftModelForQuestionAnswering
|
||||
|
||||
[[autodoc]] auto.AutoPeftModelForQuestionAnswering
|
||||
|
||||
## AutoPeftModelForFeatureExtraction
|
||||
|
||||
[[autodoc]] auto.AutoPeftModelForFeatureExtraction
|
docs/source/package_reference/config.md (new file, 22 lines)
|
||||
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Configuration
|
||||
|
||||
[`PeftConfigMixin`] is the base configuration class for storing the adapter configuration of a [`PeftModel`], and [`PromptLearningConfig`] is the base configuration class for soft prompt methods (p-tuning, prefix tuning, and prompt tuning). These base classes contain methods for saving and loading model configurations from the Hub, specifying the PEFT method to use, type of task to perform, and model configurations like number of layers and number of attention heads.
|
||||
|
||||
## PeftConfigMixin
|
||||
|
||||
[[autodoc]] config.PeftConfigMixin
|
||||
- all
|
||||
|
||||
## PeftConfig
|
||||
|
||||
[[autodoc]] PeftConfig
|
||||
- all
|
||||
|
||||
## PromptLearningConfig
|
||||
|
||||
[[autodoc]] PromptLearningConfig
|
||||
- all
|
@@ -1,18 +0,0 @@
|
||||
# Configuration
|
||||
|
||||
The configuration classes stores the configuration of a [`PeftModel`], PEFT adapter models, and the configurations of [`PrefixTuning`], [`PromptTuning`], and [`PromptEncoder`]. They contain methods for saving and loading model configurations from the Hub, specifying the PEFT method to use, type of task to perform, and model configurations like number of layers and number of attention heads.
|
||||
|
||||
## PeftConfigMixin
|
||||
|
||||
[[autodoc]] utils.config.PeftConfigMixin
|
||||
- all
|
||||
|
||||
## PeftConfig
|
||||
|
||||
[[autodoc]] PeftConfig
|
||||
- all
|
||||
|
||||
## PromptLearningConfig
|
||||
|
||||
[[autodoc]] PromptLearningConfig
|
||||
- all
|
docs/source/package_reference/ia3.md (new file, 31 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# IA3
|
||||
|
||||
Infused Adapter by Inhibiting and Amplifying Inner Activations, or [IA3](https://hf.co/papers/2205.05638), is a method that adds three learned vectors to rescale the keys and values of the self-attention and encoder-decoder attention layers, and the intermediate activation of the position-wise feed-forward network.
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*Few-shot in-context learning (ICL) enables pre-trained language models to perform a previously-unseen task without any gradient-based training by feeding a small number of training examples as part of the input. ICL incurs substantial computational, memory, and storage costs because it involves processing all of the training examples every time a prediction is made. Parameter-efficient fine-tuning (PEFT) (e.g. adapter modules, prompt tuning, sparse update methods, etc.) offers an alternative paradigm where a small set of parameters are trained to enable a model to perform the new task. In this paper, we rigorously compare few-shot ICL and PEFT and demonstrate that the latter offers better accuracy as well as dramatically lower computational costs. Along the way, we introduce a new PEFT method called (IA)^3 that scales activations by learned vectors, attaining stronger performance while only introducing a relatively tiny amount of new parameters. We also propose a simple recipe based on the T0 model called T-Few that can be applied to new tasks without task-specific tuning or modifications. We validate the effectiveness of T-Few on completely unseen tasks by applying it to the RAFT benchmark, attaining super-human performance for the first time and outperforming the state-of-the-art by 6% absolute. All of the code used in our experiments is publicly available*.
|
||||
|
||||
## IA3Config
|
||||
|
||||
[[autodoc]] tuners.ia3.config.IA3Config
|
||||
|
||||
## IA3Model
|
||||
|
||||
[[autodoc]] tuners.ia3.model.IA3Model
|
docs/source/package_reference/llama_adapter.md (new file, 31 lines)
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Llama-Adapter
|
||||
|
||||
[Llama-Adapter](https://hf.co/papers/2303.16199) is a PEFT method specifically designed for turning Llama into an instruction-following model. The Llama model is frozen and only a set of adaptation prompts prefixed to the input instruction tokens is learned. Since randomly initialized modules inserted into the model can cause the model to lose some of its existing knowledge, Llama-Adapter uses zero-initialized attention with zero gating to progressively add the instructional prompts to the model.
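As a rough sketch (the checkpoint name and the `adapter_len`/`adapter_layers` values below are illustrative assumptions), the method is configured with an [`AdaptionPromptConfig`]:

```py
from transformers import AutoModelForCausalLM
from peft import AdaptionPromptConfig, TaskType, get_peft_model

# illustrative Llama-style checkpoint (may require access approval on the Hub)
base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")

peft_config = AdaptionPromptConfig(
    task_type=TaskType.CAUSAL_LM,
    adapter_len=10,      # number of adaption prompt tokens prepended per layer
    adapter_layers=30,   # how many of the top transformer layers receive the prompts
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```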
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*We present LLaMA-Adapter, a lightweight adaption method to efficiently fine-tune LLaMA into an instruction-following model. Using 52K self-instruct demonstrations, LLaMA-Adapter only introduces 1.2M learnable parameters upon the frozen LLaMA 7B model, and costs less than one hour for fine-tuning on 8 A100 GPUs. Specifically, we adopt a set of learnable adaption prompts, and prepend them to the input text tokens at higher transformer layers. Then, a zero-init attention mechanism with zero gating is proposed, which adaptively injects the new instructional cues into LLaMA, while effectively preserves its pre-trained knowledge. With efficient training, LLaMA-Adapter generates high-quality responses, comparable to Alpaca with fully fine-tuned 7B parameters. Furthermore, our approach can be simply extended to multi-modal input, e.g., images, for image-conditioned LLaMA, which achieves superior reasoning capacity on ScienceQA. We release our code at https://github.com/ZrrSkywalker/LLaMA-Adapter*.
|
||||
|
||||
## AdaptionPromptConfig
|
||||
|
||||
[[autodoc]] tuners.adaption_prompt.config.AdaptionPromptConfig
|
||||
|
||||
## AdaptionPromptModel
|
||||
|
||||
[[autodoc]] tuners.adaption_prompt.model.AdaptionPromptModel
|
31
docs/source/package_reference/loha.md
Normal file
31
docs/source/package_reference/loha.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LoHa
|
||||
|
||||
Low-Rank Hadamard Product ([LoHa](https://huggingface.co/papers/2108.06098)) is similar to LoRA, except it approximates the large weight matrix with more low-rank matrices and combines them with the Hadamard product. This method is even more parameter-efficient than LoRA and achieves comparable performance.
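A minimal usage sketch, assuming an OPT-style base model and module names chosen purely for illustration:

```py
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

# LyCORIS-style configs use `alpha` rather than `lora_alpha`
peft_config = LoHaConfig(
    r=8,
    alpha=32,
    target_modules=["q_proj", "v_proj"],
    module_dropout=0.1,
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```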
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*In this work, we propose a communication-efficient parameterization, FedPara, for federated learning (FL) to overcome the burdens on frequent model uploads and downloads. Our method re-parameterizes weight parameters of layers using low-rank weights followed by the Hadamard product. Compared to the conventional low-rank parameterization, our FedPara method is not restricted to low-rank constraints, and thereby it has a far larger capacity. This property enables to achieve comparable performance while requiring 3 to 10 times lower communication costs than the model with the original layers, which is not achievable by the traditional low-rank methods. The efficiency of our method can be further improved by combining with other efficient FL optimizers. In addition, we extend our method to a personalized FL application, pFedPara, which separates parameters into global and local ones. We show that pFedPara outperforms competing personalized FL methods with more than three times fewer parameters*.
|
||||
|
||||
## LoHaConfig
|
||||
|
||||
[[autodoc]] tuners.loha.config.LoHaConfig
|
||||
|
||||
## LoHaModel
|
||||
|
||||
[[autodoc]] tuners.loha.model.LoHaModel
|
27
docs/source/package_reference/lokr.md
Normal file
27
docs/source/package_reference/lokr.md
Normal file
@ -0,0 +1,27 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LoKr
|
||||
|
||||
Low-Rank Kronecker Product ([LoKr](https://hf.co/papers/2309.14859)) is a LoRA-variant method that approximates the large weight matrix with two low-rank matrices and combines them with the Kronecker product. LoKr also supports an optional third low-rank matrix for finer control during fine-tuning.
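A minimal usage sketch, with an assumed base model and illustrative hyperparameters:

```py
from transformers import AutoModelForCausalLM
from peft import LoKrConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

peft_config = LoKrConfig(
    r=8,
    alpha=32,
    target_modules=["q_proj", "v_proj"],
    decompose_both=False,  # whether to also rank-decompose the left Kronecker factor
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```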
|
||||
|
||||
## LoKrConfig
|
||||
|
||||
[[autodoc]] tuners.lokr.config.LoKrConfig
|
||||
|
||||
## LoKrModel
|
||||
|
||||
[[autodoc]] tuners.lokr.model.LoKrModel
|
31
docs/source/package_reference/lora.md
Normal file
31
docs/source/package_reference/lora.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# LoRA
|
||||
|
||||
Low-Rank Adaptation ([LoRA](https://huggingface.co/papers/2309.15223)) is a PEFT method that decomposes a large matrix into two smaller low-rank matrices in the attention layers. This drastically reduces the number of parameters that need to be fine-tuned.
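For example, a minimal sketch (the base model and `target_modules` below are illustrative assumptions; for known architectures PEFT can fill in sensible default targets):

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, TaskType, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],  # attention projections to decompose
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```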
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*We propose a neural language modeling system based on low-rank adaptation (LoRA) for speech recognition output rescoring. Although pretrained language models (LMs) like BERT have shown superior performance in second-pass rescoring, the high computational cost of scaling up the pretraining stage and adapting the pretrained models to specific domains limit their practical use in rescoring. Here we present a method based on low-rank decomposition to train a rescoring BERT model and adapt it to new domains using only a fraction (0.08%) of the pretrained parameters. These inserted matrices are optimized through a discriminative training objective along with a correlation-based regularization loss. The proposed low-rank adaptation Rescore-BERT (LoRB) architecture is evaluated on LibriSpeech and internal datasets with decreased training times by factors between 5.4 and 3.6.*.
|
||||
|
||||
## LoraConfig
|
||||
|
||||
[[autodoc]] tuners.lora.config.LoraConfig
|
||||
|
||||
## LoraModel
|
||||
|
||||
[[autodoc]] tuners.lora.model.LoraModel
|
31
docs/source/package_reference/multitask_prompt_tuning.md
Normal file
31
docs/source/package_reference/multitask_prompt_tuning.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Multitask Prompt Tuning
|
||||
|
||||
[Multitask Prompt Tuning](https://huggingface.co/papers/2303.02861) learns a single transferable prompt by distilling knowledge from the soft prompts of multiple tasks, instead of keeping a separate prompt for each task. The shared prompt is then adapted to each downstream task with multiplicative low-rank updates.
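A minimal configuration sketch (the base model, `num_tasks`, and `num_virtual_tokens` values are illustrative assumptions); note that during training each batch is also expected to carry a `task_ids` tensor so the shared prompt can be specialized per task:

```py
from transformers import AutoModelForSeq2SeqLM
from peft import MultitaskPromptTuningConfig, TaskType, get_peft_model

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

peft_config = MultitaskPromptTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    num_virtual_tokens=50,
    num_tasks=3,
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```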
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*Prompt tuning, in which a base pretrained model is adapted to each task via conditioning on learned prompt vectors, has emerged as a promising approach for efficiently adapting large language models to multiple downstream tasks. However, existing methods typically learn soft prompt vectors from scratch, and it has not been clear how to exploit the rich cross-task knowledge with prompt vectors in a multitask learning setting. We propose multitask prompt tuning (MPT), which first learns a single transferable prompt by distilling knowledge from multiple task-specific source prompts. We then learn multiplicative low rank updates to this shared prompt to efficiently adapt it to each downstream target task. Extensive experiments on 23 NLP datasets demonstrate that our proposed approach outperforms the state-of-the-art methods, including the full finetuning baseline in some cases, despite only tuning 0.035% as many task-specific parameters*.
|
||||
|
||||
## MultitaskPromptTuningConfig
|
||||
|
||||
[[autodoc]] tuners.multitask_prompt_tuning.config.MultitaskPromptTuningConfig
|
||||
|
||||
## MultitaskPromptEmbedding
|
||||
|
||||
[[autodoc]] tuners.multitask_prompt_tuning.model.MultitaskPromptEmbedding
|
31
docs/source/package_reference/oft.md
Normal file
31
docs/source/package_reference/oft.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# OFT
|
||||
|
||||
[Orthogonal Finetuning (OFT)](https://hf.co/papers/2306.07280) is a method developed for adapting text-to-image diffusion models. It works by reparameterizing the pretrained weight matrices with an orthogonal matrix to preserve information in the pretrained model. To reduce the number of parameters, OFT introduces a block-diagonal structure in the orthogonal matrix.
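Although OFT was developed for diffusion models, the PEFT implementation can also be applied to other model types; as an illustrative sketch with an assumed base model and module names:

```py
from transformers import AutoModelForCausalLM
from peft import OFTConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

peft_config = OFTConfig(
    r=8,  # number of blocks in the block-diagonal orthogonal matrix
    target_modules=["q_proj", "v_proj"],
    module_dropout=0.0,
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```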
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*Large text-to-image diffusion models have impressive capabilities in generating photorealistic images from text prompts. How to effectively guide or control these powerful models to perform different downstream tasks becomes an important open problem. To tackle this challenge, we introduce a principled finetuning method -- Orthogonal Finetuning (OFT), for adapting text-to-image diffusion models to downstream tasks. Unlike existing methods, OFT can provably preserve hyperspherical energy which characterizes the pairwise neuron relationship on the unit hypersphere. We find that this property is crucial for preserving the semantic generation ability of text-to-image diffusion models. To improve finetuning stability, we further propose Constrained Orthogonal Finetuning (COFT) which imposes an additional radius constraint to the hypersphere. Specifically, we consider two important finetuning text-to-image tasks: subject-driven generation where the goal is to generate subject-specific images given a few images of a subject and a text prompt, and controllable generation where the goal is to enable the model to take in additional control signals. We empirically show that our OFT framework outperforms existing methods in generation quality and convergence speed*.
|
||||
|
||||
## OFTConfig
|
||||
|
||||
[[autodoc]] tuners.oft.config.OFTConfig
|
||||
|
||||
## OFTModel
|
||||
|
||||
[[autodoc]] tuners.oft.model.OFTModel
|
31
docs/source/package_reference/p_tuning.md
Normal file
31
docs/source/package_reference/p_tuning.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# P-tuning
|
||||
|
||||
[P-tuning](https://hf.co/papers/2103.10385) adds trainable prompt embeddings to the input that are optimized by a prompt encoder to find a better prompt, eliminating the need to manually design prompts. The prompt tokens can be added anywhere in the input sequence, and p-tuning also introduces anchor tokens for improving performance.
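A minimal sketch, assuming a RoBERTa classifier as the base model (the checkpoint and hyperparameter values are illustrative):

```py
from transformers import AutoModelForSequenceClassification
from peft import PromptEncoderConfig, TaskType, get_peft_model

base_model = AutoModelForSequenceClassification.from_pretrained("roberta-base")

peft_config = PromptEncoderConfig(
    task_type=TaskType.SEQ_CLS,
    num_virtual_tokens=20,
    encoder_hidden_size=128,  # hidden size of the prompt encoder
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```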
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*While GPTs with traditional fine-tuning fail to achieve strong results on natural language understanding (NLU), we show that GPTs can be better than or comparable to similar-sized BERTs on NLU tasks with a novel method P-tuning -- which employs trainable continuous prompt embeddings. On the knowledge probing (LAMA) benchmark, the best GPT recovers 64\% (P@1) of world knowledge without any additional text provided during test time, which substantially improves the previous best by 20+ percentage points. On the SuperGlue benchmark, GPTs achieve comparable and sometimes better performance to similar-sized BERTs in supervised learning. Importantly, we find that P-tuning also improves BERTs' performance in both few-shot and supervised settings while largely reducing the need for prompt engineering. Consequently, P-tuning outperforms the state-of-the-art approaches on the few-shot SuperGlue benchmark.*.
|
||||
|
||||
## PromptEncoderConfig
|
||||
|
||||
[[autodoc]] tuners.p_tuning.config.PromptEncoderConfig
|
||||
|
||||
## PromptEncoder
|
||||
|
||||
[[autodoc]] tuners.p_tuning.model.PromptEncoder
|
@ -1,6 +1,10 @@
|
||||
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# Models
|
||||
|
||||
[`PeftModel`] is the base model class for specifying the base Transformer model and configuration to apply a PEFT method to. The base `PeftModel` contains methods for loading and saving models from the Hub, and supports the [`PromptEncoder`] for prompt learning.
|
||||
[`PeftModel`] is the base model class for specifying the base Transformer model and configuration to apply a PEFT method to. The base `PeftModel` contains methods for loading and saving models from the Hub.
|
||||
|
||||
## PeftModel
|
||||
|
||||
@ -34,3 +38,30 @@ A `PeftModel` for sequence-to-sequence language modeling.
|
||||
|
||||
[[autodoc]] PeftModelForSeq2SeqLM
|
||||
- all
|
||||
|
||||
## PeftModelForQuestionAnswering
|
||||
|
||||
A `PeftModel` for question answering.
|
||||
|
||||
[[autodoc]] PeftModelForQuestionAnswering
|
||||
- all
|
||||
|
||||
## PeftModelForFeatureExtraction
|
||||
|
||||
A `PeftModel` for extracting features/embeddings from transformer models.
|
||||
|
||||
[[autodoc]] PeftModelForFeatureExtraction
|
||||
- all
|
||||
|
||||
## PeftMixedModel
|
||||
|
||||
A `PeftModel` for mixing different adapter types (e.g. LoRA and LoHa).
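As a rough sketch of how mixing might look (the base model, adapter names, and target modules are illustrative assumptions), passing `mixed=True` to [`get_peft_model`] returns a [`PeftMixedModel`] onto which adapters of different types can be stacked:

```py
from transformers import AutoModelForCausalLM
from peft import LoHaConfig, LoraConfig, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

lora_config = LoraConfig(r=8, lora_alpha=32, target_modules=["q_proj", "v_proj"])
loha_config = LoHaConfig(r=8, alpha=32, target_modules=["q_proj", "v_proj"])

model = get_peft_model(base_model, lora_config, adapter_name="lora", mixed=True)
model.add_adapter("loha", loha_config)
model.set_adapter(["lora", "loha"])  # activate both adapters at once for inference
```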
|
||||
|
||||
[[autodoc]] PeftMixedModel
|
||||
- all
|
||||
|
||||
## Utilities
|
||||
|
||||
[[autodoc]] get_peft_model
|
||||
|
||||
[[autodoc]] utils.prepare_model_for_kbit_training
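For example, `prepare_model_for_kbit_training` is typically called on a quantized base model before attaching an adapter. The checkpoint and LoRA settings below are illustrative assumptions, and 8-bit loading requires `bitsandbytes`:

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", load_in_8bit=True)

# casts selected parameters to fp32 and enables gradient checkpointing / input grads
base_model = prepare_model_for_kbit_training(base_model)

model = get_peft_model(base_model, LoraConfig(target_modules=["q_proj", "v_proj"]))
model.print_trainable_parameters()
```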
|
27
docs/source/package_reference/peft_types.md
Normal file
27
docs/source/package_reference/peft_types.md
Normal file
@ -0,0 +1,27 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# PEFT types
|
||||
|
||||
[`PeftType`] enumerates the adapter methods supported in PEFT, and [`TaskType`] enumerates the tasks PEFT supports.
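Both are enums that the config classes fill in for you; for example:

```py
from peft import LoraConfig, PeftType, TaskType

config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM)
assert config.peft_type == PeftType.LORA
assert config.task_type == TaskType.SEQ_2_SEQ_LM
```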
|
||||
|
||||
## PeftType
|
||||
|
||||
[[autodoc]] utils.peft_types.PeftType
|
||||
|
||||
## TaskType
|
||||
|
||||
[[autodoc]] utils.peft_types.TaskType
|
31
docs/source/package_reference/prefix_tuning.md
Normal file
31
docs/source/package_reference/prefix_tuning.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Prefix tuning
|
||||
|
||||
[Prefix tuning](https://hf.co/papers/2101.00190) prefixes a series of task-specific vectors to the input sequence that can be learned while keeping the pretrained model frozen. The prefix parameters are inserted in all of the model layers.
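A minimal sketch with an assumed T5 base model and an illustrative prefix length:

```py
from transformers import AutoModelForSeq2SeqLM
from peft import PrefixTuningConfig, TaskType, get_peft_model

base_model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

peft_config = PrefixTuningConfig(
    task_type=TaskType.SEQ_2_SEQ_LM,
    num_virtual_tokens=20,  # length of the trainable prefix added to every layer
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```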
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*Fine-tuning is the de facto way to leverage large pretrained language models to perform downstream tasks. However, it modifies all the language model parameters and therefore necessitates storing a full copy for each task. In this paper, we propose prefix-tuning, a lightweight alternative to fine-tuning for natural language generation tasks, which keeps language model parameters frozen, but optimizes a small continuous task-specific vector (called the prefix). Prefix-tuning draws inspiration from prompting, allowing subsequent tokens to attend to this prefix as if it were "virtual tokens". We apply prefix-tuning to GPT-2 for table-to-text generation and to BART for summarization. We find that by learning only 0.1\% of the parameters, prefix-tuning obtains comparable performance in the full data setting, outperforms fine-tuning in low-data settings, and extrapolates better to examples with topics unseen during training*.
|
||||
|
||||
## PrefixTuningConfig
|
||||
|
||||
[[autodoc]] tuners.prefix_tuning.config.PrefixTuningConfig
|
||||
|
||||
## PrefixEncoder
|
||||
|
||||
[[autodoc]] tuners.prefix_tuning.model.PrefixEncoder
|
31
docs/source/package_reference/prompt_tuning.md
Normal file
31
docs/source/package_reference/prompt_tuning.md
Normal file
@ -0,0 +1,31 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Prompt tuning
|
||||
|
||||
[Prompt tuning](https://hf.co/papers/2104.08691) adds task-specific prompts to the input, and these prompt parameters are updated independently of the pretrained model parameters, which are frozen.
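A minimal sketch (the base model, initialization text, and number of virtual tokens are illustrative assumptions):

```py
from transformers import AutoModelForCausalLM
from peft import PromptTuningConfig, PromptTuningInit, TaskType, get_peft_model

base_model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")

peft_config = PromptTuningConfig(
    task_type=TaskType.CAUSAL_LM,
    prompt_tuning_init=PromptTuningInit.TEXT,
    prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
    num_virtual_tokens=8,
    tokenizer_name_or_path="bigscience/bloomz-560m",
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```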
|
||||
|
||||
The abstract from the paper is:
|
||||
|
||||
*In this work, we explore "prompt tuning", a simple yet effective mechanism for learning "soft prompts" to condition frozen language models to perform specific downstream tasks. Unlike the discrete text prompts used by GPT-3, soft prompts are learned through backpropagation and can be tuned to incorporate signal from any number of labeled examples. Our end-to-end learned approach outperforms GPT-3's "few-shot" learning by a large margin. More remarkably, through ablations on model size using T5, we show that prompt tuning becomes more competitive with scale: as models exceed billions of parameters, our method "closes the gap" and matches the strong performance of model tuning (where all model weights are tuned). This finding is especially relevant in that large models are costly to share and serve, and the ability to reuse one frozen model for multiple downstream tasks can ease this burden. Our method can be seen as a simplification of the recently proposed "prefix tuning" of Li and Liang (2021), and we provide a comparison to this and other similar approaches. Finally, we show that conditioning a frozen model with soft prompts confers benefits in robustness to domain transfer, as compared to full model tuning*.
|
||||
|
||||
## PromptTuningConfig
|
||||
|
||||
[[autodoc]] tuners.prompt_tuning.config.PromptTuningConfig
|
||||
|
||||
## PromptEmbedding
|
||||
|
||||
[[autodoc]] tuners.prompt_tuning.model.PromptEmbedding
|
27
docs/source/package_reference/tuners.md
Normal file
27
docs/source/package_reference/tuners.md
Normal file
@ -0,0 +1,27 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Tuners
|
||||
|
||||
A tuner (or adapter) is a module that can be plugged into a `torch.nn.Module`. [`BaseTuner`] is the base class for the other tuners and provides shared methods and attributes for preparing an adapter configuration and replacing a target module with the adapter module. [`BaseTunerLayer`] is the base class for adapter layers. It offers methods and attributes for managing adapters, such as activating and disabling them.
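As a rough sketch of how these base classes surface in practice (the base model and LoRA settings are illustrative assumptions), every module swapped in by a tuner is an instance of [`BaseTunerLayer`], which makes it easy to locate and inspect the adapter layers:

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model
from peft.tuners.tuners_utils import BaseTunerLayer

base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
model = get_peft_model(base_model, LoraConfig(target_modules=["q_proj", "v_proj"]))

# every replaced target module subclasses BaseTunerLayer
for name, module in model.named_modules():
    if isinstance(module, BaseTunerLayer):
        print(name, module.active_adapter)
```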
|
||||
|
||||
## BaseTuner
|
||||
|
||||
[[autodoc]] tuners.tuners_utils.BaseTuner
|
||||
|
||||
## BaseTunerLayer
|
||||
|
||||
[[autodoc]] tuners.tuners_utils.BaseTunerLayer
|
@ -1,33 +0,0 @@
|
||||
# Tuners
|
||||
|
||||
Each tuner (or PEFT method) has a configuration and model.
|
||||
|
||||
## LoRA
|
||||
|
||||
For finetuning a model with LoRA.
|
||||
|
||||
[[autodoc]] LoraConfig
|
||||
|
||||
[[autodoc]] LoraModel
|
||||
|
||||
[[autodoc]] tuners.lora.LoraLayer
|
||||
|
||||
[[autodoc]] tuners.lora.Linear
|
||||
|
||||
## P-tuning
|
||||
|
||||
[[autodoc]] tuners.p_tuning.PromptEncoderConfig
|
||||
|
||||
[[autodoc]] tuners.p_tuning.PromptEncoder
|
||||
|
||||
## Prefix tuning
|
||||
|
||||
[[autodoc]] tuners.prefix_tuning.PrefixTuningConfig
|
||||
|
||||
[[autodoc]] tuners.prefix_tuning.PrefixEncoder
|
||||
|
||||
## Prompt tuning
|
||||
|
||||
[[autodoc]] tuners.prompt_tuning.PromptTuningConfig
|
||||
|
||||
[[autodoc]] tuners.prompt_tuning.PromptEmbedding
|
170
docs/source/quicktour.md
Normal file
170
docs/source/quicktour.md
Normal file
@ -0,0 +1,170 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Quicktour
|
||||
|
||||
PEFT offers parameter-efficient methods for finetuning large pretrained models. The traditional paradigm is to finetune all of a model's parameters for each downstream task, but this is becoming exceedingly costly and impractical because of the enormous number of parameters in models today. Instead, it is more efficient to train a smaller number of prompt parameters or use a reparametrization method like low-rank adaptation (LoRA) to reduce the number of trainable parameters.
|
||||
|
||||
This quicktour will show you PEFT's main features and how you can train or run inference on large models that would typically be inaccessible on consumer devices.
|
||||
|
||||
## Train
|
||||
|
||||
Each PEFT method is defined by a [`PeftConfig`] class that stores all the important parameters for building a [`PeftModel`]. For example, to train with LoRA, load and create a [`LoraConfig`] class and specify the following parameters:
|
||||
|
||||
- `task_type`: the task to train for (sequence-to-sequence language modeling in this case)
|
||||
- `inference_mode`: whether you're using the model for inference or not
|
||||
- `r`: the dimension of the low-rank matrices
|
||||
- `lora_alpha`: the scaling factor for the low-rank matrices
|
||||
- `lora_dropout`: the dropout probability of the LoRA layers
|
||||
|
||||
```python
|
||||
from peft import LoraConfig, TaskType
|
||||
|
||||
peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
See the [`LoraConfig`] reference for more details about other parameters you can adjust, such as the modules to target or the bias type.
|
||||
|
||||
</Tip>
|
||||
|
||||
Once the [`LoraConfig`] is set up, create a [`PeftModel`] with the [`get_peft_model`] function. It takes a base model - which you can load from the Transformers library - and the [`LoraConfig`] containing the parameters for how to configure a model for training with LoRA.
|
||||
|
||||
Load the base model you want to finetune.
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForSeq2SeqLM
|
||||
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
|
||||
```
|
||||
|
||||
Wrap the base model and `peft_config` with the [`get_peft_model`] function to create a [`PeftModel`]. To get a sense of the number of trainable parameters in your model, use the [`print_trainable_parameters`] method.
|
||||
|
||||
```python
|
||||
from peft import get_peft_model
|
||||
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282"
|
||||
```
|
||||
|
||||
Out of [bigscience/mt0-large's](https://huggingface.co/bigscience/mt0-large) 1.2B parameters, you're only training 0.19% of them!
|
||||
|
||||
That is it 🎉! Now you can train the model with the Transformers [`~transformers.Trainer`], Accelerate, or any custom PyTorch training loop.
|
||||
|
||||
For example, to train with the [`~transformers.Trainer`] class, set up a [`~transformers.TrainingArguments`] class with some training hyperparameters.
|
||||
|
||||
```py
|
||||
from transformers import Trainer, TrainingArguments

training_args = TrainingArguments(
|
||||
output_dir="your-name/bigscience/mt0-large-lora",
|
||||
learning_rate=1e-3,
|
||||
per_device_train_batch_size=32,
|
||||
per_device_eval_batch_size=32,
|
||||
num_train_epochs=2,
|
||||
weight_decay=0.01,
|
||||
evaluation_strategy="epoch",
|
||||
save_strategy="epoch",
|
||||
load_best_model_at_end=True,
|
||||
)
|
||||
```
|
||||
|
||||
Pass the model, training arguments, dataset, tokenizer, and any other necessary component to the [`~transformers.Trainer`], and call [`~transformers.Trainer.train`] to start training.
|
||||
|
||||
```py
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=training_args,
|
||||
train_dataset=tokenized_datasets["train"],
|
||||
eval_dataset=tokenized_datasets["test"],
|
||||
tokenizer=tokenizer,
|
||||
data_collator=data_collator,
|
||||
compute_metrics=compute_metrics,
|
||||
)
|
||||
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
### Save model
|
||||
|
||||
After your model is finished training, you can save your model to a directory using the [`~transformers.PreTrainedModel.save_pretrained`] function.
|
||||
|
||||
```py
|
||||
model.save_pretrained("output_dir")
|
||||
```
|
||||
|
||||
You can also save your model to the Hub (make sure you're logged in to your Hugging Face account first) with the [`~transformers.PreTrainedModel.push_to_hub`] function.
|
||||
|
||||
```python
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
notebook_login()
|
||||
model.push_to_hub("your-name/bigscience/mt0-large-lora")
|
||||
```
|
||||
|
||||
Both methods only save the extra PEFT weights that were trained, meaning it is super efficient to store, transfer, and load. For example, this [facebook/opt-350m](https://huggingface.co/ybelkada/opt-350m-lora) model trained with LoRA only contains two files: `adapter_config.json` and `adapter_model.safetensors`. The `adapter_model.safetensors` file is just 6.3MB!
|
||||
|
||||
<div class="flex flex-col justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/>
|
||||
<figcaption class="text-center">The adapter weights for an opt-350m model stored on the Hub are only ~6MB, compared to the full size of the model weights, which can be ~700MB.</figcaption>
|
||||
</div>
|
||||
|
||||
## Inference
|
||||
|
||||
<Tip>
|
||||
|
||||
Take a look at the [AutoPeftModel](package_reference/auto_class) API reference for a complete list of available `AutoPeftModel` classes.
|
||||
|
||||
</Tip>
|
||||
|
||||
Easily load any PEFT-trained model for inference with the [`AutoPeftModel`] class and the [`~transformers.PreTrainedModel.from_pretrained`] method:
|
||||
|
||||
```py
|
||||
from peft import AutoPeftModelForCausalLM
|
||||
from transformers import AutoTokenizer
|
||||
import torch
|
||||
|
||||
model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
|
||||
model = model.to("cuda")
|
||||
model.eval()
|
||||
inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt")
|
||||
|
||||
outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50)
|
||||
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])
|
||||
|
||||
"Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla."
|
||||
```
|
||||
|
||||
For other tasks that aren't explicitly supported with an `AutoPeftModelFor` class - such as automatic speech recognition - you can still use the base [`AutoPeftModel`] class to load a model for the task.
|
||||
|
||||
```py
|
||||
from peft import AutoPeftModel
|
||||
|
||||
model = AutoPeftModel.from_pretrained("smangrul/openai-whisper-large-v2-LORA-colab")
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
Now that you've seen how to train a model with one of the PEFT methods, we encourage you to try out some of the other methods like prompt tuning. The steps are very similar to the ones shown in the quicktour:
|
||||
|
||||
1. prepare a [`PeftConfig`] for a PEFT method
|
||||
2. use the [`get_peft_model`] method to create a [`PeftModel`] from the configuration and base model
|
||||
|
||||
Then you can train it however you like! To load a PEFT model for inference, you can use the [`AutoPeftModel`] class.
|
||||
|
||||
Feel free to also take a look at the task guides if you're interested in training a model with another PEFT method for a specific task such as semantic segmentation, multilingual automatic speech recognition, DreamBooth, token classification, and more.
|
@ -1,111 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Quicktour
|
||||
|
||||
🤗 PEFT contains parameter-efficient finetuning methods for training large pretrained models. The traditional paradigm is to finetune all of a model's parameters for each downstream task, but this is becoming exceedingly costly and impractical because of the enormous number of parameters in models today. Instead, it is more efficient to train a smaller number of prompt parameters or use a reparametrization method like low-rank adaptation (LoRA) to reduce the number of trainable parameters.
|
||||
|
||||
This quicktour will show you 🤗 PEFT's main features and help you train large pretrained models that would typically be inaccessible on consumer devices. You'll see how to train the 1.2B parameter [`bigscience/mt0-large`](https://huggingface.co/bigscience/mt0-large) model with LoRA to generate a classification label and use it for inference.
|
||||
|
||||
## PeftConfig
|
||||
|
||||
Each 🤗 PEFT method is defined by a [`PeftConfig`] class that stores all the important parameters for building a [`PeftModel`].
|
||||
|
||||
Because you're going to use LoRA, you'll need to load and create a [`LoraConfig`] class. Within `LoraConfig`, specify the following parameters:
|
||||
|
||||
- the `task_type`, or sequence-to-sequence language modeling in this case
|
||||
- `inference_mode`, whether you're using the model for inference or not
|
||||
- `r`, the dimension of the low-rank matrices
|
||||
- `lora_alpha`, the scaling factor for the low-rank matrices
|
||||
- `lora_dropout`, the dropout probability of the LoRA layers
|
||||
|
||||
```python
|
||||
from peft import LoraConfig, TaskType
|
||||
|
||||
peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1)
|
||||
```
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 See the [`LoraConfig`] reference for more details about other parameters you can adjust.
|
||||
|
||||
</Tip>
|
||||
|
||||
## PeftModel
|
||||
|
||||
A [`PeftModel`] is created by the [`get_peft_model`] function. It takes a base model - which you can load from the 🤗 Transformers library - and the [`PeftConfig`] containing the instructions for how to configure a model for a specific 🤗 PEFT method.
|
||||
|
||||
Start by loading the base model you want to finetune.
|
||||
|
||||
```python
|
||||
from transformers import AutoModelForSeq2SeqLM
|
||||
|
||||
model_name_or_path = "bigscience/mt0-large"
|
||||
tokenizer_name_or_path = "bigscience/mt0-large"
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
|
||||
```
|
||||
|
||||
Wrap your base model and `peft_config` with the `get_peft_model` function to create a [`PeftModel`]. To get a sense of the number of trainable parameters in your model, use the [`print_trainable_parameters`] method. In this case, you're only training 0.19% of the model's parameters! 🤏
|
||||
|
||||
```python
|
||||
from peft import get_peft_model
|
||||
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"output: trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282"
|
||||
```
|
||||
|
||||
That is it 🎉! Now you can train the model using the 🤗 Transformers [`~transformers.Trainer`], 🤗 Accelerate, or any custom PyTorch training loop.
|
||||
|
||||
## Save and load a model
|
||||
|
||||
After your model is finished training, you can save your model to a directory using the [`~transformers.PreTrainedModel.save_pretrained`] function. You can also save your model to the Hub (make sure you log in to your Hugging Face account first) with the [`~transformers.PreTrainedModel.push_to_hub`] function.
|
||||
|
||||
```python
|
||||
model.save_pretrained("output_dir")
|
||||
|
||||
# if pushing to Hub
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
notebook_login()
|
||||
model.push_to_hub("my_awesome_peft_model")
|
||||
```
|
||||
|
||||
This only saves the incremental 🤗 PEFT weights that were trained, meaning it is super efficient to store, transfer, and load. For example, this [`bigscience/T0_3B`](https://huggingface.co/smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM) model trained with LoRA on the [`twitter_complaints`](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints/train) subset of the RAFT [dataset](https://huggingface.co/datasets/ought/raft) only contains two files: `adapter_config.json` and `adapter_model.bin`. The latter file is just 19MB!
|
||||
|
||||
Easily load your model for inference using the [`~transformers.PreTrainedModel.from_pretrained`] function:
|
||||
|
||||
```diff
|
||||
from transformers import AutoModelForSeq2SeqLM
|
||||
+ from peft import PeftModel, PeftConfig
|
||||
|
||||
+ peft_model_id = "smangrul/twitter_complaints_bigscience_T0_3B_LORA_SEQ_2_SEQ_LM"
|
||||
+ config = PeftConfig.from_pretrained(peft_model_id)
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
|
||||
+ model = PeftModel.from_pretrained(model, peft_model_id)
|
||||
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
|
||||
|
||||
model = model.to(device)
|
||||
model.eval()
|
||||
inputs = tokenizer("Tweet text : @HondaCustSvc Your customer service has been horrible during the recall process. I will never purchase a Honda again. Label :", return_tensors="pt")
|
||||
|
||||
with torch.no_grad():
|
||||
outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=10)
|
||||
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0])
|
||||
'complaint'
|
||||
```
|
||||
|
||||
## Next steps
|
||||
|
||||
Now that you've seen how to train a model with one of the 🤗 PEFT methods, we encourage you to try out some of the other methods like prompt tuning. The steps are very similar to the ones shown in this quickstart; prepare a [`PeftConfig`] for a 🤗 PEFT method, and use the `get_peft_model` to create a [`PeftModel`] from the configuration and base model. Then you can train it however you like!
|
||||
|
||||
Feel free to also take a look at the task guides if you're interested in training a model with a 🤗 PEFT method for a specific task such as semantic segmentation, multilingual automatic speech recognition, DreamBooth, and token classification.
|
@ -1,288 +0,0 @@
|
||||
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
-->
|
||||
|
||||
# Prompt tuning for causal language modeling
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Prompting helps guide language model behavior by adding some input text specific to a task. Prompt tuning is an additive method for only training and updating the newly added prompt tokens to a pretrained model. This way, you can use one pretrained model whose weights are frozen, and train and update a smaller set of prompt parameters for each downstream task instead of fully finetuning a separate model. As models grow larger and larger, prompt tuning can be more efficient, and results are even better as model parameters scale.
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 Read [The Power of Scale for Parameter-Efficient Prompt Tuning](https://arxiv.org/abs/2104.08691) to learn more about prompt tuning.
|
||||
|
||||
</Tip>
|
||||
|
||||
This guide will show you how to apply prompt tuning to train a [`bloomz-560m`](https://huggingface.co/bigscience/bloomz-560m) model on the `twitter_complaints` subset of the [RAFT](https://huggingface.co/datasets/ought/raft) dataset.
|
||||
|
||||
Before you begin, make sure you have all the necessary libraries installed:
|
||||
|
||||
```bash
|
||||
!pip install -q peft transformers datasets
|
||||
```
|
||||
|
||||
## Setup
|
||||
|
||||
Start by defining the model and tokenizer, the dataset and the dataset columns to train on, some training hyperparameters, and the [`PromptTuningConfig`]. The [`PromptTuningConfig`] contains information about the task type, the text to initialize the prompt embedding, the number of virtual tokens, and the tokenizer to use:
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup
|
||||
from peft import get_peft_config, get_peft_model, PromptTuningInit, PromptTuningConfig, TaskType, PeftType
|
||||
import torch
|
||||
from datasets import load_dataset
|
||||
import os
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm import tqdm
|
||||
|
||||
device = "cuda"
|
||||
model_name_or_path = "bigscience/bloomz-560m"
|
||||
tokenizer_name_or_path = "bigscience/bloomz-560m"
|
||||
peft_config = PromptTuningConfig(
|
||||
task_type=TaskType.CAUSAL_LM,
|
||||
prompt_tuning_init=PromptTuningInit.TEXT,
|
||||
num_virtual_tokens=8,
|
||||
prompt_tuning_init_text="Classify if the tweet is a complaint or not:",
|
||||
tokenizer_name_or_path=model_name_or_path,
|
||||
)
|
||||
|
||||
dataset_name = "twitter_complaints"
|
||||
checkpoint_name = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}_v1.pt".replace(
|
||||
"/", "_"
|
||||
)
|
||||
text_column = "Tweet text"
|
||||
label_column = "text_label"
|
||||
max_length = 64
|
||||
lr = 3e-2
|
||||
num_epochs = 50
|
||||
batch_size = 8
|
||||
```
|
||||
|
||||
## Load dataset
|
||||
|
||||
For this guide, you'll load the `twitter_complaints` subset of the [RAFT](https://huggingface.co/datasets/ought/raft) dataset. This subset contains tweets that are labeled either `complaint` or `no complaint`:
|
||||
|
||||
```py
|
||||
dataset = load_dataset("ought/raft", dataset_name)
|
||||
dataset["train"][0]
|
||||
{"Tweet text": "@HMRCcustomers No this is my first job", "ID": 0, "Label": 2}
|
||||
```
|
||||
|
||||
To make the `Label` column more readable, replace the `Label` value with the corresponding label text and store them in a `text_label` column. You can use the [`~datasets.Dataset.map`] function to apply this change over the entire dataset in one step:
|
||||
|
||||
```py
|
||||
classes = [k.replace("_", " ") for k in dataset["train"].features["Label"].names]
|
||||
dataset = dataset.map(
|
||||
lambda x: {"text_label": [classes[label] for label in x["Label"]]},
|
||||
batched=True,
|
||||
num_proc=1,
|
||||
)
|
||||
{"Tweet text": "@HMRCcustomers No this is my first job", "ID": 0, "Label": 2, "text_label": "no complaint"}
|
||||
```
|
||||
|
||||
## Preprocess dataset
|
||||
|
||||
Next, you'll set up a tokenizer, configure the appropriate padding token to use for padding sequences, and determine the maximum length of the tokenized labels:
|
||||
|
||||
```py
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
|
||||
if tokenizer.pad_token_id is None:
|
||||
tokenizer.pad_token_id = tokenizer.eos_token_id
|
||||
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
|
||||
print(target_max_length)
|
||||
3
|
||||
```
|
||||
|
||||
Create a `preprocess_function` to:
|
||||
|
||||
1. Tokenize the input text and labels.
|
||||
2. For each example in a batch, pad the labels with the tokenizer's `pad_token_id`.
|
||||
3. Concatenate the input text and labels into the `model_inputs`.
|
||||
4. Create a separate attention mask for `labels` and `model_inputs`.
|
||||
5. Loop through each example in the batch again to pad the input ids, labels, and attention mask to the `max_length` and convert them to PyTorch tensors.
|
||||
|
||||
```py
|
||||
def preprocess_function(examples):
|
||||
batch_size = len(examples[text_column])
|
||||
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
|
||||
targets = [str(x) for x in examples[label_column]]
|
||||
model_inputs = tokenizer(inputs)
|
||||
labels = tokenizer(targets)
|
||||
for i in range(batch_size):
|
||||
sample_input_ids = model_inputs["input_ids"][i]
|
||||
label_input_ids = labels["input_ids"][i] + [tokenizer.pad_token_id]
|
||||
# print(i, sample_input_ids, label_input_ids)
|
||||
model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
|
||||
labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
|
||||
model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
|
||||
# print(model_inputs)
|
||||
for i in range(batch_size):
|
||||
sample_input_ids = model_inputs["input_ids"][i]
|
||||
label_input_ids = labels["input_ids"][i]
|
||||
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
|
||||
max_length - len(sample_input_ids)
|
||||
) + sample_input_ids
|
||||
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
|
||||
"attention_mask"
|
||||
][i]
|
||||
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
|
||||
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
|
||||
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
|
||||
labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
return model_inputs
|
||||
```
|
||||
|
||||
Use the [`~datasets.Dataset.map`] function to apply the `preprocess_function` to the entire dataset. You can remove the unprocessed columns since the model won't need them:
|
||||
|
||||
```py
|
||||
processed_datasets = dataset.map(
|
||||
preprocess_function,
|
||||
batched=True,
|
||||
num_proc=1,
|
||||
remove_columns=dataset["train"].column_names,
|
||||
load_from_cache_file=False,
|
||||
desc="Running tokenizer on dataset",
|
||||
)
|
||||
```
|
||||
|
||||
Create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) from the `train` and `eval` datasets. Set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU.
|
||||
|
||||
```py
|
||||
train_dataset = processed_datasets["train"]
|
||||
eval_dataset = processed_datasets["train"]
|
||||
|
||||
|
||||
train_dataloader = DataLoader(
|
||||
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
|
||||
)
|
||||
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
|
||||
```
|
||||
|
||||
## Train
|
||||
|
||||
You're almost ready to set up your model and start training!
|
||||
|
||||
Initialize a base model from [`~transformers.AutoModelForCausalLM`], and pass it and `peft_config` to the [`get_peft_model`] function to create a [`PeftModel`]. You can print the new [`PeftModel`]'s trainable parameters to see how much more efficient it is than training the full parameters of the original model!
|
||||
|
||||
```py
|
||||
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"trainable params: 8192 || all params: 559222784 || trainable%: 0.0014648902430985358"
|
||||
```
|
||||
|
||||
Setup an optimizer and learning rate scheduler:
|
||||
|
||||
```py
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=0,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
```
|
||||
|
||||
Move the model to the GPU, then write a training loop to start training!
|
||||
|
||||
```py
|
||||
model = model.to(device)
|
||||
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
total_loss = 0
|
||||
for step, batch in enumerate(tqdm(train_dataloader)):
|
||||
batch = {k: v.to(device) for k, v in batch.items()}
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
total_loss += loss.detach().float()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
model.eval()
|
||||
eval_loss = 0
|
||||
eval_preds = []
|
||||
for step, batch in enumerate(tqdm(eval_dataloader)):
|
||||
batch = {k: v.to(device) for k, v in batch.items()}
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
eval_loss += loss.detach().float()
|
||||
eval_preds.extend(
|
||||
tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
|
||||
)
|
||||
|
||||
eval_epoch_loss = eval_loss / len(eval_dataloader)
|
||||
eval_ppl = torch.exp(eval_epoch_loss)
|
||||
train_epoch_loss = total_loss / len(train_dataloader)
|
||||
train_ppl = torch.exp(train_epoch_loss)
|
||||
print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
|
||||
```
|
||||
|
||||
## Share model
|
||||
|
||||
You can store and share your model on the Hub if you'd like. Log in to your Hugging Face account and enter your token when prompted:
|
||||
|
||||
```py
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
notebook_login()
|
||||
```
|
||||
|
||||
Use the [`~transformers.PreTrainedModel.push_to_hub`] function to upload your model to a model repository on the Hub:
|
||||
|
||||
```py
|
||||
peft_model_id = "your-name/bloomz-560m_PROMPT_TUNING_CAUSAL_LM"
|
||||
model.push_to_hub(peft_model_id, use_auth_token=True)
|
||||
```
|
||||
|
||||
Once the model is uploaded, you'll see the model file size is only 33.5kB! 🤏
|
||||
|
||||
## Inference
|
||||
|
||||
Let's try the model on a sample input for inference. If you look at the repository you uploaded the model to, you'll see an `adapter_config.json` file. Load this file into [`PeftConfig`] to specify the `peft_type` and `task_type`. Then you can load the prompt-tuned model weights and the configuration into [`~PeftModel.from_pretrained`] to create the [`PeftModel`]:
|
||||
|
||||
```py
|
||||
from peft import PeftModel, PeftConfig
|
||||
|
||||
peft_model_id = "stevhliu/bloomz-560m_PROMPT_TUNING_CAUSAL_LM"
|
||||
|
||||
config = PeftConfig.from_pretrained(peft_model_id)
|
||||
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
|
||||
model = PeftModel.from_pretrained(model, peft_model_id)
|
||||
```
|
||||
|
||||
Grab a tweet and tokenize it:
|
||||
|
||||
```py
|
||||
inputs = tokenizer(
|
||||
f'{text_column} : {"@nationalgridus I have no water and the bill is current and paid. Can you do something about this?"} Label : ',
|
||||
return_tensors="pt",
|
||||
)
|
||||
```
|
||||
|
||||
Put the model on a GPU and *generate* the predicted label:
|
||||
|
||||
```py
|
||||
model.to(device)
|
||||
|
||||
with torch.no_grad():
|
||||
inputs = {k: v.to(device) for k, v in inputs.items()}
|
||||
outputs = model.generate(
|
||||
input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], max_new_tokens=10, eos_token_id=3
|
||||
)
|
||||
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
|
||||
[
|
||||
"Tweet text : @nationalgridus I have no water and the bill is current and paid. Can you do something about this? Label : complaint"
|
||||
]
|
||||
```
|
@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# DreamBooth fine-tuning with LoRA
|
||||
@ -83,6 +87,7 @@ accelerate launch train_dreambooth.py \
|
||||
--output_dir=$OUTPUT_DIR \
|
||||
--train_text_encoder \
|
||||
--with_prior_preservation --prior_loss_weight=1.0 \
|
||||
--num_dataloader_workers=1 \
|
||||
--instance_prompt="a photo of sks dog" \
|
||||
--class_prompt="a photo of dog" \
|
||||
--resolution=512 \
|
||||
@ -101,6 +106,8 @@ accelerate launch train_dreambooth.py \
|
||||
--max_train_steps=800
|
||||
```
|
||||
|
||||
If you are running this script on Windows, you may need to set `--num_dataloader_workers` to 0.
|
||||
|
||||
## Inference with a single adapter
|
||||
|
||||
To run inference with the fine-tuned model, first specify the base model with which the fine-tuned LoRA weights will be combined:
|
||||
@ -171,7 +178,7 @@ image.save("DESTINATION_PATH_FOR_THE_IMAGE")
|
||||
## Multi-adapter inference
|
||||
|
||||
With PEFT you can combine multiple adapters for inference. In the previous example you have fine-tuned Stable Diffusion on
|
||||
some dog images. The pipeline created based on these weights got a name - `adapter_name="dog`. Now, suppose you also fine-tuned
|
||||
some dog images. The pipeline created based on these weights got a name - `adapter_name="dog"`. Now, suppose you also fine-tuned
|
||||
this base model on images of a crochet toy. Let's see how we can use both adapters.
|
||||
|
||||
First, you'll need to perform all the steps as in the single adapter inference example:
|
@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Image classification using LoRA
|
||||
@ -26,7 +30,7 @@ For more information on LoRA, please refer to the [original LoRA paper](https://
|
||||
Install the libraries required for model training:
|
||||
|
||||
```bash
|
||||
!pip install transformers accelerate evaluate datasets loralib peft -q
|
||||
!pip install transformers accelerate evaluate datasets peft -q
|
||||
```
|
||||
|
||||
Check the versions of all required libraries to make sure you are up to date:
|
||||
@ -324,7 +328,7 @@ Bring everything together - model, training arguments, data, collation function,
|
||||
|
||||
```python
|
||||
trainer = Trainer(
|
||||
model,
|
||||
lora_model,
|
||||
args,
|
||||
train_dataset=train_ds,
|
||||
eval_dataset=val_ds,
|
@ -1,3 +1,7 @@
|
||||
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# int8 training for automatic speech recognition
|
||||
|
||||
Quantization reduces the precision of floating point data types, decreasing the memory required to store model weights. However, quantization degrades inference performance because you lose information when you reduce the precision. 8-bit or `int8` quantization uses only a quarter precision, but it does not degrade performance because it doesn't just drop the bits or data. Instead, `int8` quantization *rounds* from one data type to another.
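
To make this concrete, here is a minimal sketch (not part of this guide's training setup) of loading a speech model with int8 weights; it assumes `bitsandbytes` and `accelerate` are installed, and the checkpoint name is only an example:

```py
# Minimal sketch: load a speech seq2seq model with int8 weights.
# Assumes `pip install bitsandbytes accelerate`; the checkpoint is an example choice.
from transformers import AutoModelForSpeechSeq2Seq

model_8bit = AutoModelForSpeechSeq2Seq.from_pretrained(
    "openai/whisper-small",
    load_in_8bit=True,   # store weights in int8; compute still happens in higher precision
    device_map="auto",
)
print(model_8bit.get_memory_footprint())  # roughly a quarter of the fp32 footprint
```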
|
||||
@ -205,7 +209,7 @@ Let's also apply LoRA to the training to make it even more efficient. Load a [`~
|
||||
```py
|
||||
from peft import LoraConfig, PeftModel, LoraModel, LoraConfig, get_peft_model
|
||||
|
||||
config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="None")
|
||||
config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none")
|
||||
```
|
||||
|
||||
After you set up the [`~peft.LoraConfig`], wrap it and the base model with the [`get_peft_model`] function to create a [`PeftModel`]. Print out the number of trainable parameters to see how much more efficient LoRA is compared to fully training the model!
|
||||
@ -375,4 +379,4 @@ with torch.cuda.amp.autocast():
|
||||
text = pipe(audio, generate_kwargs={"forced_decoder_ids": forced_decoder_ids}, max_new_tokens=255)["text"]
|
||||
text
|
||||
"मी तुमच्यासाठी काही करू शकतो का?"
|
||||
```
|
||||
|
docs/source/task_guides/prompt_based_methods.md (new file, 305 lines)
@ -0,0 +1,305 @@
|
||||
<!--Copyright 2024 The HuggingFace Team. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
|
||||
the License. You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Prompt-based methods
|
||||
|
||||
A prompt can describe a task or provide an example of a task you want the model to learn. Instead of manually creating these prompts, soft prompting methods add learnable parameters to the input embeddings that can be optimized for a specific task while keeping the pretrained model's parameters frozen. This makes it both faster and easier to finetune large language models (LLMs) for new downstream tasks.
|
||||
|
||||
The PEFT library supports several types of prompting methods (p-tuning, prefix tuning, prompt tuning) and you can learn more about how these methods work conceptually in the [Soft prompts](../conceptual_guides/prompting) guide. If you're interested in applying these methods to other tasks and use cases, take a look at our [notebook collection](https://huggingface.co/spaces/PEFT/soft-prompting)!
|
||||
|
||||
This guide will show you how to train a causal language model - with a soft prompting method - to *generate a classification* for whether a tweet is a complaint or not.
|
||||
|
||||
<Tip>
|
||||
|
||||
Some familiarity with the general process of training a causal language model would be really helpful and allow you to focus on the soft prompting methods. If you're new, we recommend taking a look at the [Causal language modeling](https://huggingface.co/docs/transformers/tasks/language_modeling) guide from the Transformers documentation first. When you're ready, come back and see how easy it is to drop PEFT into your training!
|
||||
|
||||
</Tip>
|
||||
|
||||
Before you begin, make sure you have all the necessary libraries installed.
|
||||
|
||||
```bash
|
||||
pip install -q peft transformers datasets
|
||||
```
|
||||
|
||||
## Dataset
|
||||
|
||||
For this guide, you'll use the `twitter_complaints` subset of the [RAFT](https://huggingface.co/datasets/ought/raft) dataset. The `twitter_complaints` subset contains tweets labeled as `complaint` and `no complaint` and you can check out the [dataset viewer](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) for a better idea of what the data looks like.
|
||||
|
||||
Use the [`~datasets.load_dataset`] function to load the dataset and create a new `text_label` column so it is easier to understand what the `Label` values `1` and `2` mean.
|
||||
|
||||
```py
|
||||
from datasets import load_dataset
|
||||
|
||||
ds = load_dataset("ought/raft", "twitter_complaints")
|
||||
|
||||
classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names]
|
||||
ds = ds.map(
|
||||
lambda x: {"text_label": [classes[label] for label in x["Label"]]},
|
||||
batched=True,
|
||||
num_proc=1,
|
||||
)
|
||||
ds["train"][0]
|
||||
{"Tweet text": "@HMRCcustomers No this is my first job", "ID": 0, "Label": 2, "text_label": "no complaint"}
|
||||
```
|
||||
|
||||
Load a tokenizer, define the padding token to use, and determine the maximum length of the tokenized label.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
|
||||
if tokenizer.pad_token_id is None:
|
||||
tokenizer.pad_token_id = tokenizer.eos_token_id
|
||||
target_max_length = max([len(tokenizer(class_label)["input_ids"]) for class_label in classes])
|
||||
print(target_max_length)
|
||||
```
|
||||
|
||||
Create a preprocessing function that tokenizes the tweet text and labels, pads the inputs and labels in each batch, creates an attention mask, and truncates sequences to the `max_length`. Then convert the `input_ids`, `attention_mask`, and `labels` to PyTorch tensors.
|
||||
|
||||
```py
|
||||
import torch
|
||||
|
||||
max_length = 64
|
||||
|
||||
def preprocess_function(examples, text_column="Tweet text", label_column="text_label"):
|
||||
batch_size = len(examples[text_column])
|
||||
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
|
||||
targets = [str(x) for x in examples[label_column]]
|
||||
model_inputs = tokenizer(inputs)
|
||||
labels = tokenizer(targets)
|
||||
classes = [k.replace("_", " ") for k in ds["train"].features["Label"].names]
|
||||
for i in range(batch_size):
|
||||
sample_input_ids = model_inputs["input_ids"][i]
|
||||
label_input_ids = labels["input_ids"][i]
|
||||
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
|
||||
max_length - len(sample_input_ids)
|
||||
) + sample_input_ids
|
||||
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
|
||||
"attention_mask"
|
||||
][i]
|
||||
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
|
||||
model_inputs["input_ids"][i] = torch.tensor(model_inputs["input_ids"][i][:max_length])
|
||||
model_inputs["attention_mask"][i] = torch.tensor(model_inputs["attention_mask"][i][:max_length])
|
||||
labels["input_ids"][i] = torch.tensor(labels["input_ids"][i][:max_length])
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
return model_inputs
|
||||
```
|
||||
|
||||
Apply the preprocessing function to the entire dataset with the [`~datasets.Dataset.map`] function, and remove the unprocessed columns because the model won't need them.
|
||||
|
||||
```py
|
||||
processed_ds = ds.map(
|
||||
preprocess_function,
|
||||
batched=True,
|
||||
num_proc=1,
|
||||
remove_columns=ds["train"].column_names,
|
||||
load_from_cache_file=False,
|
||||
desc="Running tokenizer on dataset",
|
||||
)
|
||||
```
|
||||
|
||||
Finally, create a training and evaluation [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader). You can set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU.
|
||||
|
||||
```py
|
||||
from torch.utils.data import DataLoader
|
||||
from transformers import default_data_collator
|
||||
|
||||
train_ds = processed_ds["train"]
|
||||
eval_ds = processed_ds["test"]
|
||||
|
||||
batch_size = 16
|
||||
|
||||
train_dataloader = DataLoader(train_ds, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
|
||||
eval_dataloader = DataLoader(eval_ds, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
|
||||
```
|
||||
|
||||
## Model
|
||||
|
||||
Now let's load a pretrained model to use as the base model for the soft prompt method. This guide uses the [bigscience/bloomz-560m](https://huggingface.co/bigscience/bloomz-560m) model, but you can use any causal language model you want.
|
||||
|
||||
```py
|
||||
from transformers import AutoModelForCausalLM
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
|
||||
```
|
||||
|
||||
### PEFT configuration and model
|
||||
|
||||
For any PEFT method, you'll need to create a configuration which contains all the parameters that specify how the PEFT method should be applied. Once the configuration is set up, pass it to the [`~peft.get_peft_model`] function along with the base model to create a trainable [`PeftModel`].
|
||||
|
||||
<Tip>
|
||||
|
||||
Call the [`~PeftModel.print_trainable_parameters`] method to compare the number of trainable parameters of [`PeftModel`] versus the number of parameters in the base model!
|
||||
|
||||
</Tip>
|
||||
|
||||
<hfoptions id="configurations">
|
||||
<hfoption id="p-tuning">
|
||||
|
||||
[P-tuning](../conceptual_guides/prompting#p-tuning) adds a trainable embedding tensor where the prompt tokens can be added anywhere in the input sequence. Create a [`PromptEncoderConfig`] with the task type, the number of virtual tokens to add and learn, and the hidden size of the encoder for learning the prompt parameters.
|
||||
|
||||
```py
|
||||
from peft import PromptEncoderConfig, get_peft_model
|
||||
|
||||
peft_config = PromptEncoderConfig(task_type="CAUSAL_LM", num_virtual_tokens=20, encoder_hidden_size=128)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"trainable params: 300,288 || all params: 559,514,880 || trainable%: 0.05366935013417338"
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="prefix tuning">
|
||||
|
||||
[Prefix tuning](../conceptual_guides/prompting#prefix-tuning) adds task-specific parameters in all of the model layers, which are optimized by a separate feed-forward network. Create a [`PrefixTuningConfig`] with the task type and number of virtual tokens to add and learn.
|
||||
|
||||
```py
|
||||
from peft import PrefixTuningConfig, get_peft_model
|
||||
|
||||
peft_config = PrefixTuningConfig(task_type="CAUSAL_LM", num_virtual_tokens=20)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"trainable params: 983,040 || all params: 560,197,632 || trainable%: 0.1754809274167014"
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
<hfoption id="prompt tuning">
|
||||
|
||||
[Prompt tuning](../conceptual_guides/prompting#prompt-tuning) formulates all tasks as a *generation* task and adds a task-specific prompt to the input which is updated independently. The `prompt_tuning_init_text` parameter describes the task to finetune the model on (in this case, classifying whether tweets are complaints or not). For the best results, `num_virtual_tokens` should match the number of tokens in the `prompt_tuning_init_text`, so you can set it to the tokenized length of that text.
|
||||
|
||||
Create a [`PromptTuningConfig`] with the task type, the initial prompt tuning text to train the model with, the number of virtual tokens to add and learn, and a tokenizer.
|
||||
|
||||
```py
|
||||
from peft import PromptTuningConfig, PromptTuningInit, get_peft_model
|
||||
|
||||
prompt_tuning_init_text = "Classify if the tweet is a complaint or no complaint.\n"
|
||||
peft_config = PromptTuningConfig(
|
||||
task_type="CAUSAL_LM",
|
||||
prompt_tuning_init=PromptTuningInit.TEXT,
|
||||
num_virtual_tokens=len(tokenizer(prompt_tuning_init_text)["input_ids"]),
|
||||
prompt_tuning_init_text=prompt_tuning_init_text,
|
||||
tokenizer_name_or_path="bigscience/bloomz-560m",
|
||||
)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"trainable params: 8,192 || all params: 559,222,784 || trainable%: 0.0014648902430985358"
|
||||
```
|
||||
|
||||
</hfoption>
|
||||
</hfoptions>
|
||||
|
||||
### Training
|
||||
|
||||
Set up an optimizer and learning rate scheduler.
|
||||
|
||||
```py
|
||||
from transformers import get_linear_schedule_with_warmup
|
||||
|
||||
lr = 3e-2
|
||||
num_epochs = 50
|
||||
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=0,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
```
|
||||
|
||||
Move the model to the GPU and create a training loop that reports the loss and perplexity for each epoch.
|
||||
|
||||
```py
|
||||
from tqdm import tqdm
|
||||
|
||||
device = "cuda"
|
||||
model = model.to(device)
|
||||
|
||||
for epoch in range(num_epochs):
|
||||
model.train()
|
||||
total_loss = 0
|
||||
for step, batch in enumerate(tqdm(train_dataloader)):
|
||||
batch = {k: v.to(device) for k, v in batch.items()}
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
total_loss += loss.detach().float()
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
optimizer.zero_grad()
|
||||
|
||||
model.eval()
|
||||
eval_loss = 0
|
||||
eval_preds = []
|
||||
for step, batch in enumerate(tqdm(eval_dataloader)):
|
||||
batch = {k: v.to(device) for k, v in batch.items()}
|
||||
with torch.no_grad():
|
||||
outputs = model(**batch)
|
||||
loss = outputs.loss
|
||||
eval_loss += loss.detach().float()
|
||||
eval_preds.extend(
|
||||
tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
|
||||
)
|
||||
|
||||
eval_epoch_loss = eval_loss / len(eval_dataloader)
|
||||
eval_ppl = torch.exp(eval_epoch_loss)
|
||||
train_epoch_loss = total_loss / len(train_dataloader)
|
||||
train_ppl = torch.exp(train_epoch_loss)
|
||||
print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
|
||||
```
|
||||
|
||||
## Share your model
|
||||
|
||||
Once training is complete, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method. You'll need to log in to your Hugging Face account first and enter your token when prompted.
|
||||
|
||||
```py
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
account = "<your-hf-account-name>"
|
||||
peft_model_id = f"{account}/bloomz-560-m-peft-method"
|
||||
model.push_to_hub(peft_model_id)
|
||||
```
|
||||
|
||||
If you check the model file size in the repository, you'll see that it is a lot smaller than a full-sized model!
|
||||
|
||||
<div class="flex flex-col justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/PEFT-hub-screenshot.png"/>
|
||||
<figcaption class="text-center">For example, the adapter weights for an opt-350m model stored on the Hub are only ~6MB compared to the full model size, which can be ~700MB.</figcaption>
|
||||
</div>
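
If you want to check this programmatically, one option (a sketch; the repository id below is a placeholder for your own) is to list the repository files and their sizes with `huggingface_hub`:

```py
# Sketch: list the uploaded files and their sizes; the repo id is a placeholder.
from huggingface_hub import HfApi

api = HfApi()
info = api.model_info("your-name/bloomz-560-m-peft-method", files_metadata=True)
for f in info.siblings:
    print(f.rfilename, f.size)  # the adapter weights are only a few MB at most
```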
|
||||
|
||||
## Inference
|
||||
|
||||
Let's load the model for inference and test it out on a tweet!
|
||||
|
||||
```py
|
||||
from peft import AutoPeftModelForCausalLM
|
||||
|
||||
model = AutoPeftModelForCausalLM.from_pretrained(peft_model_id).to("cuda")
|
||||
tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
|
||||
|
||||
i = 15
|
||||
inputs = tokenizer(f'Tweet text : {ds["test"][i]["Tweet text"]} Label : ', return_tensors="pt")
|
||||
print(ds["test"][i]["Tweet text"])
|
||||
"@NYTsupport i have complained a dozen times & yet my papers are still thrown FAR from my door. Why is this so hard to resolve?"
|
||||
```
|
||||
|
||||
Call the [`~transformers.GenerationMixin.generate`] method to generate the predicted classification label.
|
||||
|
||||
```py
|
||||
with torch.no_grad():
|
||||
inputs = {k: v.to(device) for k, v in inputs.items()}
|
||||
outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
|
||||
print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
|
||||
"['Tweet text : @NYTsupport i have complained a dozen times & yet my papers are still thrown FAR from my door. Why is this so hard to resolve? Label : complaint']"
|
||||
```
|
@ -1,232 +0,0 @@
|
||||
# P-tuning for sequence classification
|
||||
|
||||
It is challenging to finetune large language models for downstream tasks because they have so many parameters. To work around this, you can use *prompts* to steer the model toward a particular downstream task without fully finetuning a model. Typically, these prompts are handcrafted, which may be impractical because you need very large validation sets to find the best prompts. *P-tuning* is a method for automatically searching and optimizing for better prompts in a continuous space.
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 Read [GPT Understands, Too](https://arxiv.org/abs/2103.10385) to learn more about p-tuning.
|
||||
|
||||
</Tip>
|
||||
|
||||
This guide will show you how to train a [`roberta-large`](https://huggingface.co/roberta-large) model (but you can also use any of the GPT, OPT, or BLOOM models) with p-tuning on the `mrpc` configuration of the [GLUE](https://huggingface.co/datasets/glue) benchmark.
|
||||
|
||||
Before you begin, make sure you have all the necessary libraries installed:
|
||||
|
||||
```bash
|
||||
!pip install -q peft transformers datasets evaluate
|
||||
```
|
||||
|
||||
## Setup
|
||||
|
||||
To get started, import 🤗 Transformers to create the base model, 🤗 Datasets to load a dataset, 🤗 Evaluate to load an evaluation metric, and 🤗 PEFT to create a [`PeftModel`] and set up the configuration for p-tuning.
|
||||
|
||||
Define the model, dataset, and some basic training hyperparameters:
|
||||
|
||||
```py
|
||||
from transformers import (
|
||||
AutoModelForSequenceClassification,
|
||||
AutoTokenizer,
|
||||
DataCollatorWithPadding,
|
||||
TrainingArguments,
|
||||
Trainer,
|
||||
)
|
||||
from peft import (
|
||||
get_peft_config,
|
||||
get_peft_model,
|
||||
get_peft_model_state_dict,
|
||||
set_peft_model_state_dict,
|
||||
PeftType,
|
||||
PromptEncoderConfig,
|
||||
)
|
||||
from datasets import load_dataset
|
||||
import evaluate
|
||||
import torch
|
||||
|
||||
model_name_or_path = "roberta-large"
|
||||
task = "mrpc"
|
||||
num_epochs = 20
|
||||
lr = 1e-3
|
||||
batch_size = 32
|
||||
```
|
||||
|
||||
## Load dataset and metric
|
||||
|
||||
Next, load the `mrpc` configuration - a corpus of sentence pairs labeled according to whether they're semantically equivalent or not - from the [GLUE](https://huggingface.co/datasets/glue) benchmark:
|
||||
|
||||
```py
|
||||
dataset = load_dataset("glue", task)
|
||||
dataset["train"][0]
|
||||
{
|
||||
"sentence1": 'Amrozi accused his brother , whom he called " the witness " , of deliberately distorting his evidence .',
|
||||
"sentence2": 'Referring to him as only " the witness " , Amrozi accused his brother of deliberately distorting his evidence .',
|
||||
"label": 1,
|
||||
"idx": 0,
|
||||
}
|
||||
```
|
||||
|
||||
From 🤗 Evaluate, load a metric for evaluating the model's performance. The evaluation module returns the accuracy and F1 scores associated with this specific task.
|
||||
|
||||
```py
|
||||
metric = evaluate.load("glue", task)
|
||||
```
|
||||
|
||||
Now you can use the `metric` to write a function that computes the accuracy and F1 scores. The `compute_metrics` function calculates the scores from the model predictions and labels:
|
||||
|
||||
```py
|
||||
import numpy as np
|
||||
|
||||
|
||||
def compute_metrics(eval_pred):
|
||||
predictions, labels = eval_pred
|
||||
predictions = np.argmax(predictions, axis=1)
|
||||
return metric.compute(predictions=predictions, references=labels)
|
||||
```
|
||||
|
||||
## Preprocess dataset
|
||||
|
||||
Initialize the tokenizer and configure the padding token to use. If you're using a GPT, OPT, or BLOOM model, you should set the `padding_side` to the left; otherwise it'll be set to the right. Tokenize the sentence pairs and truncate them to the maximum length.
|
||||
|
||||
```py
|
||||
if any(k in model_name_or_path for k in ("gpt", "opt", "bloom")):
|
||||
padding_side = "left"
|
||||
else:
|
||||
padding_side = "right"
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=padding_side)
|
||||
if getattr(tokenizer, "pad_token_id") is None:
|
||||
tokenizer.pad_token_id = tokenizer.eos_token_id
|
||||
|
||||
|
||||
def tokenize_function(examples):
|
||||
# max_length=None => use the model max length (it's actually the default)
|
||||
outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
|
||||
return outputs
|
||||
```
|
||||
|
||||
Use [`~datasets.Dataset.map`] to apply the `tokenize_function` to the dataset, and remove the unprocessed columns because the model won't need those. You should also rename the `label` column to `labels` because that is the expected name for the labels by models in the 🤗 Transformers library.
|
||||
|
||||
```py
|
||||
tokenized_datasets = dataset.map(
|
||||
tokenize_function,
|
||||
batched=True,
|
||||
remove_columns=["idx", "sentence1", "sentence2"],
|
||||
)
|
||||
|
||||
tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
|
||||
```
|
||||
|
||||
Create a collator function with [`~transformers.DataCollatorWithPadding`] to pad the examples in the batches to the `longest` sequence in the batch:
|
||||
|
||||
```py
|
||||
data_collator = DataCollatorWithPadding(tokenizer=tokenizer, padding="longest")
|
||||
```
|
||||
|
||||
## Train
|
||||
|
||||
P-tuning uses a prompt encoder to optimize the prompt parameters, so you'll need to initialize the [`PromptEncoderConfig`] with several arguments:
|
||||
|
||||
- `task_type`: the type of task you're training on, in this case it is sequence classification or `SEQ_CLS`
|
||||
- `num_virtual_tokens`: the number of virtual tokens to use, or in other words, the prompt
|
||||
- `encoder_hidden_size`: the hidden size of the encoder used to optimize the prompt parameters
|
||||
|
||||
```py
|
||||
peft_config = PromptEncoderConfig(task_type="SEQ_CLS", num_virtual_tokens=20, encoder_hidden_size=128)
|
||||
```
|
||||
|
||||
Create the base `roberta-large` model from [`~transformers.AutoModelForSequenceClassification`], and then wrap the base model and `peft_config` with [`get_peft_model`] to create a [`PeftModel`]. If you're curious to see how many parameters you're actually training compared to training on all the model parameters, you can print it out with [`~peft.PeftModel.print_trainable_parameters`]:
|
||||
|
||||
```py
|
||||
model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"trainable params: 1351938 || all params: 355662082 || trainable%: 0.38011867680626127"
|
||||
```
|
||||
|
||||
From the 🤗 Transformers library, set up the [`~transformers.TrainingArguments`] class with the directory where you want to save the model, the training hyperparameters, how to evaluate the model, and when to save the checkpoints:
|
||||
|
||||
```py
|
||||
training_args = TrainingArguments(
|
||||
output_dir="your-name/roberta-large-peft-p-tuning",
|
||||
learning_rate=1e-3,
|
||||
per_device_train_batch_size=32,
|
||||
per_device_eval_batch_size=32,
|
||||
num_train_epochs=2,
|
||||
weight_decay=0.01,
|
||||
evaluation_strategy="epoch",
|
||||
save_strategy="epoch",
|
||||
load_best_model_at_end=True,
|
||||
)
|
||||
```
|
||||
|
||||
Then pass the model, `TrainingArguments`, datasets, tokenizer, data collator, and evaluation function to the [`~transformers.Trainer`] class, which'll handle the entire training loop for you. Once you're ready, call [`~transformers.Trainer.train`] to start training!
|
||||
|
||||
```py
|
||||
trainer = Trainer(
|
||||
model=model,
|
||||
args=training_args,
|
||||
train_dataset=tokenized_datasets["train"],
|
||||
eval_dataset=tokenized_datasets["test"],
|
||||
tokenizer=tokenizer,
|
||||
data_collator=data_collator,
|
||||
compute_metrics=compute_metrics,
|
||||
)
|
||||
|
||||
trainer.train()
|
||||
```
|
||||
|
||||
## Share model
|
||||
|
||||
You can store and share your model on the Hub if you'd like. Log in to your Hugging Face account and enter your token when prompted:
|
||||
|
||||
```py
|
||||
from huggingface_hub import notebook_login
|
||||
|
||||
notebook_login()
|
||||
```
|
||||
|
||||
Upload the model to a specific model repository on the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] function:
|
||||
|
||||
```py
|
||||
model.push_to_hub("your-name/roberta-large-peft-p-tuning", use_auth_token=True)
|
||||
```
|
||||
|
||||
## Inference
|
||||
|
||||
Once the model has been uploaded to the Hub, anyone can easily use it for inference. Load the configuration and model:
|
||||
|
||||
```py
|
||||
import torch
|
||||
from peft import PeftModel, PeftConfig
|
||||
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
||||
|
||||
peft_model_id = "smangrul/roberta-large-peft-p-tuning"
|
||||
config = PeftConfig.from_pretrained(peft_model_id)
|
||||
inference_model = AutoModelForSequenceClassification.from_pretrained(config.base_model_name_or_path)
|
||||
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
|
||||
model = PeftModel.from_pretrained(inference_model, peft_model_id)
|
||||
```
|
||||
|
||||
Get some text and tokenize it:
|
||||
|
||||
```py
|
||||
classes = ["not equivalent", "equivalent"]
|
||||
|
||||
sentence1 = "Coast redwood trees are the tallest trees on the planet and can grow over 300 feet tall."
|
||||
sentence2 = "The coast redwood trees, which can attain a height of over 300 feet, are the tallest trees on earth."
|
||||
|
||||
inputs = tokenizer(sentence1, sentence2, truncation=True, padding="longest", return_tensors="pt")
|
||||
```
|
||||
|
||||
Pass the inputs to the model to classify the sentences:
|
||||
|
||||
```py
|
||||
with torch.no_grad():
|
||||
outputs = model(**inputs).logits
|
||||
print(outputs)
|
||||
|
||||
paraphrased_text = torch.softmax(outputs, dim=1).tolist()[0]
|
||||
for i in range(len(classes)):
|
||||
print(f"{classes[i]}: {int(round(paraphrased_text[i] * 100))}%")
|
||||
"not equivalent: 4%"
|
||||
"equivalent: 96%"
|
||||
```
|
docs/source/task_guides/semantic-similarity-lora.md (new file, 297 lines)
@ -0,0 +1,297 @@
|
||||
<!--⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
-->
|
||||
|
||||
# LoRA for semantic similarity tasks
|
||||
|
||||
Low-Rank Adaptation (LoRA) is a reparametrization method that aims to reduce the number of trainable parameters with low-rank representations. The weight matrix is broken down into low-rank matrices that are trained and updated. All the pretrained model parameters remain frozen. After training, the low-rank matrices are added back to the original weights. This makes it more efficient to store and train a LoRA model because there are significantly fewer parameters.
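
To make the parameter savings concrete, here is a small self-contained sketch of the low-rank factorization described above; the dimensions and rank are arbitrary example values, not taken from `e5-large-v2`:

```py
import torch

d, k, r = 1024, 1024, 8       # example weight shape and LoRA rank (arbitrary values)
W = torch.randn(d, k)         # frozen pretrained weight
B = torch.zeros(d, r)         # trainable low-rank factor, initialized to zero
A = torch.randn(r, k) * 0.01  # trainable low-rank factor

delta_W = B @ A               # the low-rank update learned during training
W_adapted = W + delta_W       # merged back into the original weight after training

print(W.numel())              # 1,048,576 frozen parameters
print(A.numel() + B.numel())  # 16,384 trainable parameters (~1.6% of the original)
```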
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 Read [LoRA: Low-Rank Adaptation of Large Language Models](https://arxiv.org/abs/2106.09685) to learn more about LoRA.
|
||||
|
||||
</Tip>
|
||||
|
||||
In this guide, we'll be using a LoRA [script](https://github.com/huggingface/peft/tree/main/examples/feature_extraction) to fine-tune an [`intfloat/e5-large-v2`](https://huggingface.co/intfloat/e5-large-v2) model on the [`smangrul/amazon_esci`](https://huggingface.co/datasets/smangrul/amazon_esci) dataset for semantic similarity tasks. Feel free to explore the script to learn how things work in greater detail!
|
||||
|
||||
## Setup
|
||||
|
||||
Start by installing 🤗 PEFT from [source](https://github.com/huggingface/peft), and then navigate to the directory containing the training script for fine-tuning the embedding model with LoRA:
|
||||
|
||||
```bash
|
||||
cd peft/examples/feature_extraction
|
||||
```
|
||||
|
||||
Install all the required libraries with:
|
||||
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
Next, import all the necessary libraries:
|
||||
|
||||
- 🤗 Transformers for loading the `intfloat/e5-large-v2` model and tokenizer
|
||||
- 🤗 Accelerate for the training loop
|
||||
- 🤗 Datasets for loading and preparing the `smangrul/amazon_esci` dataset for training and inference
|
||||
- 🤗 Evaluate for evaluating the model's performance
|
||||
- 🤗 PEFT for setting up the LoRA configuration and creating the PEFT model
|
||||
- 🤗 huggingface_hub for uploading the trained model to HF hub
|
||||
- hnswlib for creating the search index and doing fast approximate nearest neighbor search
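
Putting that list together, the import block at the top of the script looks roughly like the following sketch (the exact symbols are assumptions based on how the libraries are used later in this guide):

```py
# Rough consolidation of the imports described above; exact names are assumptions.
import torch
import torch.nn as nn
import numpy as np
import hnswlib                                      # fast approximate nearest neighbor search
import evaluate                                     # metrics
from tqdm import tqdm
from datasets import load_dataset                   # loading and preparing the dataset
from transformers import AutoModel, AutoTokenizer   # base model and tokenizer
from accelerate import Accelerator                  # training loop utilities
from huggingface_hub import HfApi                   # uploading the trained model to the Hub
from peft import LoraConfig, TaskType, get_peft_model, PeftModel
```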
|
||||
|
||||
<Tip>
|
||||
|
||||
It is assumed that PyTorch with CUDA support is already installed.
|
||||
|
||||
</Tip>
|
||||
|
||||
## Train
|
||||
|
||||
Launch the training script with `accelerate launch` and pass your hyperparameters along with the `--use_peft` argument to enable LoRA.
|
||||
|
||||
This guide uses the following [`LoraConfig`]:
|
||||
|
||||
```py
|
||||
peft_config = LoraConfig(
|
||||
r=8,
|
||||
lora_alpha=16,
|
||||
bias="none",
|
||||
task_type=TaskType.FEATURE_EXTRACTION,
|
||||
target_modules=["key", "query", "value"],
|
||||
)
|
||||
```
|
||||
|
||||
Here's what a full set of script arguments may look like when running in Colab on a V100 GPU with standard RAM:
|
||||
|
||||
```bash
|
||||
accelerate launch \
|
||||
--mixed_precision="fp16" \
|
||||
peft_lora_embedding_semantic_search.py \
|
||||
--dataset_name="smangrul/amazon_esci" \
|
||||
--max_length=70 --model_name_or_path="intfloat/e5-large-v2" \
|
||||
--per_device_train_batch_size=64 \
|
||||
--per_device_eval_batch_size=128 \
|
||||
--learning_rate=5e-4 \
|
||||
--weight_decay=0.0 \
|
||||
--num_train_epochs 3 \
|
||||
--gradient_accumulation_steps=1 \
|
||||
--output_dir="results/peft_lora_e5_ecommerce_semantic_search_colab" \
|
||||
--seed=42 \
|
||||
--push_to_hub \
|
||||
--hub_model_id="smangrul/peft_lora_e5_ecommerce_semantic_search_colab" \
|
||||
--with_tracking \
|
||||
--report_to="wandb" \
|
||||
--use_peft \
|
||||
--checkpointing_steps "epoch"
|
||||
```
|
||||
|
||||
## Dataset for semantic similarity
|
||||
|
||||
The dataset we'll be using is a small subset of the [esci-data](https://github.com/amazon-science/esci-data.git) dataset (it can be found on Hub at [smangrul/amazon_esci](https://huggingface.co/datasets/smangrul/amazon_esci)).
|
||||
Each sample contains a tuple of `(query, product_title, relevance_label)` where `relevance_label` is `1` if the product matches the intent of the `query`, otherwise it is `0`.
|
||||
|
||||
Our task is to build an embedding model that can retrieve semantically similar products given a product query.
|
||||
This is usually the first stage in building a product search engine to retrieve all the potentially relevant products of a given query.
|
||||
Typically, this involves using Bi-Encoder models to cross-join the query and millions of products which could blow up quickly.
|
||||
Instead, you can use a Transformer model to retrieve the top K nearest similar products for a given query by
|
||||
embedding the query and products in the same latent embedding space.
|
||||
The millions of products are embedded offline to create a search index.
|
||||
At run time, only the query is embedded by the model, and products are retrieved from the search index with a
|
||||
fast approximate nearest neighbor search library such as [FAISS](https://github.com/facebookresearch/faiss) or [HNSWlib](https://github.com/nmslib/hnswlib).
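
The pipeline therefore splits into an offline indexing phase and a cheap online query phase. The toy sketch below illustrates that split with random vectors and brute-force scoring standing in for the trained model and the ANN index (the concrete implementations appear in the inference section later in this guide):

```py
import numpy as np

rng = np.random.default_rng(0)

def embed(texts, dim=8):
    # Stand-in for the embedding model: random unit vectors instead of e5-large-v2 outputs.
    vecs = rng.normal(size=(len(texts), dim))
    return vecs / np.linalg.norm(vecs, axis=1, keepdims=True)

catalog = ["product A", "product B", "product C"]

# Offline phase: embed the whole catalog once (a real system builds an ANN index here).
product_embeddings = embed(catalog)

# Online phase: embed only the query and rank products by inner product.
query_embedding = embed(["deep learning books"])
scores = product_embeddings @ query_embedding.T
top_k = np.argsort(-scores[:, 0])[:2]
print([catalog[i] for i in top_k])
```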
|
||||
|
||||
|
||||
The next stage involves reranking the retrieved list of products to return the most relevant ones;
|
||||
this stage can utilize cross-encoder based models to cross-join the query with the limited set of retrieved products.
|
||||
The diagram below from [awesome-semantic-search](https://github.com/rom1504/awesome-semantic-search) outlines a rough semantic search pipeline:
|
||||
|
||||
<div class="flex justify-center">
|
||||
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/semantic_search_pipeline.png"
|
||||
alt="Semantic Search Pipeline"/>
|
||||
</div>
|
||||
|
||||
For this task guide, we will explore the first stage of training an embedding model to predict semantically similar products
|
||||
given a product query.
|
||||
|
||||
## Training script deep dive
|
||||
|
||||
We finetune [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2), which tops the [MTEB benchmark](https://huggingface.co/spaces/mteb/leaderboard), using PEFT-LoRA.
|
||||
|
||||
[`AutoModelForSentenceEmbedding`] returns the query and product embeddings, and the `mean_pooling` function pools them across the sequence dimension and normalizes them:
|
||||
|
||||
```py
|
||||
import torch
import torch.nn as nn
from transformers import AutoModel


class AutoModelForSentenceEmbedding(nn.Module):
|
||||
def __init__(self, model_name, tokenizer, normalize=True):
|
||||
super(AutoModelForSentenceEmbedding, self).__init__()
|
||||
|
||||
self.model = AutoModel.from_pretrained(model_name)
|
||||
self.normalize = normalize
|
||||
self.tokenizer = tokenizer
|
||||
|
||||
def forward(self, **kwargs):
|
||||
model_output = self.model(**kwargs)
|
||||
embeddings = self.mean_pooling(model_output, kwargs["attention_mask"])
|
||||
if self.normalize:
|
||||
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
|
||||
|
||||
return embeddings
|
||||
|
||||
def mean_pooling(self, model_output, attention_mask):
|
||||
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
|
||||
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
|
||||
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
|
||||
|
||||
def __getattr__(self, name: str):
|
||||
"""Forward missing attributes to the wrapped module."""
|
||||
try:
|
||||
return super().__getattr__(name) # defer to nn.Module's logic
|
||||
except AttributeError:
|
||||
return getattr(self.model, name)
|
||||
|
||||
|
||||
def get_cosine_embeddings(query_embs, product_embs):
|
||||
return torch.sum(query_embs * product_embs, axis=1)
|
||||
|
||||
|
||||
def get_loss(cosine_score, labels):
|
||||
return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0)))
|
||||
```
|
||||
|
||||
The `get_cosine_embeddings` function computes the cosine similarity and the `get_loss` function computes the loss. The loss enables the model to learn that a cosine score of `1` for query and product pairs is relevant, and a cosine score of `0` or below is irrelevant.
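
As a quick sanity check of that behavior, you can call `get_loss` (defined in the block above) on a couple of hand-written scores; the numbers here are toy values, not data from the dataset:

```py
import torch

labels = torch.tensor([1.0, 0.0])         # one relevant pair, one irrelevant pair

good_scores = torch.tensor([0.95, 0.05])  # relevant scored high, irrelevant scored low
print(get_loss(good_scores, labels))      # tensor(0.0025) -> small loss

bad_scores = torch.tensor([0.10, 0.90])   # predictions inverted
print(get_loss(bad_scores, labels))       # tensor(0.8100) -> large loss
```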
|
||||
|
||||
Define the [`PeftConfig`] with your LoRA hyperparameters, and create a [`PeftModel`]. We use 🤗 Accelerate for handling all device management, mixed precision training, gradient accumulation, WandB tracking, and saving/loading utilities.
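
A condensed sketch of that setup is shown below. It reuses the [`LoraConfig`] from earlier in this guide, but applies LoRA directly to the base transformer rather than to the `AutoModelForSentenceEmbedding` wrapper, and the optimizer and `Accelerator` arguments are illustrative rather than copied from the script:

```py
import torch
from accelerate import Accelerator
from transformers import AutoModel
from peft import LoraConfig, TaskType, get_peft_model

base_model = AutoModel.from_pretrained("intfloat/e5-large-v2")

peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    bias="none",
    task_type=TaskType.FEATURE_EXTRACTION,
    target_modules=["key", "query", "value"],
)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()

optimizer = torch.optim.AdamW(model.parameters(), lr=5e-4)

accelerator = Accelerator(mixed_precision="fp16", gradient_accumulation_steps=1)
model, optimizer = accelerator.prepare(model, optimizer)  # dataloaders would also be prepared here
```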
|
||||
|
||||
## Results
|
||||
|
||||
The table below compares the training time, the batch size that could be fit in Colab, and the best ROC-AUC scores between a PEFT model and a fully fine-tuned model:
|
||||
|
||||
|
||||
| Training Type | Training time per epoch (Hrs) | Batch Size that fits | ROC-AUC score (higher is better) |
|
||||
| ----------------- | ------------- | ---------- | -------- |
|
||||
| Pre-Trained e5-large-v2 | - | - | 0.68 |
|
||||
| PEFT | 1.73 | 64 | 0.787 |
|
||||
| Full Fine-Tuning | 2.33 | 32 | 0.7969 |
|
||||
|
||||
The PEFT-LoRA model trains **1.35X** faster and can fit **2X** batch size compared to the fully fine-tuned model, and the performance of PEFT-LoRA is comparable to the fully fine-tuned model with a relative drop of **-1.24%** in ROC-AUC. This gap can probably be closed with bigger models as mentioned in [The Power of Scale for Parameter-Efficient Prompt Tuning](https://huggingface.co/papers/2104.08691).
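
Those figures follow directly from the table above; a quick check:

```py
print(2.33 / 1.73)                      # ~1.35x faster training per epoch
print(64 / 32)                          # 2x larger batch size
print((0.787 - 0.7969) / 0.7969 * 100)  # ~-1.24% relative drop in ROC-AUC
```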
|
||||
|
||||
## Inference
|
||||
|
||||
Let's go! Now that we have the model, we need to create a search index of all the products in our catalog.
|
||||
Please refer to `peft_lora_embedding_semantic_similarity_inference.ipynb` for the complete inference code.
|
||||
|
||||
1. Get a list of ids to products which we can call `ids_to_products_dict`:
|
||||
|
||||
```bash
|
||||
{0: 'RamPro 10" All Purpose Utility Air Tires/Wheels with a 5/8" Diameter Hole with Double Sealed Bearings (Pack of 2)',
|
||||
1: 'MaxAuto 2-Pack 13x5.00-6 2PLY Turf Mower Tractor Tire with Yellow Rim, (3" Centered Hub, 3/4" Bushings )',
|
||||
2: 'NEIKO 20601A 14.5 inch Steel Tire Spoon Lever Iron Tool Kit | Professional Tire Changing Tool for Motorcycle, Dirt Bike, Lawn Mower | 3 pcs Tire Spoons | 3 Rim Protector | Valve Tool | 6 Valve Cores',
|
||||
3: '2PK 13x5.00-6 13x5.00x6 13x5x6 13x5-6 2PLY Turf Mower Tractor Tire with Gray Rim',
|
||||
4: '(Set of 2) 15x6.00-6 Husqvarna/Poulan Tire Wheel Assy .75" Bearing',
|
||||
5: 'MaxAuto 2 Pcs 16x6.50-8 Lawn Mower Tire for Garden Tractors Ridings, 4PR, Tubeless',
|
||||
6: 'Dr.Roc Tire Spoon Lever Dirt Bike Lawn Mower Motorcycle Tire Changing Tools with Durable Bag 3 Tire Irons 2 Rim Protectors 1 Valve Stems Set TR412 TR413',
|
||||
7: 'MARASTAR 21446-2PK 15x6.00-6" Front Tire Assembly Replacement-Craftsman Mower, Pack of 2',
|
||||
8: '15x6.00-6" Front Tire Assembly Replacement for 100 and 300 Series John Deere Riding Mowers - 2 pack',
|
||||
9: 'Honda HRR Wheel Kit (2 Front 44710-VL0-L02ZB, 2 Back 42710-VE2-M02ZE)',
|
||||
10: 'Honda 42710-VE2-M02ZE (Replaces 42710-VE2-M01ZE) Lawn Mower Rear Wheel Set of 2' ...
|
||||
```
|
||||
|
||||
2. Use the trained [smangrul/peft_lora_e5_ecommerce_semantic_search_colab](https://huggingface.co/smangrul/peft_lora_e5_ecommerce_semantic_search_colab) model to get the product embeddings:
|
||||
|
||||
```py
|
||||
# base model
|
||||
model = AutoModelForSentenceEmbedding(model_name_or_path, tokenizer)
|
||||
|
||||
# peft config and wrapping
|
||||
model = PeftModel.from_pretrained(model, peft_model_id)
|
||||
|
||||
device = "cuda"
|
||||
model.to(device)
|
||||
model.eval()
|
||||
model = model.merge_and_unload()
|
||||
|
||||
import numpy as np
|
||||
num_products = len(dataset)
|
||||
d = 1024
|
||||
|
||||
product_embeddings_array = np.zeros((num_products, d))
|
||||
for step, batch in enumerate(tqdm(dataloader)):
|
||||
with torch.no_grad():
|
||||
with torch.amp.autocast(dtype=torch.bfloat16, device_type="cuda"):
|
||||
product_embs = model(**{k:v.to(device) for k, v in batch.items()}).detach().float().cpu()
|
||||
start_index = step*batch_size
|
||||
end_index = start_index+batch_size if (start_index+batch_size) < num_products else num_products
|
||||
product_embeddings_array[start_index:end_index] = product_embs
|
||||
del product_embs, batch
|
||||
```
|
||||
|
||||
3. Create a search index using HNSWlib:
|
||||
|
||||
```py
|
||||
import hnswlib


def construct_search_index(dim, num_elements, data):
|
||||
# Declaring index
|
||||
search_index = hnswlib.Index(space = 'ip', dim = dim) # possible options are l2, cosine or ip
|
||||
|
||||
# Initializing index - the maximum number of elements should be known beforehand
|
||||
search_index.init_index(max_elements = num_elements, ef_construction = 200, M = 100)
|
||||
|
||||
# Element insertion (can be called several times):
|
||||
ids = np.arange(num_elements)
|
||||
search_index.add_items(data, ids)
|
||||
|
||||
return search_index
|
||||
|
||||
product_search_index = construct_search_index(d, num_products, product_embeddings_array)
|
||||
```
|
||||
|
||||
4. Get the query embeddings and nearest neighbors:
|
||||
|
||||
```py
|
||||
def get_query_embeddings(query, model, tokenizer, device):
|
||||
inputs = tokenizer(query, padding="max_length", max_length=70, truncation=True, return_tensors="pt")
|
||||
model.eval()
|
||||
with torch.no_grad():
|
||||
query_embs = model(**{k:v.to(device) for k, v in inputs.items()}).detach().cpu()
|
||||
return query_embs[0]
|
||||
|
||||
|
||||
def get_nearest_neighbours(k, search_index, query_embeddings, ids_to_products_dict, threshold=0.7):
|
||||
# Controlling the recall by setting ef:
|
||||
search_index.set_ef(100) # ef should always be > k
|
||||
|
||||
# Query dataset, k - number of the closest elements (returns 2 numpy arrays)
|
||||
labels, distances = search_index.knn_query(query_embeddings, k = k)
|
||||
|
||||
return [(ids_to_products_dict[label], (1-distance)) for label, distance in zip(labels[0], distances[0]) if (1-distance)>=threshold]
|
||||
```
|
||||
|
||||
5. Let's test it out with the query `deep learning books`:
|
||||
|
||||
```py
|
||||
query = "deep learning books"
|
||||
k = 10
|
||||
query_embeddings = get_query_embeddings(query, model, tokenizer, device)
|
||||
search_results = get_nearest_neighbours(k, product_search_index, query_embeddings, ids_to_products_dict, threshold=0.7)
|
||||
|
||||
print(f"{query=}")
|
||||
for product, cosine_sim_score in search_results:
|
||||
print(f"cosine_sim_score={round(cosine_sim_score,2)} {product=}")
|
||||
```
|
||||
|
||||
Output:
|
||||
|
||||
```bash
|
||||
query='deep learning books'
|
||||
cosine_sim_score=0.95 product='Deep Learning (The MIT Press Essential Knowledge series)'
|
||||
cosine_sim_score=0.93 product='Practical Deep Learning: A Python-Based Introduction'
|
||||
cosine_sim_score=0.9 product='Hands-On Machine Learning with Scikit-Learn and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems'
|
||||
cosine_sim_score=0.9 product='Machine Learning: A Hands-On, Project-Based Introduction to Machine Learning for Absolute Beginners: Mastering Engineering ML Systems using Scikit-Learn and TensorFlow'
|
||||
cosine_sim_score=0.9 product='Mastering Machine Learning on AWS: Advanced machine learning in Python using SageMaker, Apache Spark, and TensorFlow'
|
||||
cosine_sim_score=0.9 product='The Hundred-Page Machine Learning Book'
|
||||
cosine_sim_score=0.89 product='Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems'
|
||||
cosine_sim_score=0.89 product='Machine Learning: A Journey from Beginner to Advanced Including Deep Learning, Scikit-learn and Tensorflow'
|
||||
cosine_sim_score=0.88 product='Mastering Machine Learning with scikit-learn'
|
||||
cosine_sim_score=0.88 product='Mastering Machine Learning with scikit-learn - Second Edition: Apply effective learning algorithms to real-world problems using scikit-learn'
|
||||
```
|
||||
|
||||
Books on deep learning and machine learning are retrieved even though `machine learning` wasn't included in the query. This means the model has learned that these books are semantically relevant to the query based on the purchase behavior of customers on Amazon.
|
||||
|
||||
The next steps would ideally involve using ONNX/TensorRT to optimize the model and using a Triton server to host it. Check out 🤗 [Optimum](https://huggingface.co/docs/optimum/index) for related optimizations for efficient serving!
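
As a starting point for that, a sketch of exporting the model to ONNX with 🤗 Optimum might look like the following; it assumes the `optimum[onnxruntime]` extra is installed and that the LoRA weights have already been merged into the base model and saved to a local directory (the path is a placeholder):

```py
# Sketch: export the merged embedding model to ONNX Runtime with 🤗 Optimum.
# Assumes `pip install optimum[onnxruntime]` and that `merged_model_dir` contains a
# model saved after calling merge_and_unload() on the PeftModel.
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer

merged_model_dir = "e5-large-v2-esci-merged"  # placeholder path

ort_model = ORTModelForFeatureExtraction.from_pretrained(merged_model_dir, export=True)
tokenizer = AutoTokenizer.from_pretrained(merged_model_dir)

ort_model.save_pretrained("e5-large-v2-esci-onnx")
tokenizer.save_pretrained("e5-large-v2-esci-onnx")
```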
|
@ -8,6 +8,10 @@ http://www.apache.org/licenses/LICENSE-2.0
|
||||
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
|
||||
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
|
||||
specific language governing permissions and limitations under the License.
|
||||
|
||||
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
|
||||
rendered properly in your Markdown viewer.
|
||||
|
||||
-->
|
||||
|
||||
# Semantic segmentation using LoRA
|
||||
@ -26,7 +30,7 @@ For more information on LoRA, please refer to the [original LoRA paper](https://
|
||||
Install the libraries required for model training:
|
||||
|
||||
```bash
|
||||
!pip install transformers accelerate evaluate datasets loralib peft -q
|
||||
!pip install transformers accelerate evaluate datasets peft -q
|
||||
```
|
||||
|
||||
## Authenticate to share your model
|
||||
@ -80,14 +84,14 @@ num_labels = len(id2label)
|
||||
## Prepare datasets for training and evaluation
|
||||
|
||||
Next, load the SegFormer image processor to prepare the images and annotations for the model. This dataset uses the
|
||||
zero-index as the background class, so make sure to set `reduce_labels=True` to subtract one from all labels since the
|
||||
zero-index as the background class, so make sure to set `do_reduce_labels=True` to subtract one from all labels since the
|
||||
background class is not among the 150 classes.
|
||||
|
||||
```python
|
||||
from transformers import AutoImageProcessor
|
||||
|
||||
checkpoint = "nvidia/mit-b0"
|
||||
image_processor = AutoImageProcessor.from_pretrained(checkpoint, reduce_labels=True)
|
||||
image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)
|
||||
```
|
||||
|
||||
Add a function to apply data augmentation to the images, so that the model is more robust against overfitting. Here we use the
|
||||
@ -180,7 +184,7 @@ def compute_metrics(eval_pred):
|
||||
references=labels,
|
||||
num_labels=len(id2label),
|
||||
ignore_index=0,
|
||||
reduce_labels=image_processor.reduce_labels,
|
||||
reduce_labels=image_processor.do_reduce_labels,
|
||||
)
|
||||
|
||||
per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
|
@ -1,251 +0,0 @@
|
||||
# Prefix tuning for conditional generation
|
||||
|
||||
[[open-in-colab]]
|
||||
|
||||
Prefix tuning is an additive method where only a sequence of continuous task-specific vectors is attached to the beginning of the input, or *prefix*. Only the prefix parameters are optimized and added to the hidden states in every layer of the model. The tokens of the input sequence can still attend to the prefix as *virtual tokens*. As a result, prefix tuning stores 1000x fewer parameters than a fully finetuned model, which means you can use one large language model for many tasks.
|
||||
|
||||
<Tip>
|
||||
|
||||
💡 Read [Prefix-Tuning: Optimizing Continuous Prompts for Generation](https://arxiv.org/abs/2101.00190) to learn more about prefix tuning.
|
||||
|
||||
</Tip>
|
||||
|
||||
This guide will show you how to apply prefix tuning to train a [`t5-large`](https://huggingface.co/t5-large) model on the `sentences_allagree` subset of the [financial_phrasebank](https://huggingface.co/datasets/financial_phrasebank) dataset.
|
||||
|
||||
Before you begin, make sure you have all the necessary libraries installed:
|
||||
|
||||
```bash
|
||||
!pip install -q peft transformers datasets
|
||||
```
|
||||
|
||||
## Setup
|
||||
|
||||
Start by defining the model and tokenizer, the text and label columns, and some hyperparameters so it'll be easier to start training faster later. Set the environment variable `TOKENIZERS_PARALLELISM` to `false` to disable the fast Rust-based tokenizer, which processes data in parallel by default, so you can use multiprocessing in Python.
|
||||
|
||||
```py
|
||||
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, default_data_collator, get_linear_schedule_with_warmup
|
||||
from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, PrefixTuningConfig, TaskType
|
||||
from datasets import load_dataset
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm import tqdm
|
||||
import os
|
||||
|
||||
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
|
||||
|
||||
device = "cuda"
|
||||
model_name_or_path = "t5-large"
|
||||
tokenizer_name_or_path = "t5-large"
|
||||
|
||||
text_column = "sentence"
|
||||
label_column = "text_label"
|
||||
max_length = 128
|
||||
lr = 1e-2
|
||||
num_epochs = 5
|
||||
batch_size = 8
|
||||
```
|
||||
|
||||
## Load dataset
|
||||
|
||||
For this guide, you'll train on the `sentences_allagree` subset of the [`financial_phrasebank`](https://huggingface.co/datasets/financial_phrasebank) dataset. This dataset contains financial news categorized by sentiment.
|
||||
|
||||
Use the 🤗 [Datasets](https://huggingface.co/docs/datasets/index) [`~datasets.Dataset.train_test_split`] function to create a training and validation split and convert the `label` value to the more readable `text_label`. All of the changes can be applied with the [`~datasets.Dataset.map`] function:
|
||||
|
||||
```py
|
||||
from datasets import load_dataset
|
||||
|
||||
dataset = load_dataset("financial_phrasebank", "sentences_allagree")
|
||||
dataset = dataset["train"].train_test_split(test_size=0.1)
|
||||
dataset["validation"] = dataset["test"]
|
||||
del dataset["test"]
|
||||
|
||||
classes = dataset["train"].features["label"].names
|
||||
dataset = dataset.map(
|
||||
lambda x: {"text_label": [classes[label] for label in x["label"]]},
|
||||
batched=True,
|
||||
num_proc=1,
|
||||
)
|
||||
|
||||
dataset["train"][0]
|
||||
{"sentence": "Profit before taxes was EUR 4.0 mn , down from EUR 4.9 mn .", "label": 0, "text_label": "negative"}
|
||||
```
|
||||
|
||||
## Preprocess dataset
|
||||
|
||||
Initialize a tokenizer, and create a function to pad and truncate the `model_inputs` and `labels`:
|
||||
|
||||
```py
|
||||
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
|
||||
|
||||
|
||||
def preprocess_function(examples):
|
||||
inputs = examples[text_column]
|
||||
targets = examples[label_column]
|
||||
model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt")
|
||||
labels = tokenizer(targets, max_length=2, padding="max_length", truncation=True, return_tensors="pt")
|
||||
labels = labels["input_ids"]
|
||||
labels[labels == tokenizer.pad_token_id] = -100
|
||||
model_inputs["labels"] = labels
|
||||
return model_inputs
|
||||
```
|
||||
|
||||
Use the [`~datasets.Dataset.map`] function to apply the `preprocess_function` to the dataset. You can remove the unprocessed columns since the model doesn't need them anymore:
|
||||
|
||||
```py
|
||||
processed_datasets = dataset.map(
|
||||
preprocess_function,
|
||||
batched=True,
|
||||
num_proc=1,
|
||||
remove_columns=dataset["train"].column_names,
|
||||
load_from_cache_file=False,
|
||||
desc="Running tokenizer on dataset",
|
||||
)
|
||||
```
|
||||
|
||||
Create a [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) from the `train` and `eval` datasets. Set `pin_memory=True` to speed up the data transfer to the GPU during training if the samples in your dataset are on a CPU.
|
||||
|
||||
```py
|
||||
train_dataset = processed_datasets["train"]
|
||||
eval_dataset = processed_datasets["validation"]
|
||||
|
||||
train_dataloader = DataLoader(
|
||||
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True
|
||||
)
|
||||
eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)
|
||||
```
|
||||
|
||||
## Train model
|
||||
|
||||
Now you can set up your model and make sure it is ready for training. Specify the task in [`PrefixTuningConfig`], create the base `t5-large` model from [`~transformers.AutoModelForSeq2SeqLM`], and then wrap the model and configuration in a [`PeftModel`]. Feel free to print the [`PeftModel`]'s parameters and compare it to fully training all the model parameters to see how much more efficient it is!
|
||||
|
||||
```py
|
||||
peft_config = PrefixTuningConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, num_virtual_tokens=20)
|
||||
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
"trainable params: 983040 || all params: 738651136 || trainable%: 0.13308583065659835"
|
||||
```
|
||||
|
||||
Setup the optimizer and learning rate scheduler:
|
||||
|
||||
```py
|
||||
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
|
||||
lr_scheduler = get_linear_schedule_with_warmup(
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=0,
|
||||
num_training_steps=(len(train_dataloader) * num_epochs),
|
||||
)
|
||||
```
|
||||
|
||||

Move the model to the GPU, and then write a training loop to begin!

```py
model = model.to(device)

for epoch in range(num_epochs):
    model.train()
    total_loss = 0
    for step, batch in enumerate(tqdm(train_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        outputs = model(**batch)
        loss = outputs.loss
        total_loss += loss.detach().float()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()
        optimizer.zero_grad()

    model.eval()
    eval_loss = 0
    eval_preds = []
    for step, batch in enumerate(tqdm(eval_dataloader)):
        batch = {k: v.to(device) for k, v in batch.items()}
        with torch.no_grad():
            outputs = model(**batch)
        loss = outputs.loss
        eval_loss += loss.detach().float()
        eval_preds.extend(
            tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)
        )

    eval_epoch_loss = eval_loss / len(eval_dataloader)
    eval_ppl = torch.exp(eval_epoch_loss)
    train_epoch_loss = total_loss / len(train_dataloader)
    train_ppl = torch.exp(train_epoch_loss)
    print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}")
```

Let's see how well the model performs on the validation set:

```py
correct = 0
total = 0
for pred, true in zip(eval_preds, dataset["validation"]["text_label"]):
    if pred.strip() == true.strip():
        correct += 1
    total += 1
accuracy = correct / total * 100
print(f"{accuracy=} % on the evaluation dataset")
print(f"{eval_preds[:10]=}")
print(f"{dataset['validation']['text_label'][:10]=}")
"accuracy=97.3568281938326 % on the evaluation dataset"
"eval_preds[:10]=['neutral', 'positive', 'neutral', 'positive', 'neutral', 'negative', 'negative', 'neutral', 'neutral', 'neutral']"
"dataset['validation']['text_label'][:10]=['neutral', 'positive', 'neutral', 'positive', 'neutral', 'negative', 'negative', 'neutral', 'neutral', 'neutral']"
```

97% accuracy in just a few minutes; pretty good!

## Share model

You can store and share your model on the Hub if you'd like. Log in to your Hugging Face account and enter your token when prompted:

```py
from huggingface_hub import notebook_login

notebook_login()
```

Upload the model to a specific model repository on the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] function:

```py
peft_model_id = "your-name/t5-large_PREFIX_TUNING_SEQ2SEQ"
model.push_to_hub("your-name/t5-large_PREFIX_TUNING_SEQ2SEQ", use_auth_token=True)
```

If you check the model file size in the repository, you'll see that it is only 3.93MB! 🤏

## Inference

Once the model has been uploaded to the Hub, anyone can easily use it for inference. Load the configuration and model:

```py
from peft import PeftModel, PeftConfig

peft_model_id = "stevhliu/t5-large_PREFIX_TUNING_SEQ2SEQ"

config = PeftConfig.from_pretrained(peft_model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)
model = PeftModel.from_pretrained(model, peft_model_id)
```

Get and tokenize some text about financial news:

```py
inputs = tokenizer(
    "The Lithuanian beer market made up 14.41 million liters in January , a rise of 0.8 percent from the year-earlier figure , the Lithuanian Brewers ' Association reporting citing the results from its members .",
    return_tensors="pt",
)
```

Put the model on a GPU and *generate* the predicted text sentiment:

```py
model.to(device)

with torch.no_grad():
    inputs = {k: v.to(device) for k, v in inputs.items()}
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))
["positive"]
```

@@ -1,3 +1,7 @@
<!--⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->

# LoRA for token classification

Low-Rank Adaptation (LoRA) is a reparametrization method that aims to reduce the number of trainable parameters with low-rank representations. The weight matrix is broken down into low-rank matrices that are trained and updated. All the pretrained model parameters remain frozen. After training, the low-rank matrices are added back to the original weights. This makes it more efficient to store and train a LoRA model because there are significantly fewer parameters.
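
To make the reparametrization concrete, here is a minimal sketch of the idea (illustrative only, not PEFT's actual implementation): a frozen weight `W` is augmented with a trainable low-rank product `B @ A`, so only the two small factors are updated during training.

```py
# Minimal sketch of the LoRA idea (illustrative only, not PEFT's implementation).
import torch
import torch.nn as nn


class LoRALinear(nn.Module):
    def __init__(self, d_in, d_out, r=8):
        super().__init__()
        # frozen pretrained weight
        self.weight = nn.Parameter(torch.randn(d_out, d_in), requires_grad=False)
        # trainable low-rank factors; B starts at zero so training starts from the pretrained behavior
        self.lora_A = nn.Parameter(torch.randn(r, d_in) * 0.01)
        self.lora_B = nn.Parameter(torch.zeros(d_out, r))

    def forward(self, x):
        # y = x @ (W + B A)^T -- the low-rank update is added onto the frozen weight
        return x @ (self.weight + self.lora_B @ self.lora_A).T


layer = LoRALinear(1024, 1024, r=8)
print(sum(p.numel() for p in layer.parameters() if p.requires_grad))  # 16384 trainable vs. ~1M frozen
```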

docs/source/tutorial/peft_integrations.md (new file, 141 lines)
@@ -0,0 +1,141 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# PEFT integrations

PEFT's practical benefits extend to other Hugging Face libraries like [Diffusers](https://hf.co/docs/diffusers) and [Transformers](https://hf.co/docs/transformers). One of the main benefits of PEFT is that an adapter file generated by a PEFT method is a lot smaller than the original model, which makes it super easy to manage and use multiple adapters. You can use one pretrained base model for multiple tasks by simply loading a new adapter finetuned for the task you're solving. Or you can combine multiple adapters with a text-to-image diffusion model to create new effects.

This tutorial will show you how PEFT can help you manage adapters in Diffusers and Transformers.

## Diffusers

Diffusers is a generative AI library for creating images and videos from text or images with diffusion models. LoRA is an especially popular training method for diffusion models because you can very quickly train and share diffusion models to generate images in new styles. To make it easier to use and try multiple LoRA models, Diffusers uses the PEFT library to help manage different adapters for inference.

For example, load a base model and then load the [artificialguybr/3DRedmond-V1](https://huggingface.co/artificialguybr/3DRedmond-V1) adapter for inference with the [`load_lora_weights`](https://huggingface.co/docs/diffusers/v0.24.0/en/api/loaders/lora#diffusers.loaders.LoraLoaderMixin.load_lora_weights) method. The `adapter_name` argument in the loading method is enabled by PEFT and allows you to set a name for the adapter so it is easier to reference.

```py
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")
pipeline.load_lora_weights(
    "peft-internal-testing/artificialguybr__3DRedmond-V1",
    weight_name="3DRedmond-3DRenderStyle-3DRenderAF.safetensors",
    adapter_name="3d"
)
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers.png"/>
</div>

Now let's try another cool LoRA model, [ostris/super-cereal-sdxl-lora](https://huggingface.co/ostris/super-cereal-sdxl-lora). All you need to do is load and name this new adapter with `adapter_name`, and use the [`set_adapters`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.set_adapters) method to set it as the currently active adapter.

```py
pipeline.load_lora_weights(
    "ostris/super-cereal-sdxl-lora",
    weight_name="cereal_box_sdxl_v1.safetensors",
    adapter_name="cereal"
)
pipeline.set_adapters("cereal")
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/test-lora-diffusers-2.png"/>
</div>
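
Since each adapter is registered under a name, you can also activate more than one at a time. A minimal sketch of blending the two adapters loaded above (the blending weights here are illustrative, not from this guide):

```py
# Sketch: blend the two named adapters loaded above; the weights are illustrative.
pipeline.set_adapters(["3d", "cereal"], adapter_weights=[0.7, 0.3])
image = pipeline("sushi rolls shaped like kawaii cat faces").images[0]
image
```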

Finally, you can call the [`disable_lora`](https://huggingface.co/docs/diffusers/api/loaders/unet#diffusers.loaders.UNet2DConditionLoadersMixin.disable_lora) method to restore the base model.

```py
pipeline.disable_lora()
```

Learn more about how PEFT supports Diffusers in the [Inference with PEFT](https://huggingface.co/docs/diffusers/tutorials/using_peft_for_inference) tutorial.

## Transformers

Transformers is a collection of pretrained models for all types of tasks in all modalities. You can load these models for training or inference. Many of the models are large language models (LLMs), so it makes sense to integrate PEFT with Transformers to manage and train adapters.

Load a base pretrained model to train.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```

Next, add an adapter configuration to specify how to adapt the model parameters. Call the [`~PeftModel.add_adapter`] method to add the configuration to the base model.

```py
from peft import LoraConfig

config = LoraConfig(
    lora_alpha=16,
    lora_dropout=0.1,
    r=64,
    bias="none",
    task_type="CAUSAL_LM"
)
model.add_adapter(config)
```

Now you can train the model with Transformers' [`~transformers.Trainer`] class or whichever training framework you prefer, as sketched below.
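
For instance, training with [`~transformers.Trainer`] could look roughly like the following sketch; the `train_dataset`, `tokenizer`, and hyperparameters here are illustrative assumptions, not part of this guide.

```py
# Sketch only: assumes `train_dataset` is already tokenized and `tokenizer`
# matches the facebook/opt-350m checkpoint used above.
from transformers import Trainer, TrainingArguments, DataCollatorForLanguageModeling

training_args = TrainingArguments(
    output_dir="opt-350m-lora",          # hypothetical output directory
    per_device_train_batch_size=4,
    num_train_epochs=1,
    learning_rate=2e-4,
)

trainer = Trainer(
    model=model,                         # the base model with the LoRA adapter added above
    args=training_args,
    train_dataset=train_dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```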

To use the newly trained model for inference, the [`~transformers.AutoModel`] class uses PEFT on the backend to load the adapter weights and configuration file into a base pretrained model.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```

If you're interested in comparing or using more than one adapter, you can also call the [`~PeftModel.add_adapter`] method to add the adapter configuration to the base model. The only requirement is that the adapter type must be the same (you can't mix a LoRA and LoHa adapter).

```py
from transformers import AutoModelForCausalLM
from peft import LoraConfig

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")

lora_config_1 = LoraConfig(task_type="CAUSAL_LM")  # example configuration for the first adapter
model.add_adapter(lora_config_1, adapter_name="adapter_1")
```

Call [`~PeftModel.add_adapter`] again to attach a new adapter to the base model.

```py
lora_config_2 = LoraConfig(task_type="CAUSAL_LM")  # example configuration for the second adapter
model.add_adapter(lora_config_2, adapter_name="adapter_2")
```

Then you can use [`~PeftModel.set_adapter`] to set the currently active adapter.

```py
model.set_adapter("adapter_1")
output = model.generate(**inputs)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

To disable the adapter, call the [`~PeftModel.disable_adapter`] method.

```py
model.disable_adapter()
```

If you're curious, check out the [Load and train adapters with PEFT](https://huggingface.co/docs/transformers/main/peft) tutorial to learn more.

docs/source/tutorial/peft_model_config.md (new file, 182 lines)
@@ -0,0 +1,182 @@
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# PEFT configurations and models

The sheer size of today's large pretrained models - which commonly have billions of parameters - presents a significant training challenge because they require more storage space and more computational power to crunch all those calculations. You'll need access to powerful GPUs or TPUs to train these large pretrained models which is expensive, not widely accessible to everyone, not environmentally friendly, and not very practical. PEFT methods address many of these challenges. There are several types of PEFT methods (soft prompting, matrix decomposition, adapters), but they all focus on the same thing: reducing the number of trainable parameters. This makes it more accessible to train and store large models on consumer hardware.

The PEFT library is designed to help you quickly train large models on free or low-cost GPUs, and in this tutorial, you'll learn how to set up a configuration to apply a PEFT method to a pretrained base model for training. Once the PEFT configuration is set up, you can use any training framework you like (Transformers' [`~transformers.Trainer`] class, [Accelerate](https://hf.co/docs/accelerate), a custom PyTorch training loop).

## PEFT configurations

<Tip>

Learn more about the parameters you can configure for each PEFT method in their respective API reference page.

</Tip>

A configuration stores important parameters that specify how a particular PEFT method should be applied.

For example, take a look at the following [`LoraConfig`](https://huggingface.co/ybelkada/opt-350m-lora/blob/main/adapter_config.json) for applying LoRA and [`PromptEncoderConfig`](https://huggingface.co/smangrul/roberta-large-peft-p-tuning/blob/main/adapter_config.json) for applying p-tuning (these configuration files are already JSON-serialized). Whenever you load a PEFT adapter, it is a good idea to check whether it has an associated adapter_config.json file, which is required.

<hfoptions id="config">
<hfoption id="LoraConfig">

```json
{
  "base_model_name_or_path": "facebook/opt-350m", #base model to apply LoRA to
  "bias": "none",
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layers_pattern": null,
  "layers_to_transform": null,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "modules_to_save": null,
  "peft_type": "LORA", #PEFT method type
  "r": 16,
  "revision": null,
  "target_modules": [
    "q_proj", #model modules to apply LoRA to (query and value projection layers)
    "v_proj"
  ],
  "task_type": "CAUSAL_LM" #type of task to train model on
}
```

You can create your own configuration for training by initializing a [`LoraConfig`].

```py
from peft import LoraConfig, TaskType

lora_config = LoraConfig(
    r=16,
    target_modules=["q_proj", "v_proj"],
    task_type=TaskType.CAUSAL_LM,
    lora_alpha=32,
    lora_dropout=0.05
)
```

</hfoption>
<hfoption id="PromptEncoderConfig">

```json
{
  "base_model_name_or_path": "roberta-large", #base model to apply p-tuning to
  "encoder_dropout": 0.0,
  "encoder_hidden_size": 128,
  "encoder_num_layers": 2,
  "encoder_reparameterization_type": "MLP",
  "inference_mode": true,
  "num_attention_heads": 16,
  "num_layers": 24,
  "num_transformer_submodules": 1,
  "num_virtual_tokens": 20,
  "peft_type": "P_TUNING", #PEFT method type
  "task_type": "SEQ_CLS", #type of task to train model on
  "token_dim": 1024
}
```

You can create your own configuration for training by initializing a [`PromptEncoderConfig`].

```py
from peft import PromptEncoderConfig, TaskType

p_tuning_config = PromptEncoderConfig(
    encoder_reparameterization_type="MLP",
    encoder_hidden_size=128,
    num_attention_heads=16,
    num_layers=24,
    num_transformer_submodules=1,
    num_virtual_tokens=20,
    token_dim=1024,
    task_type=TaskType.SEQ_CLS
)
```

</hfoption>
</hfoptions>
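
If you want to inspect an adapter's adapter_config.json before loading it, a minimal sketch of how to do that (using the ybelkada/opt-350m-lora repository linked above purely as an example):

```py
# Sketch: download and read an adapter's config file (the repo id is only an example).
import json
from huggingface_hub import hf_hub_download

config_path = hf_hub_download(repo_id="ybelkada/opt-350m-lora", filename="adapter_config.json")
with open(config_path) as f:
    adapter_config = json.load(f)
print(adapter_config["peft_type"], adapter_config["task_type"])  # e.g. LORA CAUSAL_LM
```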

## PEFT models

With a PEFT configuration in hand, you can now apply it to any pretrained model to create a [`PeftModel`]. Choose from any of the state-of-the-art models from the [Transformers](https://hf.co/docs/transformers) library, a custom model, and even new and unsupported transformer architectures.

For this tutorial, load a base [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model to finetune.

```py
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```

Use the [`get_peft_model`] function to create a [`PeftModel`] from the base facebook/opt-350m model and the `lora_config` you created earlier.

```py
from peft import get_peft_model

lora_model = get_peft_model(model, lora_config)
lora_model.print_trainable_parameters()
"trainable params: 1,572,864 || all params: 332,769,280 || trainable%: 0.472659014678278"
```

Now you can train the [`PeftModel`] with your preferred training framework! After training, you can save your model locally with [`~PeftModel.save_pretrained`] or upload it to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method.

```py
# save locally
lora_model.save_pretrained("your-name/opt-350m-lora")

# push to Hub
lora_model.push_to_hub("your-name/opt-350m-lora")
```

To load a [`PeftModel`] for inference, you'll need to provide the [`PeftConfig`] used to create it and the base model it was trained from.

```py
from peft import PeftModel, PeftConfig

config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora")
```

<Tip>

By default, the [`PeftModel`] is set for inference, but if you'd like to train the adapter some more you can set `is_trainable=True`.

```py
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora", is_trainable=True)
```

</Tip>

The [`PeftModel.from_pretrained`] method is the most flexible way to load a [`PeftModel`] because it doesn't matter what model framework was used (Transformers, timm, a generic PyTorch model). Other classes, like [`AutoPeftModel`], are just convenient wrappers around the base [`PeftModel`], and make it easier to load PEFT models directly from the Hub or locally where the PEFT weights are stored.

```py
from peft import AutoPeftModelForCausalLM

lora_model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```

Take a look at the [AutoPeftModel](package_reference/auto_class) API reference to learn more about the [`AutoPeftModel`] classes.

## Next steps

With the appropriate [`PeftConfig`], you can apply it to any pretrained model to create a [`PeftModel`] and train large powerful models faster on freely available GPUs! To learn more about PEFT configurations and models, the following guide may be helpful:

* Learn how to configure a PEFT method for models that aren't from Transformers in the [Working with custom models](../developer_guides/custom_models) guide.
@@ -124,10 +124,10 @@
|
||||
" inputs = [f\"{text_column} : {x} Label : \" for x in examples[text_column]]\n",
|
||||
" targets = [str(x) for x in examples[label_column]]\n",
|
||||
" model_inputs = tokenizer(inputs)\n",
|
||||
" labels = tokenizer(targets)\n",
|
||||
" labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs\n",
|
||||
" for i in range(batch_size):\n",
|
||||
" sample_input_ids = model_inputs[\"input_ids\"][i]\n",
|
||||
" label_input_ids = labels[\"input_ids\"][i] + [tokenizer.pad_token_id]\n",
|
||||
" label_input_ids = labels[\"input_ids\"][i] + [tokenizer.eos_token_id]\n",
|
||||
" # print(i, sample_input_ids, label_input_ids)\n",
|
||||
" model_inputs[\"input_ids\"][i] = sample_input_ids + label_input_ids\n",
|
||||
" labels[\"input_ids\"][i] = [-100] * len(sample_input_ids) + label_input_ids\n",
|
||||
@@ -210,6 +210,23 @@
|
||||
"print(next(iter(test_dataloader)))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "42b14a11",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can load model from hub or local\n",
|
||||
"\n",
|
||||
"- Load model from Hugging Face Hub, you can change to your own model id\n",
|
||||
"```python\n",
|
||||
"peft_model_id = \"username/twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM\"\n",
|
||||
"```\n",
|
||||
"- Or load model form local\n",
|
||||
"```python\n",
|
||||
"peft_model_id = \"twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM\"\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
@@ -244,7 +261,6 @@
|
||||
"\n",
|
||||
"max_memory = {0: \"1GIB\", 1: \"1GIB\", 2: \"2GIB\", 3: \"10GIB\", \"cpu\": \"30GB\"}\n",
|
||||
"peft_model_id = \"smangrul/twitter_complaints_bigscience_bloomz-7b1_LORA_CAUSAL_LM\"\n",
|
||||
"\n",
|
||||
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, device_map=\"auto\", max_memory=max_memory)\n",
|
||||
"model = PeftModel.from_pretrained(model, peft_model_id, device_map=\"auto\", max_memory=max_memory)"
|
||||
|
@@ -136,10 +136,10 @@ def main():
|
||||
inputs = [f"{text_column} : {x} Label : " for x in examples[text_column]]
|
||||
targets = [str(x) for x in examples[label_column]]
|
||||
model_inputs = tokenizer(inputs)
|
||||
labels = tokenizer(targets)
|
||||
labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs
|
||||
for i in range(batch_size):
|
||||
sample_input_ids = model_inputs["input_ids"][i]
|
||||
label_input_ids = labels["input_ids"][i] + [tokenizer.pad_token_id]
|
||||
label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id]
|
||||
model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
|
||||
labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
|
||||
model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
|
||||
@@ -349,12 +349,21 @@ def main():
|
||||
pred_df.to_csv(f"data/{dataset_name}/predictions.csv", index=False)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
model.push_to_hub(
|
||||
"smangrul/"
|
||||
+ f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
|
||||
state_dict=accelerator.get_state_dict(model),
|
||||
use_auth_token=True,
|
||||
# Option1: Pushing the model to Hugging Face Hub
|
||||
# model.push_to_hub(
|
||||
# f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
|
||||
# token = "hf_..."
|
||||
# )
|
||||
# token (`bool` or `str`, *optional*):
|
||||
# `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated
|
||||
# when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
|
||||
# is not specified.
|
||||
# Or you can get your token from https://huggingface.co/settings/token
|
||||
# Option2: Saving the model locally
|
||||
peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace(
|
||||
"/", "_"
|
||||
)
|
||||
model.save_pretrained(peft_model_id)
|
||||
accelerator.wait_for_everyone()
|
||||
|
||||
|
||||
|
File diff suppressed because it is too large
@@ -173,10 +173,10 @@
|
||||
" inputs = [f\"{text_column} : {x} Label : \" for x in examples[text_column]]\n",
|
||||
" targets = [str(x) for x in examples[label_column]]\n",
|
||||
" model_inputs = tokenizer(inputs)\n",
|
||||
" labels = tokenizer(targets)\n",
|
||||
" labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs\n",
|
||||
" for i in range(batch_size):\n",
|
||||
" sample_input_ids = model_inputs[\"input_ids\"][i]\n",
|
||||
" label_input_ids = labels[\"input_ids\"][i] + [tokenizer.pad_token_id]\n",
|
||||
" label_input_ids = labels[\"input_ids\"][i] + [tokenizer.eos_token_id]\n",
|
||||
" # print(i, sample_input_ids, label_input_ids)\n",
|
||||
" model_inputs[\"input_ids\"][i] = sample_input_ids + label_input_ids\n",
|
||||
" labels[\"input_ids\"][i] = [-100] * len(sample_input_ids) + label_input_ids\n",
|
||||
@@ -1228,6 +1228,33 @@
|
||||
" print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0e21c49b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can push model to hub or save model locally. \n",
|
||||
"\n",
|
||||
"- Option1: Pushing the model to Hugging Face Hub\n",
|
||||
"```python\n",
|
||||
"model.push_to_hub(\n",
|
||||
" f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\"/\", \"_\"),\n",
|
||||
" token = \"hf_...\"\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"token (`bool` or `str`, *optional*):\n",
|
||||
" `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated\n",
|
||||
" when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`\n",
|
||||
" is not specified.\n",
|
||||
" Or you can get your token from https://huggingface.co/settings/token\n",
|
||||
"```\n",
|
||||
"- Or save model locally\n",
|
||||
"```python\n",
|
||||
"peft_model_id = f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\"/\", \"_\")\n",
|
||||
"model.save_pretrained(peft_model_id)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
@@ -1236,7 +1263,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# saving model\n",
|
||||
"peft_model_id = f\"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\"\n",
|
||||
"peft_model_id = f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\n",
|
||||
" \"/\", \"_\"\n",
|
||||
")\n",
|
||||
"model.save_pretrained(peft_model_id)"
|
||||
]
|
||||
},
|
||||
@@ -1260,7 +1289,9 @@
|
||||
"source": [
|
||||
"from peft import PeftModel, PeftConfig\n",
|
||||
"\n",
|
||||
"peft_model_id = f\"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\"\n",
|
||||
"peft_model_id = f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\n",
|
||||
" \"/\", \"_\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)\n",
|
||||
|
@@ -83,10 +83,10 @@
|
||||
" inputs = [f\"{text_column} : {x} Label : \" for x in examples[text_column]]\n",
|
||||
" targets = [str(x) for x in examples[label_column]]\n",
|
||||
" model_inputs = tokenizer(inputs)\n",
|
||||
" labels = tokenizer(targets)\n",
|
||||
" labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs\n",
|
||||
" for i in range(batch_size):\n",
|
||||
" sample_input_ids = model_inputs[\"input_ids\"][i]\n",
|
||||
" label_input_ids = labels[\"input_ids\"][i] + [tokenizer.pad_token_id]\n",
|
||||
" label_input_ids = labels[\"input_ids\"][i] + [tokenizer.eos_token_id]\n",
|
||||
" # print(i, sample_input_ids, label_input_ids)\n",
|
||||
" model_inputs[\"input_ids\"][i] = sample_input_ids + label_input_ids\n",
|
||||
" labels[\"input_ids\"][i] = [-100] * len(sample_input_ids) + label_input_ids\n",
|
||||
@@ -1072,6 +1072,33 @@
|
||||
" print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "c8f35152",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"You can push model to hub or save model locally. \n",
|
||||
"\n",
|
||||
"- Option1: Pushing the model to Hugging Face Hub\n",
|
||||
"```python\n",
|
||||
"model.push_to_hub(\n",
|
||||
" f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\"/\", \"_\"),\n",
|
||||
" token = \"hf_...\"\n",
|
||||
")\n",
|
||||
"```\n",
|
||||
"token (`bool` or `str`, *optional*):\n",
|
||||
" `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated\n",
|
||||
" when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`\n",
|
||||
" is not specified.\n",
|
||||
" Or you can get your token from https://huggingface.co/settings/token\n",
|
||||
"```\n",
|
||||
"- Or save model locally\n",
|
||||
"```python\n",
|
||||
"peft_model_id = f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\"/\", \"_\")\n",
|
||||
"model.save_pretrained(peft_model_id)\n",
|
||||
"```"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
@@ -1080,7 +1107,9 @@
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# saving model\n",
|
||||
"peft_model_id = f\"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\"\n",
|
||||
"peft_model_id = f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\n",
|
||||
" \"/\", \"_\"\n",
|
||||
")\n",
|
||||
"model.save_pretrained(peft_model_id)"
|
||||
]
|
||||
},
|
||||
@@ -1116,7 +1145,9 @@
|
||||
"source": [
|
||||
"from peft import PeftModel, PeftConfig\n",
|
||||
"\n",
|
||||
"peft_model_id = f\"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\"\n",
|
||||
"peft_model_id = f\"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\".replace(\n",
|
||||
" \"/\", \"_\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)\n",
|
||||
|
@@ -1,6 +1,5 @@
|
||||
transformers
|
||||
accelerate
|
||||
loralib
|
||||
evaluate
|
||||
deepspeed
|
||||
tqdm
|
||||
|
examples/conditional_generation/multitask_prompt_tuning.ipynb (new file, 408 lines)
@@ -0,0 +1,408 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "58ff91ca-ce92-43d0-ae8b-4e9e89e193f6",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from datasets import load_dataset\n",
|
||||
"from transformers import set_seed, AutoModelForSeq2SeqLM, AutoTokenizer\n",
|
||||
"from peft import get_peft_model, MultitaskPromptTuningConfig, TaskType, MultitaskPromptTuningInit\n",
|
||||
"\n",
|
||||
"set_seed(42)\n",
|
||||
"\n",
|
||||
"model_name = \"google/flan-t5-base\"\n",
|
||||
"\n",
|
||||
"peft_config = MultitaskPromptTuningConfig(\n",
|
||||
" tokenizer_name_or_path=model_name,\n",
|
||||
" num_tasks=2,\n",
|
||||
" task_type=TaskType.SEQ_2_SEQ_LM,\n",
|
||||
" prompt_tuning_init=MultitaskPromptTuningInit.TEXT,\n",
|
||||
" num_virtual_tokens=50,\n",
|
||||
" num_transformer_submodules=1,\n",
|
||||
" prompt_tuning_init_text=\"classify the following into either positive or negative, or entailment, neutral or contradiction:\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
|
||||
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name)\n",
|
||||
"model = get_peft_model(model, peft_config)\n",
|
||||
"\n",
|
||||
"model = model.cuda()\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def send_to_device(batch):\n",
|
||||
" for i in batch:\n",
|
||||
" batch[i] = batch[i].cuda()\n",
|
||||
" return batch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "eb112bc1-ffaf-49fa-a216-0d601ec304ee",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def get_sst2(split: str):\n",
|
||||
" examples = load_dataset(\"sst2\")[split]\n",
|
||||
" result_examples = []\n",
|
||||
" for example in examples:\n",
|
||||
" result_examples.append({})\n",
|
||||
"\n",
|
||||
" result_examples[-1][\"input\"] = example[\"sentence\"].strip() + \"</s>\"\n",
|
||||
" result_examples[-1][\"output\"] = (\n",
|
||||
" f\"positive{tokenizer.eos_token}\" if example[\"label\"] == 1 else f\"negative{tokenizer.eos_token}\"\n",
|
||||
" )\n",
|
||||
" result_examples[-1][\"task_id\"] = 0\n",
|
||||
"\n",
|
||||
" return result_examples\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def get_mnli(split: str):\n",
|
||||
" examples = load_dataset(\"multi_nli\")[split]\n",
|
||||
" result_examples = []\n",
|
||||
" for example in examples:\n",
|
||||
" result_examples.append({})\n",
|
||||
"\n",
|
||||
" result_examples[-1][\"input\"] = example[\"premise\"].strip() + \" \" + example[\"hypothesis\"].strip() + \"</s>\"\n",
|
||||
"\n",
|
||||
" if example[\"label\"] == 0:\n",
|
||||
" result_examples[-1][\"output\"] = f\"entailment{tokenizer.eos_token}\"\n",
|
||||
" elif example[\"label\"] == 1:\n",
|
||||
" result_examples[-1][\"output\"] = f\"neutral{tokenizer.eos_token}\"\n",
|
||||
" else:\n",
|
||||
" result_examples[-1][\"output\"] = f\"contradiction{tokenizer.eos_token}\"\n",
|
||||
"\n",
|
||||
" result_examples[-1][\"task_id\"] = 1\n",
|
||||
"\n",
|
||||
" return result_examples"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "e5a16ec4-8fef-4ba9-95b6-a661eb51e50c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from typing import Tuple\n",
|
||||
"from torch.utils.data import Dataset, DataLoader\n",
|
||||
"import torch\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class MyDataset(Dataset):\n",
|
||||
" def __init__(self, split: str, mode: str = \"source\") -> None:\n",
|
||||
" super().__init__()\n",
|
||||
"\n",
|
||||
" if split == \"train\":\n",
|
||||
" if mode == \"source\":\n",
|
||||
" self.examples = get_sst2(split) + get_mnli(split)\n",
|
||||
" elif mode == \"target\":\n",
|
||||
" self.examples = get_sst2(split)\n",
|
||||
" if split == \"val\":\n",
|
||||
" self.examples = get_sst2(\"validation\")\n",
|
||||
" if split == \"test\":\n",
|
||||
" self.examples = get_sst2(\"validation\")\n",
|
||||
"\n",
|
||||
" def __getitem__(self, index) -> dict:\n",
|
||||
" return self.examples[index]\n",
|
||||
"\n",
|
||||
" def __len__(self) -> int:\n",
|
||||
" return len(self.examples)\n",
|
||||
"\n",
|
||||
" def __getitem__(self, index) -> dict:\n",
|
||||
" return self.examples[index]\n",
|
||||
"\n",
|
||||
" def __len__(self) -> int:\n",
|
||||
" return len(self.examples)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def collate_fn(batch: dict) -> Tuple[torch.Tensor, torch.Tensor]:\n",
|
||||
" input = [i[\"input\"] for i in batch]\n",
|
||||
" input = tokenizer(input, add_special_tokens=False, return_tensors=\"pt\", padding=True)\n",
|
||||
"\n",
|
||||
" output = [i[\"output\"] for i in batch]\n",
|
||||
" output = tokenizer(output, add_special_tokens=False, return_tensors=\"pt\", padding=True).input_ids\n",
|
||||
" output[output == tokenizer.pad_token_id] = -100\n",
|
||||
"\n",
|
||||
" task_ids = [i[\"task_id\"] for i in batch]\n",
|
||||
" task_ids = torch.tensor(task_ids)\n",
|
||||
"\n",
|
||||
" return {\n",
|
||||
" \"input_ids\": input.input_ids,\n",
|
||||
" \"attention_mask\": input.attention_mask,\n",
|
||||
" \"labels\": output,\n",
|
||||
" \"task_ids\": task_ids,\n",
|
||||
" }\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"train = DataLoader(MyDataset(\"train\"), shuffle=True, batch_size=8, collate_fn=collate_fn)\n",
|
||||
"val = DataLoader(MyDataset(\"val\"), shuffle=False, batch_size=8, collate_fn=collate_fn)\n",
|
||||
"test = DataLoader(MyDataset(\"test\"), shuffle=False, batch_size=8, collate_fn=collate_fn)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fe0aec7b-f61e-4b00-a90e-c1201dc1f84c",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## source training"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "cceecc94-f43a-4f62-8d45-926f2f02f36d",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from torch.optim.adamw import AdamW\n",
|
||||
"from transformers import get_cosine_schedule_with_warmup\n",
|
||||
"from tqdm import tqdm\n",
|
||||
"from sklearn.metrics import f1_score"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "eae5516b-73ab-44a8-a083-4e8de6127f30",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"POSITIVE_TOKEN_ID = tokenizer(\" positive\", add_special_tokens=False)[\"input_ids\"][0]\n",
|
||||
"NEGATIVE_TOKEN_ID = tokenizer(\" negative\", add_special_tokens=False)[\"input_ids\"][0]\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def classify(batch):\n",
|
||||
" batch = send_to_device(batch)\n",
|
||||
" # we pass labels here since we need to generate and peft doesn't support generation yet.\n",
|
||||
" # No clue how to get around this\n",
|
||||
" scores = model(**batch).logits\n",
|
||||
" preds = []\n",
|
||||
" for i in range(scores.shape[0]):\n",
|
||||
" if scores[i, 0, POSITIVE_TOKEN_ID] > scores[i, 0, NEGATIVE_TOKEN_ID]:\n",
|
||||
" preds.append(POSITIVE_TOKEN_ID)\n",
|
||||
" else:\n",
|
||||
" preds.append(NEGATIVE_TOKEN_ID)\n",
|
||||
" return preds\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"@torch.inference_mode()\n",
|
||||
"def evaluate(model, data):\n",
|
||||
" loss = 0\n",
|
||||
" preds = []\n",
|
||||
" golds = []\n",
|
||||
"\n",
|
||||
" for batch in tqdm(data):\n",
|
||||
" batch = send_to_device(batch)\n",
|
||||
" loss += model(**batch).loss\n",
|
||||
" golds.extend(batch[\"labels\"][:, 0].tolist())\n",
|
||||
" preds.extend(classify(batch))\n",
|
||||
"\n",
|
||||
" return loss / len(val), f1_score(golds, preds, pos_label=POSITIVE_TOKEN_ID)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"optimizer = AdamW(model.parameters(), lr=1e-4)\n",
|
||||
"scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train))\n",
|
||||
"\n",
|
||||
"n = 1000\n",
|
||||
"step = 0\n",
|
||||
"train_ = tqdm(train)\n",
|
||||
"\n",
|
||||
"val_loss, f1 = evaluate(model, val)\n",
|
||||
"print(\n",
|
||||
" f\"\"\"\n",
|
||||
"before source training\n",
|
||||
"val loss = {val_loss}\n",
|
||||
"f1 = {f1}\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for batch in train_:\n",
|
||||
" if step % n == 0:\n",
|
||||
" val_loss, f1 = evaluate(model, val)\n",
|
||||
" print(\n",
|
||||
" f\"\"\"\n",
|
||||
"step = {step}\n",
|
||||
"val loss = {val_loss}\n",
|
||||
"f1 = {f1}\"\"\"\n",
|
||||
" )\n",
|
||||
" model.save_pretrained(f\"checkpoints_source/{step}\")\n",
|
||||
"\n",
|
||||
" step += 1\n",
|
||||
" batch = send_to_device(batch)\n",
|
||||
" loss = model(**batch).loss\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" scheduler.step()\n",
|
||||
" train_.set_postfix(train_loss=loss)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "74168ef3-66f3-41a7-a40b-7840b103fbf9",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## target training"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b09fd456-163e-4dc1-b24d-f2d0d349036c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"train = DataLoader(MyDataset(\"train\", \"target\"), shuffle=True, batch_size=8, collate_fn=collate_fn)\n",
|
||||
"val = DataLoader(MyDataset(\"val\", \"target\"), shuffle=False, batch_size=8, collate_fn=collate_fn)\n",
|
||||
"test = DataLoader(MyDataset(\"test\", \"target\"), shuffle=False, batch_size=8, collate_fn=collate_fn)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "4a539944-f16c-4c3f-bb4a-7b5d9a6042e2",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### create a fresh model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "5520d904-aa6c-4654-9335-ed4e7d76cba2",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"peft_config = MultitaskPromptTuningConfig(\n",
|
||||
" tokenizer_name_or_path=model_name,\n",
|
||||
" num_tasks=1,\n",
|
||||
" task_type=TaskType.SEQ_2_SEQ_LM,\n",
|
||||
" prompt_tuning_init=MultitaskPromptTuningInit.EXACT_SOURCE_TASK,\n",
|
||||
" prompt_tuning_init_state_dict_path=\"checkpoints_source/50000/adapter_model.bin\",\n",
|
||||
" num_virtual_tokens=50,\n",
|
||||
" num_transformer_submodules=1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
|
||||
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name)\n",
|
||||
"model = get_peft_model(model, peft_config)\n",
|
||||
"\n",
|
||||
"model = model.cuda()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "dfa39c2d-d1c5-4ed4-90f8-26e8e324371c",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"optimizer = AdamW(model.parameters(), lr=1e-4)\n",
|
||||
"scheduler = get_cosine_schedule_with_warmup(optimizer, 200, len(train))\n",
|
||||
"\n",
|
||||
"n = 1000\n",
|
||||
"step = 0\n",
|
||||
"train_ = tqdm(train)\n",
|
||||
"\n",
|
||||
"val_loss, f1 = evaluate(model, val)\n",
|
||||
"print(\n",
|
||||
" f\"\"\"\n",
|
||||
"before target training\n",
|
||||
"val loss = {val_loss}\n",
|
||||
"f1 = {f1}\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"for batch in train_:\n",
|
||||
" if step % n == 0:\n",
|
||||
" val_loss, f1 = evaluate(model, val)\n",
|
||||
" print(\n",
|
||||
" f\"\"\"\n",
|
||||
"step = {step}\n",
|
||||
"val loss = {val_loss}\n",
|
||||
"f1 = {f1}\"\"\"\n",
|
||||
" )\n",
|
||||
" model.save_pretrained(f\"checkpoints_target/{step}\")\n",
|
||||
"\n",
|
||||
" step += 1\n",
|
||||
" batch = send_to_device(batch)\n",
|
||||
" loss = model(**batch).loss\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" scheduler.step()\n",
|
||||
" train_.set_postfix(train_loss=loss)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b6a6eeda-1e09-49a6-8845-cd96c8573145",
|
||||
"metadata": {
|
||||
"tags": []
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# load last checkpoint for now\n",
|
||||
"from peft import set_peft_model_state_dict\n",
|
||||
"\n",
|
||||
"sd_6000 = torch.load(\"checkpoints_target/6000/adapter_model.bin\")\n",
|
||||
"set_peft_model_state_dict(model, sd_6000)\n",
|
||||
"\n",
|
||||
"# evaluate val\n",
|
||||
"val_loss, f1 = evaluate(model, val)\n",
|
||||
"print(\n",
|
||||
" f\"\"\"\n",
|
||||
"final\n",
|
||||
"val loss = {val_loss}\n",
|
||||
"f1 = {f1}\"\"\"\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# evaluate test\n",
|
||||
"test_loss, f1 = evaluate(model, test)\n",
|
||||
"print(\n",
|
||||
" f\"\"\"\n",
|
||||
"final\n",
|
||||
"test loss = {test_loss}\n",
|
||||
"f1 = {f1}\"\"\"\n",
|
||||
")"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.13"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
examples/conditional_generation/peft_ia3_seq2seq.ipynb (new file, 2711 lines)
File diff suppressed because it is too large
@@ -298,12 +298,22 @@ def main():
|
||||
pred_df.to_csv(f"data/{dataset_name}/predictions.csv", index=False)
|
||||
|
||||
accelerator.wait_for_everyone()
|
||||
model.push_to_hub(
|
||||
"smangrul/"
|
||||
+ f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
|
||||
state_dict=accelerator.get_state_dict(model),
|
||||
use_auth_token=True,
|
||||
# Option1: Pushing the model to Hugging Face Hub
|
||||
# model.push_to_hub(
|
||||
# f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
|
||||
# token = "hf_..."
|
||||
# )
|
||||
# token (`bool` or `str`, *optional*):
|
||||
# `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated
|
||||
# when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
|
||||
# is not specified.
|
||||
# Or you can get your token from https://huggingface.co/settings/token
|
||||
|
||||
# Option2: Saving the model locally
|
||||
peft_model_id = f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace(
|
||||
"/", "_"
|
||||
)
|
||||
model.save_pretrained(peft_model_id)
|
||||
accelerator.wait_for_everyone()
|
||||
|
||||
|
||||
|
@@ -125,11 +125,19 @@ def main():
|
||||
accelerator.print(f"{eval_preds[:10]=}")
|
||||
accelerator.print(f"{dataset['validation'][label_column][:10]=}")
|
||||
accelerator.wait_for_everyone()
|
||||
model.push_to_hub(
|
||||
"smangrul/" + f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
|
||||
state_dict=accelerator.get_state_dict(model),
|
||||
use_auth_token=True,
|
||||
)
|
||||
# Option1: Pushing the model to Hugging Face Hub
|
||||
# model.push_to_hub(
|
||||
# f"{dataset_name}_{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_"),
|
||||
# token = "hf_..."
|
||||
# )
|
||||
# token (`bool` or `str`, *optional*):
|
||||
# `token` is to be used for HTTP Bearer authorization when accessing remote files. If `True`, will use the token generated
|
||||
# when running `huggingface-cli login` (stored in `~/.huggingface`). Will default to `True` if `repo_url`
|
||||
# is not specified.
|
||||
# Or you can get your token from https://huggingface.co/settings/token
|
||||
# Option2: Saving the model locally
|
||||
peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}".replace("/", "_")
|
||||
model.save_pretrained(peft_model_id)
|
||||
accelerator.wait_for_everyone()
|
||||
|
||||
|
||||
|
examples/conditional_generation/peft_prompt_tuning_seq2seq.ipynb (new file, 804 lines)
@@ -0,0 +1,804 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "5f93b7d1",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:37:58.711225Z",
|
||||
"start_time": "2023-05-30T08:37:56.881307Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"===================================BUG REPORT===================================\n",
|
||||
"Welcome to bitsandbytes. For bug reports, please run\n",
|
||||
"\n",
|
||||
"python -m bitsandbytes\n",
|
||||
"\n",
|
||||
" and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n",
|
||||
"================================================================================\n",
|
||||
"bin /udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/libbitsandbytes_cuda117.so\n",
|
||||
"CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...\n",
|
||||
"CUDA SETUP: CUDA runtime path found: /usr/local/cuda/lib64/libcudart.so.11.0\n",
|
||||
"CUDA SETUP: Highest compute capability among GPUs detected: 8.0\n",
|
||||
"CUDA SETUP: Detected CUDA version 117\n",
|
||||
"CUDA SETUP: Loading binary /udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/libbitsandbytes_cuda117.so...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: /udir/tschilla/anaconda3 did not contain ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] as expected! Searching further paths...\n",
|
||||
" warn(msg)\n",
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('Europe/Paris')}\n",
|
||||
" warn(msg)\n",
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/udir/tschilla/.cache/dotnet_bundle_extract')}\n",
|
||||
" warn(msg)\n",
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('5002'), PosixPath('http'), PosixPath('//127.0.0.1')}\n",
|
||||
" warn(msg)\n",
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('() { ( alias;\\n eval ${which_declare} ) | /usr/bin/which --tty-only --read-alias --read-functions --show-tilde --show-dot $@\\n}')}\n",
|
||||
" warn(msg)\n",
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('module'), PosixPath('//matplotlib_inline.backend_inline')}\n",
|
||||
" warn(msg)\n",
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/bitsandbytes/cuda_setup/main.py:149: UserWarning: Found duplicate ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] files: {PosixPath('/usr/local/cuda/lib64/libcudart.so.11.0'), PosixPath('/usr/local/cuda/lib64/libcudart.so')}.. We'll flip a coin and try one of these, in order to fail forward.\n",
|
||||
"Either way, this might cause trouble in the future:\n",
|
||||
"If you get `CUDA error: invalid device function` errors, the above might be the cause and the solution is to make sure only one ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] in the paths that we search based on your env.\n",
|
||||
" warn(msg)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"import torch\n",
|
||||
"from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, default_data_collator, get_linear_schedule_with_warmup\n",
|
||||
"from peft import get_peft_model, PromptTuningConfig, TaskType, PromptTuningInit\n",
|
||||
"from torch.utils.data import DataLoader\n",
|
||||
"from tqdm import tqdm\n",
|
||||
"from datasets import load_dataset\n",
|
||||
"\n",
|
||||
"os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n",
|
||||
"\n",
|
||||
"device = \"cuda\"\n",
|
||||
"model_name_or_path = \"t5-large\"\n",
|
||||
"tokenizer_name_or_path = \"t5-large\"\n",
|
||||
"\n",
|
||||
"checkpoint_name = \"financial_sentiment_analysis_prompt_tuning_v1.pt\"\n",
|
||||
"text_column = \"sentence\"\n",
|
||||
"label_column = \"text_label\"\n",
|
||||
"max_length = 128\n",
|
||||
"lr = 1\n",
|
||||
"num_epochs = 5\n",
|
||||
"batch_size = 8"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "8d0850ac",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:38:12.413984Z",
|
||||
"start_time": "2023-05-30T08:38:04.601042Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"trainable params: 40960 || all params: 737709056 || trainable%: 0.005552324411210698\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/udir/tschilla/anaconda3/envs/peft/lib/python3.9/site-packages/transformers/models/t5/tokenization_t5_fast.py:155: FutureWarning: This tokenizer was incorrectly instantiated with a model max length of 512 which will be corrected in Transformers v5.\n",
|
||||
"For now, this behavior is kept to avoid breaking backwards compatibility when padding/encoding with `truncation is True`.\n",
|
||||
"- Be aware that you SHOULD NOT rely on t5-large automatically truncating your input to 512 when padding/encoding.\n",
|
||||
"- If you want to encode/pad to sequences longer than 512 you can either instantiate this tokenizer with `model_max_length` or pass `max_length` when encoding/padding.\n",
|
||||
"- To avoid this warning, please instantiate this tokenizer with `model_max_length` set to your preferred value.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"PeftModelForSeq2SeqLM(\n",
|
||||
" (base_model): T5ForConditionalGeneration(\n",
|
||||
" (shared): Embedding(32128, 1024)\n",
|
||||
" (encoder): T5Stack(\n",
|
||||
" (embed_tokens): Embedding(32128, 1024)\n",
|
||||
" (block): ModuleList(\n",
|
||||
" (0): T5Block(\n",
|
||||
" (layer): ModuleList(\n",
|
||||
" (0): T5LayerSelfAttention(\n",
|
||||
" (SelfAttention): T5Attention(\n",
|
||||
" (q): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (k): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (v): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (o): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (relative_attention_bias): Embedding(32, 16)\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (1): T5LayerFF(\n",
|
||||
" (DenseReluDense): T5DenseActDense(\n",
|
||||
" (wi): Linear(in_features=1024, out_features=4096, bias=False)\n",
|
||||
" (wo): Linear(in_features=4096, out_features=1024, bias=False)\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" (act): ReLU()\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" (1-23): 23 x T5Block(\n",
|
||||
" (layer): ModuleList(\n",
|
||||
" (0): T5LayerSelfAttention(\n",
|
||||
" (SelfAttention): T5Attention(\n",
|
||||
" (q): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (k): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (v): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (o): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (1): T5LayerFF(\n",
|
||||
" (DenseReluDense): T5DenseActDense(\n",
|
||||
" (wi): Linear(in_features=1024, out_features=4096, bias=False)\n",
|
||||
" (wo): Linear(in_features=4096, out_features=1024, bias=False)\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" (act): ReLU()\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" (final_layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (decoder): T5Stack(\n",
|
||||
" (embed_tokens): Embedding(32128, 1024)\n",
|
||||
" (block): ModuleList(\n",
|
||||
" (0): T5Block(\n",
|
||||
" (layer): ModuleList(\n",
|
||||
" (0): T5LayerSelfAttention(\n",
|
||||
" (SelfAttention): T5Attention(\n",
|
||||
" (q): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (k): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (v): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (o): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (relative_attention_bias): Embedding(32, 16)\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (1): T5LayerCrossAttention(\n",
|
||||
" (EncDecAttention): T5Attention(\n",
|
||||
" (q): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (k): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (v): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (o): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (2): T5LayerFF(\n",
|
||||
" (DenseReluDense): T5DenseActDense(\n",
|
||||
" (wi): Linear(in_features=1024, out_features=4096, bias=False)\n",
|
||||
" (wo): Linear(in_features=4096, out_features=1024, bias=False)\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" (act): ReLU()\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" (1-23): 23 x T5Block(\n",
|
||||
" (layer): ModuleList(\n",
|
||||
" (0): T5LayerSelfAttention(\n",
|
||||
" (SelfAttention): T5Attention(\n",
|
||||
" (q): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (k): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (v): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (o): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (1): T5LayerCrossAttention(\n",
|
||||
" (EncDecAttention): T5Attention(\n",
|
||||
" (q): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (k): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (v): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" (o): Linear(in_features=1024, out_features=1024, bias=False)\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (2): T5LayerFF(\n",
|
||||
" (DenseReluDense): T5DenseActDense(\n",
|
||||
" (wi): Linear(in_features=1024, out_features=4096, bias=False)\n",
|
||||
" (wo): Linear(in_features=4096, out_features=1024, bias=False)\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" (act): ReLU()\n",
|
||||
" )\n",
|
||||
" (layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" (final_layer_norm): T5LayerNorm()\n",
|
||||
" (dropout): Dropout(p=0.1, inplace=False)\n",
|
||||
" )\n",
|
||||
" (lm_head): Linear(in_features=1024, out_features=32128, bias=False)\n",
|
||||
" )\n",
|
||||
" (prompt_encoder): ModuleDict(\n",
|
||||
" (default): PromptEmbedding(\n",
|
||||
" (embedding): Embedding(40, 1024)\n",
|
||||
" )\n",
|
||||
" )\n",
|
||||
" (word_embeddings): Embedding(32128, 1024)\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# creating model\n",
|
||||
"peft_config = PromptTuningConfig(\n",
|
||||
" task_type=TaskType.SEQ_2_SEQ_LM,\n",
|
||||
" prompt_tuning_init=PromptTuningInit.TEXT,\n",
|
||||
" num_virtual_tokens=20,\n",
|
||||
" prompt_tuning_init_text=\"What is the sentiment of this article?\\n\",\n",
|
||||
" inference_mode=False,\n",
|
||||
" tokenizer_name_or_path=model_name_or_path,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)\n",
|
||||
"model = get_peft_model(model, peft_config)\n",
|
||||
"model.print_trainable_parameters()\n",
|
||||
"model"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "4ee2babf",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:38:18.759143Z",
|
||||
"start_time": "2023-05-30T08:38:17.881621Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Found cached dataset financial_phrasebank (/data/proxem/huggingface/datasets/financial_phrasebank/sentences_allagree/1.0.0/550bde12e6c30e2674da973a55f57edde5181d53f5a5a34c1531c53f93b7e141)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "fb63f50cb7cb4f5aae10648ba74d6c4e",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
" 0%| | 0/1 [00:00<?, ?it/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Map: 0%| | 0/2037 [00:00<?, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Map: 0%| | 0/227 [00:00<?, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'sentence': '`` Lining stone sales were also good in the early autumn , and order books are strong to the end of the year .',\n",
|
||||
" 'label': 2,\n",
|
||||
" 'text_label': 'positive'}"
|
||||
]
|
||||
},
|
||||
"execution_count": 3,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# loading dataset\n",
|
||||
"dataset = load_dataset(\"financial_phrasebank\", \"sentences_allagree\")\n",
|
||||
"dataset = dataset[\"train\"].train_test_split(test_size=0.1)\n",
|
||||
"dataset[\"validation\"] = dataset[\"test\"]\n",
|
||||
"del dataset[\"test\"]\n",
|
||||
"\n",
|
||||
"classes = dataset[\"train\"].features[\"label\"].names\n",
|
||||
"dataset = dataset.map(\n",
|
||||
" lambda x: {\"text_label\": [classes[label] for label in x[\"label\"]]},\n",
|
||||
" batched=True,\n",
|
||||
" num_proc=1,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"dataset[\"train\"][0]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "adf9608c",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:38:21.132266Z",
|
||||
"start_time": "2023-05-30T08:38:20.340722Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Running tokenizer on dataset: 0%| | 0/2037 [00:00<?, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Running tokenizer on dataset: 0%| | 0/227 [00:00<?, ? examples/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# data preprocessing\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)\n",
|
||||
"target_max_length = max([len(tokenizer(class_label)[\"input_ids\"]) for class_label in classes])\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"def preprocess_function(examples):\n",
|
||||
" inputs = examples[text_column]\n",
|
||||
" targets = examples[label_column]\n",
|
||||
" model_inputs = tokenizer(inputs, max_length=max_length, padding=\"max_length\", truncation=True, return_tensors=\"pt\")\n",
|
||||
" labels = tokenizer(\n",
|
||||
" targets, max_length=target_max_length, padding=\"max_length\", truncation=True, return_tensors=\"pt\"\n",
|
||||
" )\n",
|
||||
" labels = labels[\"input_ids\"]\n",
|
||||
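"        # set padding token ids in the labels to -100 so they are ignored by the loss\n",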
" labels[labels == tokenizer.pad_token_id] = -100\n",
|
||||
" model_inputs[\"labels\"] = labels\n",
|
||||
" return model_inputs\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"processed_datasets = dataset.map(\n",
|
||||
" preprocess_function,\n",
|
||||
" batched=True,\n",
|
||||
" num_proc=1,\n",
|
||||
" remove_columns=dataset[\"train\"].column_names,\n",
|
||||
" load_from_cache_file=False,\n",
|
||||
" desc=\"Running tokenizer on dataset\",\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"train_dataset = processed_datasets[\"train\"]\n",
|
||||
"eval_dataset = processed_datasets[\"validation\"]\n",
|
||||
"\n",
|
||||
"train_dataloader = DataLoader(\n",
|
||||
" train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True\n",
|
||||
")\n",
|
||||
"eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "f733a3c6",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:38:22.907922Z",
|
||||
"start_time": "2023-05-30T08:38:22.901057Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# optimizer and lr scheduler\n",
|
||||
"optimizer = torch.optim.AdamW(model.parameters(), lr=lr)\n",
|
||||
"lr_scheduler = get_linear_schedule_with_warmup(\n",
|
||||
" optimizer=optimizer,\n",
|
||||
" num_warmup_steps=0,\n",
|
||||
" num_training_steps=(len(train_dataloader) * num_epochs),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "6b3a4090",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:42:29.409070Z",
|
||||
"start_time": "2023-05-30T08:38:50.102263Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 255/255 [00:42<00:00, 6.05it/s]\n",
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:02<00:00, 14.40it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch=0: train_ppl=tensor(8.0846, device='cuda:0') train_epoch_loss=tensor(2.0900, device='cuda:0') eval_ppl=tensor(1.3542, device='cuda:0') eval_epoch_loss=tensor(0.3032, device='cuda:0')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 255/255 [00:41<00:00, 6.15it/s]\n",
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:02<00:00, 14.42it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch=1: train_ppl=tensor(1.5088, device='cuda:0') train_epoch_loss=tensor(0.4113, device='cuda:0') eval_ppl=tensor(1.2692, device='cuda:0') eval_epoch_loss=tensor(0.2384, device='cuda:0')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 255/255 [00:41<00:00, 6.18it/s]\n",
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:02<00:00, 14.45it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch=2: train_ppl=tensor(1.5322, device='cuda:0') train_epoch_loss=tensor(0.4267, device='cuda:0') eval_ppl=tensor(1.2065, device='cuda:0') eval_epoch_loss=tensor(0.1877, device='cuda:0')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 255/255 [00:41<00:00, 6.17it/s]\n",
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:02<00:00, 14.38it/s]\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch=3: train_ppl=tensor(1.4475, device='cuda:0') train_epoch_loss=tensor(0.3699, device='cuda:0') eval_ppl=tensor(1.2346, device='cuda:0') eval_epoch_loss=tensor(0.2107, device='cuda:0')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 255/255 [00:42<00:00, 5.94it/s]\n",
|
||||
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 29/29 [00:02<00:00, 14.42it/s]"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"epoch=4: train_ppl=tensor(1.3428, device='cuda:0') train_epoch_loss=tensor(0.2948, device='cuda:0') eval_ppl=tensor(1.2041, device='cuda:0') eval_epoch_loss=tensor(0.1857, device='cuda:0')\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# training and evaluation\n",
|
||||
"model = model.to(device)\n",
|
||||
"\n",
|
||||
"for epoch in range(num_epochs):\n",
|
||||
" model.train()\n",
|
||||
" total_loss = 0\n",
|
||||
" for step, batch in enumerate(tqdm(train_dataloader)):\n",
|
||||
" batch = {k: v.to(device) for k, v in batch.items()}\n",
|
||||
" outputs = model(**batch)\n",
|
||||
" loss = outputs.loss\n",
|
||||
" total_loss += loss.detach().float()\n",
|
||||
" loss.backward()\n",
|
||||
" optimizer.step()\n",
|
||||
" lr_scheduler.step()\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" model.eval()\n",
|
||||
" eval_loss = 0\n",
|
||||
" eval_preds = []\n",
|
||||
" for step, batch in enumerate(tqdm(eval_dataloader)):\n",
|
||||
" batch = {k: v.to(device) for k, v in batch.items()}\n",
|
||||
" with torch.no_grad():\n",
|
||||
" outputs = model(**batch)\n",
|
||||
" loss = outputs.loss\n",
|
||||
" eval_loss += loss.detach().float()\n",
|
||||
" eval_preds.extend(\n",
|
||||
" tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True)\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" eval_epoch_loss = eval_loss / len(eval_dataloader)\n",
|
||||
" eval_ppl = torch.exp(eval_epoch_loss)\n",
|
||||
" train_epoch_loss = total_loss / len(train_dataloader)\n",
|
||||
" train_ppl = torch.exp(train_epoch_loss)\n",
|
||||
" print(f\"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "6cafa67b",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:42:42.844671Z",
|
||||
"start_time": "2023-05-30T08:42:42.840447Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"accuracy=85.46255506607929 % on the evaluation dataset\n",
|
||||
"eval_preds[:10]=['neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'positive', 'neutral', 'negative', 'neutral', 'positive']\n",
|
||||
"dataset['validation']['text_label'][:10]=['neutral', 'neutral', 'neutral', 'neutral', 'neutral', 'positive', 'neutral', 'negative', 'positive', 'neutral']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# print accuracy\n",
|
||||
"correct = 0\n",
|
||||
"total = 0\n",
|
||||
"for pred, true in zip(eval_preds, dataset[\"validation\"][\"text_label\"]):\n",
|
||||
" if pred.strip() == true.strip():\n",
|
||||
" correct += 1\n",
|
||||
" total += 1\n",
|
||||
"accuracy = correct / total * 100\n",
|
||||
"print(f\"{accuracy=} % on the evaluation dataset\")\n",
|
||||
"print(f\"{eval_preds[:10]=}\")\n",
|
||||
"print(f\"{dataset['validation']['text_label'][:10]=}\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "a8de6005",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:42:45.752765Z",
|
||||
"start_time": "2023-05-30T08:42:45.742397Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# saving model\n",
|
||||
"peft_model_id = f\"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\"\n",
|
||||
"model.save_pretrained(peft_model_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "bd20cd4c",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:42:47.660873Z",
|
||||
"start_time": "2023-05-30T08:42:47.488293Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"164K\tt5-large_PROMPT_TUNING_SEQ_2_SEQ_LM/adapter_model.bin\r\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"ckpt = f\"{peft_model_id}/adapter_model.bin\"\n",
|
||||
"!du -h $ckpt"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "76c2fc29",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:42:56.721990Z",
|
||||
"start_time": "2023-05-30T08:42:49.060700Z"
|
||||
}
|
||||
},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from peft import PeftModel, PeftConfig\n",
|
||||
"\n",
|
||||
"peft_model_id = f\"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}\"\n",
|
||||
"\n",
|
||||
"config = PeftConfig.from_pretrained(peft_model_id)\n",
|
||||
"model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path)\n",
|
||||
"model = PeftModel.from_pretrained(model, peft_model_id)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "d997f1cc",
|
||||
"metadata": {
|
||||
"ExecuteTime": {
|
||||
"end_time": "2023-05-30T08:42:59.600916Z",
|
||||
"start_time": "2023-05-30T08:42:58.961468Z"
|
||||
}
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Danske Bank is Denmark 's largest bank with 3.5 million customers .\n",
|
||||
"tensor([[ 3039, 1050, 1925, 19, 18001, 3, 31, 7, 2015, 2137,\n",
|
||||
" 28, 3, 9285, 770, 722, 3, 5, 1]])\n",
|
||||
"tensor([[ 0, 7163, 1]])\n",
|
||||
"['neutral']\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"model.eval()\n",
|
||||
"i = 107\n",
|
||||
"input_ids = tokenizer(dataset[\"validation\"][text_column][i], return_tensors=\"pt\").input_ids\n",
|
||||
"print(dataset[\"validation\"][text_column][i])\n",
|
||||
"print(input_ids)\n",
|
||||
"\n",
|
||||
"with torch.no_grad():\n",
|
||||
" outputs = model.generate(input_ids=input_ids, max_new_tokens=10)\n",
|
||||
" print(outputs)\n",
|
||||
" print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "peft",
|
||||
"language": "python",
|
||||
"name": "peft"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
},
|
||||
"toc": {
|
||||
"base_numbering": 1,
|
||||
"nav_menu": {},
|
||||
"number_sections": true,
|
||||
"sideBar": true,
|
||||
"skip_h1_title": false,
|
||||
"title_cell": "Table of Contents",
|
||||
"title_sidebar": "Contents",
|
||||
"toc_cell": false,
|
||||
"toc_position": {},
|
||||
"toc_section_display": true,
|
||||
"toc_window_display": false
|
||||
},
|
||||
"varInspector": {
|
||||
"cols": {
|
||||
"lenName": 16,
|
||||
"lenType": 16,
|
||||
"lenVar": 40
|
||||
},
|
||||
"kernels_config": {
|
||||
"python": {
|
||||
"delete_cmd_postfix": "",
|
||||
"delete_cmd_prefix": "del ",
|
||||
"library": "var_list.py",
|
||||
"varRefreshCmd": "print(var_dic_list())"
|
||||
},
|
||||
"r": {
|
||||
"delete_cmd_postfix": ") ",
|
||||
"delete_cmd_prefix": "rm(",
|
||||
"library": "var_list.r",
|
||||
"varRefreshCmd": "cat(var_dic_list()) "
|
||||
}
|
||||
},
|
||||
"types_to_exclude": [
|
||||
"module",
|
||||
"function",
|
||||
"builtin_function_or_method",
|
||||
"instance",
|
||||
"_Feature"
|
||||
],
|
||||
"window_display": false
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "aee8b7b246df8f9039afb4144a1f6fd8d2ca17a180786b69acc140d282b71a49"
|
||||
}
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
File diff suppressed because one or more lines are too long
@ -1,6 +1,5 @@
|
||||
transformers
|
||||
accelerate
|
||||
loralib
|
||||
evaluate
|
||||
deepspeed
|
||||
tqdm
|
||||
|
@ -0,0 +1,495 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2023-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
from pathlib import Path
|
||||
|
||||
import datasets
|
||||
import evaluate
|
||||
import torch
|
||||
import transformers
|
||||
from accelerate import Accelerator
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import set_seed
|
||||
from datasets import DatasetDict, load_dataset
|
||||
from huggingface_hub import Repository, create_repo
|
||||
from torch import nn
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm import tqdm
|
||||
from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler
|
||||
from transformers.utils import get_full_repo_name
|
||||
|
||||
from peft import LoraConfig, TaskType, get_peft_model
|
||||
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description="Training a PEFT model for Semantic Search task")
|
||||
parser.add_argument("--dataset_name", type=str, default=None, help="dataset name on HF hub")
|
||||
parser.add_argument(
|
||||
"--max_length",
|
||||
type=int,
|
||||
default=128,
|
||||
help=(
|
||||
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
|
||||
" sequences shorter will be padded if `--pad_to_max_length` is passed."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--model_name_or_path",
|
||||
type=str,
|
||||
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
||||
required=True,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--per_device_train_batch_size",
|
||||
type=int,
|
||||
default=8,
|
||||
help="Batch size (per device) for the training dataloader.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--per_device_eval_batch_size",
|
||||
type=int,
|
||||
default=8,
|
||||
help="Batch size (per device) for the evaluation dataloader.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--learning_rate",
|
||||
type=float,
|
||||
default=5e-5,
|
||||
help="Initial learning rate (after the potential warmup period) to use.",
|
||||
)
|
||||
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
|
||||
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
|
||||
parser.add_argument(
|
||||
"--max_train_steps",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gradient_accumulation_steps",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Number of update steps to accumulate before performing a backward/update pass.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lr_scheduler_type",
|
||||
type=SchedulerType,
|
||||
default="linear",
|
||||
help="The scheduler type to use.",
|
||||
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
|
||||
)
|
||||
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
|
||||
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
||||
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
||||
parser.add_argument(
|
||||
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
|
||||
)
|
||||
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
|
||||
parser.add_argument(
|
||||
"--checkpointing_steps",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--resume_from_checkpoint",
|
||||
type=str,
|
||||
default=None,
|
||||
help="If the training should continue from a checkpoint folder.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--with_tracking",
|
||||
action="store_true",
|
||||
help="Whether to enable experiment trackers for logging.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--report_to",
|
||||
type=str,
|
||||
default="all",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--sanity_test",
|
||||
action="store_true",
|
||||
help="Whether to enable sanity test.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use_peft",
|
||||
action="store_true",
|
||||
help="Whether to use PEFT.",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
if args.push_to_hub:
|
||||
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
|
||||
|
||||
return args
|
||||
|
||||
|
||||
def save_model_hook(models, weights, output_dir):
|
||||
for i, model in enumerate(models):
|
||||
model.save_pretrained(output_dir, state_dict=weights[i])
|
||||
# make sure to pop weight so that corresponding model is not saved again
|
||||
weights.pop()
|
||||
|
||||
|
||||
def load_model_hook(models, input_dir):
|
||||
while len(models) > 0:
|
||||
model = models.pop()
|
||||
# pop models so that they are not loaded again
|
||||
if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"):
|
||||
model.load_adapter(input_dir, model.active_adapter, is_trainable=True)
|
||||
|
||||
|
||||
class AutoModelForSentenceEmbedding(nn.Module):
|
||||
def __init__(self, model_name, tokenizer, normalize=True):
|
||||
super(AutoModelForSentenceEmbedding, self).__init__()
|
||||
|
||||
self.model = AutoModel.from_pretrained(model_name) # , load_in_8bit=True, device_map={"":0})
|
||||
self.normalize = normalize
|
||||
self.tokenizer = tokenizer
|
||||
|
||||
def forward(self, **kwargs):
|
||||
model_output = self.model(**kwargs)
|
||||
embeddings = self.mean_pooling(model_output, kwargs["attention_mask"])
|
||||
if self.normalize:
|
||||
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
|
||||
|
||||
return embeddings
|
||||
|
||||
def mean_pooling(self, model_output, attention_mask):
|
||||
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
|
||||
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
|
||||
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
|
||||
|
||||
def __getattr__(self, name: str):
|
||||
"""Forward missing attributes to the wrapped module."""
|
||||
try:
|
||||
return super().__getattr__(name) # defer to nn.Module's logic
|
||||
except AttributeError:
|
||||
return getattr(self.model, name)
|
||||
|
||||
|
||||
def get_cosine_embeddings(query_embs, product_embs):
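# With the default normalize=True in AutoModelForSentenceEmbedding, both inputs are L2-normalized,
# so this row-wise dot product is the cosine similarity of each query/product pair.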
|
||||
return torch.sum(query_embs * product_embs, axis=1)
|
||||
|
||||
|
||||
def get_loss(cosine_score, labels):
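# Contrastive-style objective: positive pairs (label=1) are pulled toward a cosine score of 1,
# while negative pairs (label=0) are penalized only when their score is above 0.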
|
||||
return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0)))
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
|
||||
accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps}
|
||||
if args.with_tracking:
|
||||
accelerator_kwargs["log_with"] = args.report_to
|
||||
accelerator_kwargs["project_dir"] = args.output_dir
|
||||
accelerator = Accelerator(**accelerator_kwargs)
|
||||
|
||||
# Make one log on every process with the configuration for debugging.
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||
datefmt="%m/%d/%Y %H:%M:%S",
|
||||
level=logging.INFO,
|
||||
)
|
||||
logger.info(accelerator.state, main_process_only=False)
|
||||
if accelerator.is_local_main_process:
|
||||
datasets.utils.logging.set_verbosity_warning()
|
||||
transformers.utils.logging.set_verbosity_info()
|
||||
else:
|
||||
datasets.utils.logging.set_verbosity_error()
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
|
||||
# If passed along, set the training seed now.
|
||||
if args.seed is not None:
|
||||
set_seed(args.seed)
|
||||
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
if args.hub_model_id is None:
|
||||
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
|
||||
else:
|
||||
repo_name = args.hub_model_id
|
||||
create_repo(repo_name, exist_ok=True, token=args.hub_token)
|
||||
repo = Repository(args.output_dir, clone_from=repo_name, token=args.hub_token)
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
gitignore.write("step_*\n")
|
||||
if "epoch_*" not in gitignore:
|
||||
gitignore.write("epoch_*\n")
|
||||
elif args.output_dir is not None:
|
||||
os.makedirs(args.output_dir, exist_ok=True)
|
||||
accelerator.wait_for_everyone()
|
||||
|
||||
# get the tokenizer
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
|
||||
|
||||
# dataset download and preprocessing
|
||||
if args.sanity_test:
|
||||
train_dataset = load_dataset("smangrul/amazon_esci", split="train[:1024]")
|
||||
val_dataset = load_dataset("smangrul/amazon_esci", split="validation[:1024]")
|
||||
|
||||
dataset = DatasetDict({"train": train_dataset, "validation": val_dataset})
|
||||
else:
|
||||
dataset = load_dataset(args.dataset_name)
|
||||
|
||||
def preprocess_function(examples):
|
||||
queries = examples["query"]
|
||||
result = tokenizer(queries, padding="max_length", max_length=70, truncation=True)
|
||||
result = {f"query_{k}": v for k, v in result.items()}
|
||||
|
||||
products = examples["product_title"]
|
||||
result_products = tokenizer(products, padding="max_length", max_length=70, truncation=True)
|
||||
for k, v in result_products.items():
|
||||
result[f"product_{k}"] = v
|
||||
|
||||
result["labels"] = examples["relevance_label"]
|
||||
return result
|
||||
|
||||
processed_datasets = dataset.map(
|
||||
preprocess_function,
|
||||
batched=True,
|
||||
remove_columns=dataset["train"].column_names,
|
||||
desc="Running tokenizer on dataset",
|
||||
)
|
||||
|
||||
# Log a few random samples from the training set:
|
||||
for index in random.sample(range(len(processed_datasets["train"])), 3):
|
||||
logger.info(f"Sample {index} of the training set: {processed_datasets['train'][index]}.")
|
||||
|
||||
# base model
|
||||
model = AutoModelForSentenceEmbedding(args.model_name_or_path, tokenizer)
|
||||
|
||||
if args.use_peft:
|
||||
# peft config and wrapping
|
||||
peft_config = LoraConfig(
|
||||
r=8,
|
||||
lora_alpha=16,
|
||||
bias="none",
|
||||
task_type=TaskType.FEATURE_EXTRACTION,
|
||||
target_modules=["key", "query", "value"],
|
||||
)
|
||||
model = get_peft_model(model, peft_config)
|
||||
model.print_trainable_parameters()
|
||||
|
||||
accelerator.print(model)
|
||||
|
||||
# get dataloaders
|
||||
train_dataloader = DataLoader(
|
||||
processed_datasets["train"],
|
||||
shuffle=True,
|
||||
collate_fn=default_data_collator,
|
||||
batch_size=args.per_device_train_batch_size,
|
||||
pin_memory=True,
|
||||
)
|
||||
|
||||
eval_dataloader = DataLoader(
|
||||
processed_datasets["validation"],
|
||||
shuffle=False,
|
||||
collate_fn=default_data_collator,
|
||||
batch_size=args.per_device_eval_batch_size,
|
||||
pin_memory=True,
|
||||
)
|
||||
|
||||
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
overrode_max_train_steps = False
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
if args.max_train_steps is None:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
overrode_max_train_steps = True
|
||||
|
||||
lr_scheduler = get_scheduler(
|
||||
name=args.lr_scheduler_type,
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=args.num_warmup_steps,
|
||||
num_training_steps=args.max_train_steps,
|
||||
)
|
||||
|
||||
# Prepare everything with our `accelerator`.
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
|
||||
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
|
||||
)
|
||||
|
||||
# We need to recalculate our total training steps as the size of the training dataloader may have changed
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
if overrode_max_train_steps:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
# Afterwards we recalculate our number of training epochs
|
||||
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
|
||||
|
||||
# Figure out how many steps we should save the Accelerator states
|
||||
checkpointing_steps = args.checkpointing_steps
|
||||
if checkpointing_steps is not None and checkpointing_steps.isdigit():
|
||||
checkpointing_steps = int(checkpointing_steps)
|
||||
|
||||
# We need to initialize the trackers we use, and also store our configuration.
|
||||
# The trackers initializes automatically on the main process.
|
||||
if args.with_tracking:
|
||||
experiment_config = vars(args)
|
||||
# TensorBoard cannot log Enums, need the raw value
|
||||
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
|
||||
accelerator.init_trackers("peft_semantic_search", experiment_config)
|
||||
|
||||
metric = evaluate.load("roc_auc")
|
||||
|
||||
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
|
||||
|
||||
if args.use_peft:
|
||||
# saving and loading checkpoints for resuming training
|
||||
accelerator.register_save_state_pre_hook(save_model_hook)
|
||||
accelerator.register_load_state_pre_hook(load_model_hook)
|
||||
|
||||
logger.info("***** Running training *****")
|
||||
logger.info(f" Num examples = {len(processed_datasets['train'])}")
|
||||
logger.info(f" Num Epochs = {args.num_train_epochs}")
|
||||
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
|
||||
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
|
||||
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
|
||||
logger.info(f" Total optimization steps = {args.max_train_steps}")
|
||||
|
||||
# Only show the progress bar once on each machine.
|
||||
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
|
||||
completed_steps = 0
|
||||
starting_epoch = 0
|
||||
# Potentially load in the weights and states from a previous save
|
||||
if args.resume_from_checkpoint:
|
||||
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
|
||||
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
|
||||
accelerator.load_state(args.resume_from_checkpoint)
|
||||
path = os.path.basename(args.resume_from_checkpoint)
|
||||
else:
|
||||
# Get the most recent checkpoint
|
||||
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
|
||||
dirs.sort(key=os.path.getctime)
|
||||
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
|
||||
# Extract `epoch_{i}` or `step_{i}`
|
||||
training_difference = os.path.splitext(path)[0]
|
||||
|
||||
if "epoch" in training_difference:
|
||||
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
|
||||
resume_step = None
|
||||
completed_steps = starting_epoch * num_update_steps_per_epoch
|
||||
else:
|
||||
# need to multiply `gradient_accumulation_steps` to reflect real steps
|
||||
resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
|
||||
starting_epoch = resume_step // len(train_dataloader)
|
||||
resume_step -= starting_epoch * len(train_dataloader)
|
||||
completed_steps = resume_step // args.gradient_accumulation_steps
|
||||
|
||||
# update the progress_bar if load from checkpoint
|
||||
progress_bar.update(completed_steps)
|
||||
|
||||
for epoch in range(starting_epoch, args.num_train_epochs):
|
||||
model.train()
|
||||
# track the running loss unconditionally; it is used for logging below even when --with_tracking is not passed
total_loss = 0
|
||||
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
|
||||
# We skip the first `n` batches in the dataloader when resuming from a checkpoint
|
||||
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
|
||||
else:
|
||||
active_dataloader = train_dataloader
|
||||
for step, batch in enumerate(active_dataloader):
|
||||
with accelerator.accumulate(model):
|
||||
query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k})
|
||||
product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k})
|
||||
loss = get_loss(get_cosine_embeddings(query_embs, product_embs), batch["labels"])
|
||||
total_loss += accelerator.reduce(loss.detach().float(), reduction="sum")
|
||||
accelerator.backward(loss)
|
||||
optimizer.step()
|
||||
lr_scheduler.step()
|
||||
model.zero_grad()
|
||||
|
||||
# Checks if the accelerator has performed an optimization step behind the scenes
|
||||
if accelerator.sync_gradients:
|
||||
progress_bar.update(1)
|
||||
completed_steps += 1
|
||||
|
||||
if (step + 1) % 100 == 0:
|
||||
logger.info(f"Step: {step+1}, Loss: {total_loss/(step+1)}")
|
||||
if args.with_tracking:
|
||||
accelerator.log({"train/loss": total_loss / (step + 1)}, step=completed_steps)
|
||||
|
||||
if isinstance(checkpointing_steps, int):
|
||||
if completed_steps % checkpointing_steps == 0:
|
||||
output_dir = f"step_{completed_steps}"
|
||||
if args.output_dir is not None:
|
||||
output_dir = os.path.join(args.output_dir, output_dir)
|
||||
accelerator.save_state(output_dir)
|
||||
|
||||
if completed_steps >= args.max_train_steps:
|
||||
break
|
||||
|
||||
model.eval()
|
||||
for step, batch in enumerate(eval_dataloader):
|
||||
with torch.no_grad():
|
||||
query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k})
|
||||
product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k})
|
||||
prediction_scores = get_cosine_embeddings(query_embs, product_embs)
|
||||
prediction_scores, references = accelerator.gather_for_metrics((prediction_scores, batch["labels"]))
|
||||
metric.add_batch(
|
||||
prediction_scores=prediction_scores,
|
||||
references=references,
|
||||
)
|
||||
|
||||
result = metric.compute()
|
||||
result = {f"eval/{k}": v for k, v in result.items()}
|
||||
# Use accelerator.print to print only on the main process.
|
||||
accelerator.print(f"epoch {epoch}:", result)
|
||||
if args.with_tracking:
|
||||
result["train/epoch_loss"] = total_loss.item() / len(train_dataloader)
|
||||
accelerator.log(result, step=completed_steps)
|
||||
|
||||
if args.output_dir is not None:
|
||||
accelerator.wait_for_everyone()
|
||||
if accelerator.is_main_process:
|
||||
if isinstance(checkpointing_steps, str):
|
||||
accelerator.save_state(os.path.join(args.output_dir, f"epoch_{epoch}"))
|
||||
accelerator.unwrap_model(model).save_pretrained(
|
||||
args.output_dir, state_dict=accelerator.get_state_dict(accelerator.unwrap_model(model))
|
||||
)
|
||||
tokenizer.save_pretrained(args.output_dir)
|
||||
if args.push_to_hub:
|
||||
commit_message = (
|
||||
f"Training in progress epoch {epoch}"
|
||||
if epoch < args.num_train_epochs - 1
|
||||
else "End of training"
|
||||
)
|
||||
repo.push_to_hub(commit_message=commit_message, blocking=False, auto_lfs_prune=True)
|
||||
accelerator.wait_for_everyone()
|
||||
accelerator.end_training()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
File diff suppressed because it is too large
10
examples/feature_extraction/requirements.txt
Normal file
@ -0,0 +1,10 @@
|
||||
git+https://github.com/huggingface/peft
|
||||
git+https://github.com/huggingface/accelerate
|
||||
git+https://github.com/huggingface/transformers
|
||||
datasets
|
||||
evaluate
|
||||
hnswlib
|
||||
pandas
|
||||
tqdm
|
||||
huggingface_hub
|
||||
wandb
|
193
examples/fp4_finetuning/finetune_fp4_opt_bnb_peft.py
Executable file
@ -0,0 +1,193 @@
|
||||
import os
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import transformers
|
||||
from datasets import load_dataset
|
||||
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
|
||||
|
||||
from peft import LoraConfig, get_peft_model
|
||||
|
||||
|
||||
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
|
||||
|
||||
# -*- coding: utf-8 -*-
|
||||
"""Finetune-opt-bnb-peft.ipynb
|
||||
|
||||
Automatically generated by Colaboratory.
|
||||
|
||||
Original file is located at
|
||||
https://colab.research.google.com/drive/1jCkpikz0J2o20FBQmYmAGdiKmJGOMo-o
|
||||
|
||||
## Fine-tune large models using 🤗 `peft` adapters, `transformers` & `bitsandbytes`
|
||||
|
||||
In this tutorial we will cover how we can fine-tune large language models using the very recent `peft` library and `bitsandbytes` for loading large models in 8-bit.
|
||||
The fine-tuning method relies on a recent technique called "Low Rank Adapters" (LoRA): instead of fine-tuning the entire model, you only fine-tune these small adapters and load them properly inside the model.
|
||||
After fine-tuning the model you can also share your adapters on the 🤗 Hub and load them very easily. Let's get started!
|
||||
|
||||
### Install requirements
|
||||
|
||||
First, run the cells below to install the requirements:
|
||||
"""
|
||||
|
||||
|
||||
"""### Model loading
|
||||
|
||||
Here let's load the `opt-6.7b` model; its weights in half-precision (float16) are about 13GB on the Hub! If we load them in 8-bit we would need around 7GB of memory instead. (This particular script actually loads the smaller `facebook/opt-350m` checkpoint, quantized to 4-bit via `BitsAndBytesConfig`.)
|
||||
"""
|
||||
|
||||
|
||||
free_in_GB = int(torch.cuda.mem_get_info()[0] / 1024**3)
|
||||
max_memory = f"{free_in_GB-2}GB"
|
||||
|
||||
n_gpus = torch.cuda.device_count()
|
||||
max_memory = {i: max_memory for i in range(n_gpus)}
|
||||
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
"facebook/opt-350m",
|
||||
max_memory=max_memory,
|
||||
quantization_config=BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
llm_int8_threshold=6.0,
|
||||
llm_int8_has_fp16_weight=False,
|
||||
bnb_4bit_compute_dtype=torch.float16,
|
||||
bnb_4bit_use_double_quant=True,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
),
|
||||
torch_dtype=torch.float16,
|
||||
)
|
||||
|
||||
tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
|
||||
|
||||
"""### Post-processing on the model
|
||||
|
||||
Finally, we need to apply some post-processing on the quantized model to enable training: we freeze all the layers and cast the layer-norm (and other small parameters) in `float32` for stability. We also cast the output of the last layer in `float32` for the same reason.
|
||||
"""
|
||||
|
||||
print(model)
|
||||
|
||||
for param in model.parameters():
|
||||
param.requires_grad = False # freeze the model - train adapters later
|
||||
if param.ndim == 1:
|
||||
# cast the small parameters (e.g. layernorm) to fp32 for stability
|
||||
param.data = param.data.to(torch.float32)
|
||||
|
||||
# model.gradient_checkpointing_enable() # reduce number of stored activations
|
||||
# model.model.decoder.project_in = lambda x: x.requires_grad_(True)
|
||||
|
||||
|
||||
class CastOutputToFloat(nn.Sequential):
|
||||
def forward(self, x):
|
||||
return super().forward(x).to(torch.float32)
|
||||
|
||||
|
||||
model.lm_head = CastOutputToFloat(model.lm_head)
|
||||
|
||||
"""### Apply LoRA
|
||||
|
||||
Here comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.
|
||||
"""
|
||||
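# Rough sketch of what a LoRA layer computes (illustration only, not peft's actual implementation):
# for a frozen weight W, the adapted forward pass is
#     h = W @ x + (lora_alpha / r) * B @ (A @ x)
# where A (r x d_in) and B (d_out x r) are the only trainable matrices,
# so the number of trainable parameters scales with r instead of d_in * d_out.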
|
||||
|
||||
def print_trainable_parameters(model):
|
||||
"""
|
||||
Prints the number of trainable parameters in the model.
|
||||
"""
|
||||
trainable_params = 0
|
||||
all_param = 0
|
||||
for _, param in model.named_parameters():
|
||||
all_param += param.numel()
|
||||
if param.requires_grad:
|
||||
trainable_params += param.numel()
|
||||
print(
|
||||
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}"
|
||||
)
|
||||
|
||||
|
||||
config = LoraConfig(
|
||||
r=64,
|
||||
lora_alpha=32,
|
||||
target_modules=["q_proj", "v_proj", "out_proj", "fc1", "fc2"],
|
||||
lora_dropout=0.01,
|
||||
bias="none",
|
||||
task_type="CAUSAL_LM",
|
||||
)
|
||||
|
||||
model = get_peft_model(model, config)
|
||||
print_trainable_parameters(model)
|
||||
|
||||
# Verifying the datatypes.
|
||||
dtypes = {}
|
||||
for _, p in model.named_parameters():
|
||||
dtype = p.dtype
|
||||
if dtype not in dtypes:
|
||||
dtypes[dtype] = 0
|
||||
dtypes[dtype] += p.numel()
|
||||
total = 0
|
||||
for k, v in dtypes.items():
|
||||
total += v
|
||||
for k, v in dtypes.items():
|
||||
print(k, v, v / total)
|
||||
|
||||
"""### Training"""
|
||||
|
||||
data = load_dataset("Abirate/english_quotes")
|
||||
data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
|
||||
|
||||
trainer = transformers.Trainer(
|
||||
model=model,
|
||||
train_dataset=data["train"],
|
||||
args=transformers.TrainingArguments(
|
||||
per_device_train_batch_size=4,
|
||||
gradient_accumulation_steps=4,
|
||||
warmup_steps=10,
|
||||
max_steps=20,
|
||||
learning_rate=3e-4,
|
||||
fp16=True,
|
||||
logging_steps=1,
|
||||
output_dir="outputs",
|
||||
),
|
||||
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
|
||||
)
|
||||
model.config.use_cache = False # silence the warnings. Please re-enable for inference!
|
||||
trainer.train()
|
||||
|
||||
# from huggingface_hub import notebook_login
|
||||
|
||||
# notebook_login()
|
||||
|
||||
# model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True)
|
||||
|
||||
"""## Load adapters from the Hub
|
||||
|
||||
You can also directly load adapters from the Hub using the commands below:
|
||||
"""
|
||||
|
||||
# import torch
|
||||
# from peft import PeftModel, PeftConfig
|
||||
# from transformers import AutoModelForCausalLM, AutoTokenizer
|
||||
#
|
||||
# peft_model_id = "ybelkada/opt-6.7b-lora"
|
||||
# config = PeftConfig.from_pretrained(peft_model_id)
|
||||
# model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, return_dict=True, load_in_8bit=True, device_map='auto')
|
||||
# tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
|
||||
#
|
||||
## Load the Lora model
|
||||
# model = PeftModel.from_pretrained(model, peft_model_id)
|
||||
#
|
||||
# """## Inference
|
||||
#
|
||||
# You can then directly use the trained model or the model that you have loaded from the 🤗 Hub for inference as you would do it usually in `transformers`.
|
||||
# """
|
||||
#
|
||||
batch = tokenizer("Two things are infinite: ", return_tensors="pt")
|
||||
|
||||
model.config.use_cache = False # silence the warnings. Please re-enable for inference!
|
||||
model.eval()
|
||||
with torch.cuda.amp.autocast():
|
||||
output_tokens = model.generate(**batch, max_new_tokens=50)
|
||||
|
||||
print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True))
|
||||
# model.save('./test.pt')
|
||||
|
||||
# """As you can see by fine-tuning for few steps we have almost recovered the quote from Albert Einstein that is present in the [training data](https://huggingface.co/datasets/Abirate/english_quotes)."""
|
@ -1,7 +1,15 @@
|
||||
# Fine-tuning for image classification using LoRA and 🤗 PEFT
|
||||
|
||||
## Vision Transformer model from transformers
|
||||
|
||||
[](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb)
|
||||
|
||||
We provide a notebook (`image_classification_peft_lora.ipynb`) where we learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune an image classification model by ONLY using **0.7%** of the original trainable parameters of the model.
|
||||
|
||||
LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685).
|
||||
|
||||
## PoolFormer model from timm
|
||||
|
||||
[](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_timm_peft_lora.ipynb)
|
||||
|
||||
The notebook `image_classification_timm_peft_lora.ipynb` showcases fine-tuning an image classification model from the [timm](https://huggingface.co/docs/timm/index) library. Again, LoRA is used to reduce the number of trainable parameters to a fraction of the total.
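
Both notebooks follow the same basic recipe, sketched below. This is a minimal illustration rather than the notebooks' exact code; the checkpoint name, label count, and LoRA hyperparameters are placeholders to adapt to your model and dataset.

```python
from transformers import AutoModelForImageClassification
from peft import LoraConfig, get_peft_model

# Placeholder base checkpoint and label count -- swap in your own.
model = AutoModelForImageClassification.from_pretrained(
    "google/vit-base-patch16-224-in21k", num_labels=10, ignore_mismatched_sizes=True
)

config = LoraConfig(
    r=16,
    lora_alpha=16,
    target_modules=["query", "value"],  # attention projections of the ViT blocks
    lora_dropout=0.1,
    bias="none",
    modules_to_save=["classifier"],  # the new classification head is trained in full
)
model = get_peft_model(model, config)
model.print_trainable_parameters()  # only a small fraction of parameters is trainable

# ... train as usual with Trainer or a custom loop ...

# For inference, the low-rank update matrices can be folded back into the base weights.
merged_model = model.merge_and_unload()
```

`merge_and_unload()` removes the extra LoRA layers after merging, so the resulting model can be used like a plain `transformers` model at inference time.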
|
||||
|
@ -61,7 +61,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install transformers accelerate evaluate datasets loralib git+https://github.com/huggingface/peft -q"
|
||||
"!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q"
|
||||
]
|
||||
},
|
||||
{
|
||||
|
File diff suppressed because one or more lines are too long
@ -71,7 +71,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install -q bitsandbytes datasets accelerate loralib\n",
|
||||
"!pip install -q bitsandbytes datasets accelerate\n",
|
||||
"!pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git@main"
|
||||
]
|
||||
},
|
||||
@ -305,7 +305,7 @@
|
||||
"\n",
|
||||
"model_name = \"google/flan-t5-large\"\n",
|
||||
"\n",
|
||||
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name, load_in_8bit=True, device_map=\"auto\")\n",
|
||||
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name, load_in_8bit=True)\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(model_name)"
|
||||
]
|
||||
},
|
||||
@ -1186,7 +1186,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 1,
|
||||
"id": "j097aaPWJ-9u",
|
||||
"metadata": {
|
||||
"id": "j097aaPWJ-9u"
|
||||
@ -1209,7 +1209,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 2,
|
||||
"id": "jmjwWYt0KI_I",
|
||||
"metadata": {
|
||||
"colab": {
|
||||
@ -1264,7 +1264,7 @@
|
||||
},
|
||||
"gpuClass": "standard",
|
||||
"kernelspec": {
|
||||
"display_name": "fix-test",
|
||||
"display_name": "Python 3.10.11 ('accelerate': conda)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@ -1278,11 +1278,11 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "6c4e21ff5edce2fb2cfe7eb854551da92c6ec05cac2504057bb7aba62f43a5ec"
|
||||
"hash": "1219a10c7def3e2ad4f431cfa6f49d569fcc5949850132f23800e792129eefbb"
|
||||
}
|
||||
},
|
||||
"widgets": {
|
||||
|
@ -59,7 +59,7 @@
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"!pip install -q bitsandbytes datasets accelerate loralib\n",
|
||||
"!pip install -q bitsandbytes datasets accelerate\n",
|
||||
"!pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git"
|
||||
]
|
||||
},
|
||||
@ -76,7 +76,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"execution_count": 4,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
@ -198,76 +198,10 @@
|
||||
"outputId": "135a7675-6a4d-4786-b5dc-34cb867f40c7"
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"===================================BUG REPORT===================================\n",
|
||||
"Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n",
|
||||
"================================================================================\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d4de260ffd8a440eb87eb900fc1bb1d3",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)lve/main/config.json: 0%| | 0.00/651 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "fc2d5ffe254d425b939252ec46ec27cc",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)model.bin.index.json: 0%| | 0.00/41.9k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "c6f712eadc4d49019b2bd355968cc155",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)00001-of-00002.bin\";: 0%| | 0.00/9.96G [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "5aa74b9b30614172b07f88873cf89471",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)00002-of-00002.bin\";: 0%| | 0.00/3.36G [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "e73e5388182040a8937ccf1748171a87",
|
||||
"model_id": "bee2f575b3e64c30b2f3afa137802406",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
@ -277,92 +211,17 @@
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "a994beafbf3f4c20880a7bbe3898db36",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)neration_config.json: 0%| | 0.00/137 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "1e9391f6c89c4d08859ef3413edb19be",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)okenizer_config.json: 0%| | 0.00/685 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "4e6d5943bc374b388b93ed115e44b6a5",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)olve/main/vocab.json: 0%| | 0.00/899k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "1ca7684b79c5438fa06b047bd2b3283f",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)olve/main/merges.txt: 0%| | 0.00/456k [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "d46b5725c35142a89617e46c0e8d3679",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)cial_tokens_map.json: 0%| | 0.00/441 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"import os\n",
|
||||
"\n",
|
||||
"os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"\n",
|
||||
"import torch\n",
|
||||
"import torch.nn as nn\n",
|
||||
"import bitsandbytes as bnb\n",
|
||||
"from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM\n",
|
||||
"\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\n",
|
||||
" \"facebook/opt-6.7b\",\n",
|
||||
" load_in_8bit=True,\n",
|
||||
" device_map=\"auto\",\n",
|
||||
")\n",
|
||||
"model = AutoModelForCausalLM.from_pretrained(\"facebook/opt-6.7b\", load_in_8bit=True)\n",
|
||||
"\n",
|
||||
"tokenizer = AutoTokenizer.from_pretrained(\"facebook/opt-6.7b\")"
|
||||
]
|
||||
@ -384,7 +243,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"execution_count": 5,
|
||||
"metadata": {
|
||||
"id": "T-gy-LxM0yAi"
|
||||
},
|
||||
@ -408,7 +267,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"execution_count": 6,
|
||||
"metadata": {
|
||||
"id": "4W1j6lxaNnxC"
|
||||
},
|
||||
@ -431,7 +290,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"execution_count": 7,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
@ -477,7 +336,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
@ -1520,7 +1379,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 10,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/",
|
||||
@ -1555,45 +1414,23 @@
|
||||
},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"===================================BUG REPORT===================================\n",
|
||||
"Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n",
|
||||
"For effortless bug reporting copy-paste your error into this form: https://docs.google.com/forms/d/e/1FAIpQLScPB8emS3Thkp66nvqwmjTEgxp8Y9ufuWTzFyr9kJ5AoI47dQ/viewform?usp=sf_link\n",
|
||||
"================================================================================\n",
|
||||
"CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching /usr/local/cuda/lib64...\n",
|
||||
"CUDA SETUP: CUDA runtime path found: /usr/local/cuda/lib64/libcudart.so\n",
|
||||
"CUDA SETUP: Highest compute capability among GPUs detected: 7.5\n",
|
||||
"CUDA SETUP: Detected CUDA version 112\n",
|
||||
"CUDA SETUP: Loading binary /usr/local/lib/python3.8/dist-packages/bitsandbytes/libbitsandbytes_cuda112.so...\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: /usr/lib64-nvidia did not contain libcudart.so as expected! Searching further paths...\n",
|
||||
" warn(msg)\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/sys/fs/cgroup/memory.events /var/colab/cgroup/jupyter-children/memory.events')}\n",
|
||||
" warn(msg)\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/usr/share/tcltk/tcllib1.19')}\n",
|
||||
" warn(msg)\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('--listen_host=172.28.0.12 --target_host=172.28.0.12 --tunnel_background_save_url=https'), PosixPath('//colab.research.google.com/tun/m/cc48301118ce562b961b3c22d803539adc1e0c19/gpu-t4-s-38j9a9wfgbvb0 --tunnel_background_save_delay=10s --tunnel_periodic_background_save_frequency=30m0s --enable_output_coalescing=true --output_coalescing_required=true')}\n",
|
||||
" warn(msg)\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/env/python')}\n",
|
||||
" warn(msg)\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('6000,\"kernelManagerProxyHost\"'), PosixPath('true}'), PosixPath('[\"--ip=172.28.0.12\",\"--transport=ipc\"],\"debugAdapterMultiplexerPath\"'), PosixPath('\"172.28.0.12\",\"jupyterArgs\"'), PosixPath('\"/usr/local/bin/dap_multiplexer\",\"enableLsp\"'), PosixPath('{\"kernelManagerProxyPort\"')}\n",
|
||||
" warn(msg)\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/cuda_setup/main.py:134: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('//ipykernel.pylab.backend_inline'), PosixPath('module')}\n",
|
||||
" warn(msg)\n"
|
||||
]
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "621d427f78fb458e8ae25262f2ab7ca8",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)/adapter_config.json: 0%| | 0.00/332 [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
"output_type": "display_data"
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "ff2454cf69b346fea70070522cf93689",
|
||||
"model_id": "4a2107423a164efd89002e031126c8b5",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
@ -1607,12 +1444,12 @@
|
||||
{
|
||||
"data": {
|
||||
"application/vnd.jupyter.widget-view+json": {
|
||||
"model_id": "75913676a5df43fbbfe744b8882188df",
|
||||
"model_id": "43f2a9b0f37e4caab35d7dda43f051f9",
|
||||
"version_major": 2,
|
||||
"version_minor": 0
|
||||
},
|
||||
"text/plain": [
|
||||
"Downloading (…)\"adapter_model.bin\";: 0%| | 0.00/33.6M [00:00<?, ?B/s]"
|
||||
"Downloading adapter_model.bin: 0%| | 0.00/33.6M [00:00<?, ?B/s]"
|
||||
]
|
||||
},
|
||||
"metadata": {},
|
||||
@ -1648,7 +1485,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"colab": {
|
||||
"base_uri": "https://localhost:8080/"
|
||||
@ -1661,10 +1498,8 @@
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/usr/local/lib/python3.8/dist-packages/transformers/generation/utils.py:1359: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`.\n",
|
||||
" warnings.warn(\n",
|
||||
"/usr/local/lib/python3.8/dist-packages/bitsandbytes/autograd/_functions.py:233: UserWarning: MatMul8bitLt: inputs will be cast from torch.float32 to float16 during quantization\n",
|
||||
" warnings.warn(f\"MatMul8bitLt: inputs will be cast from {A.dtype} to float16 during quantization\")\n"
|
||||
"/home/marc/anaconda3/envs/accelerate/lib/python3.10/site-packages/transformers/generation/utils.py:1448: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`.\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
@ -1705,7 +1540,7 @@
|
||||
},
|
||||
"gpuClass": "standard",
|
||||
"kernelspec": {
|
||||
"display_name": "fix-test",
|
||||
"display_name": "Python 3.10.11 ('accelerate': conda)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@ -1719,11 +1554,11 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.9.16"
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "6c4e21ff5edce2fb2cfe7eb854551da92c6ec05cac2504057bb7aba62f43a5ec"
|
||||
"hash": "1219a10c7def3e2ad4f431cfa6f49d569fcc5949850132f23800e792129eefbb"
|
||||
}
|
||||
},
|
||||
"widgets": {
|
||||
|
@ -29,7 +29,7 @@ config = LoraConfig(
|
||||
)
|
||||
|
||||
# We load our model and processor using `transformers`
|
||||
model = AutoModelForVision2Seq.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True, device_map={"": 0})
|
||||
model = AutoModelForVision2Seq.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True)
|
||||
processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
|
||||
|
||||
# Get our peft model and print the number of trainable parameters
|
||||
|
@ -422,16 +422,11 @@ def evaluation_loop(model, eval_dataloader, processor, normalizer, metric, force
|
||||
def main():
|
||||
args = parse_args()
|
||||
|
||||
# initialize accelerator
|
||||
accelerator = (
|
||||
Accelerator(
|
||||
log_with=args.report_to,
|
||||
project_dir=args.output_dir,
|
||||
gradient_accumulation_steps=args.gradient_accumulation_steps,
|
||||
)
|
||||
if args.with_tracking
|
||||
else Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps)
|
||||
)
|
||||
accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps}
|
||||
if args.with_tracking:
|
||||
accelerator_kwargs["log_with"] = args.report_to
|
||||
accelerator_kwargs["project_dir"] = args.output_dir
|
||||
accelerator = Accelerator(**accelerator_kwargs)
|
||||
|
||||
# Make one log on every process with the configuration for debugging.
|
||||
logging.basicConfig(
|
||||
@ -538,9 +533,7 @@ def main():
|
||||
metric = evaluate.load("wer")
|
||||
|
||||
# model
|
||||
model = WhisperForConditionalGeneration.from_pretrained(
|
||||
args.model_name_or_path, load_in_8bit=True, device_map="auto"
|
||||
)
|
||||
model = WhisperForConditionalGeneration.from_pretrained(args.model_name_or_path, load_in_8bit=True)
|
||||
model.config.forced_decoder_ids = None
|
||||
model.config.suppress_tokens = []
|
||||
if len(set(model.hf_device_map.values()).intersection({"cpu", "disk"})) > 0:
|
||||
|
@ -64,7 +64,7 @@
|
||||
"!pip install evaluate>=0.30\n",
|
||||
"!pip install jiwer\n",
|
||||
"!pip install gradio\n",
|
||||
"!pip install -q bitsandbytes datasets accelerate loralib\n",
|
||||
"!pip install -q bitsandbytes datasets accelerate\n",
|
||||
"!pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git@main"
|
||||
]
|
||||
},
|
||||
@ -1104,7 +1104,7 @@
|
||||
"source": [
|
||||
"from transformers import WhisperForConditionalGeneration\n",
|
||||
"\n",
|
||||
"model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path, load_in_8bit=True, device_map=\"auto\")\n",
|
||||
"model = WhisperForConditionalGeneration.from_pretrained(model_name_or_path, load_in_8bit=True)\n",
|
||||
"\n",
|
||||
"# model.hf_device_map - this should be {\" \": 0}"
|
||||
]
|
||||
@ -1930,7 +1930,7 @@
|
||||
},
|
||||
"gpuClass": "standard",
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"display_name": "Python 3.10.11 ('accelerate': conda)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
@ -1944,7 +1944,12 @@
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.6"
|
||||
"version": "3.10.11"
|
||||
},
|
||||
"vscode": {
|
||||
"interpreter": {
|
||||
"hash": "1219a10c7def3e2ad4f431cfa6f49d569fcc5949850132f23800e792129eefbb"
|
||||
}
|
||||
},
|
||||
"widgets": {
|
||||
"application/vnd.jupyter.widget-state+json": {
|
||||
|
140  examples/loftq_finetuning/README.md  Normal file
@@ -0,0 +1,140 @@
|
||||
# LoftQ: LoRA-fine-tuning-aware Quantization
|
||||
|
||||
## Introduction
|
||||
|
||||
LoftQ finds a quantized LoRA initialization, i.e. a quantized backbone Q and LoRA adapters A and B, given a pre-trained weight W.
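Roughly speaking (this paraphrases Algorithm 1 of the [LoftQ paper](https://arxiv.org/abs/2310.08659) and is a sketch rather than the exact implementation), the initialization minimizes the reconstruction error of the quantized-plus-low-rank approximation

$$\min_{Q,\,A,\,B}\ \lVert W - Q - A B^\top \rVert_F,$$

found by alternating a quantization step $Q_t = q(W - A_{t-1} B_{t-1}^\top)$ with a rank-$r$ truncated SVD step $(A_t, B_t) = \mathrm{SVD}_r(W - Q_t)$ for a few iterations (the `--iter` argument used later in this README).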
|
||||
|
||||
## Quick Start
|
||||
Steps:
|
||||
|
||||
1. Apply LoftQ to a full-precision pre-trained weight and save.
|
||||
2. Load LoftQ initialization and train.
|
||||
|
||||
For step 1, we have provided off-the-shelf LoftQ initializations (see the [supported model list](#appendix-off-the-shelf-model-list))
|
||||
in [Huggingface Hub LoftQ](https://huggingface.co/LoftQ).
|
||||
If you want to do it yourself, jump to [LoftQ DIY](#loftq-diy).
|
||||
|
||||
For step 2, below is an example of loading a 4-bit Mistral-7B with rank-64 LoRA adapters from the Huggingface Hub.
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
|
||||
from peft import PeftModel
|
||||
|
||||
MODEL_ID = "LoftQ/Mistral-7B-v0.1-4bit-64rank"
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained(
|
||||
MODEL_ID,
|
||||
torch_dtype=torch.bfloat16, # you may change it with different models
|
||||
quantization_config=BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_compute_dtype=torch.bfloat16, # bfloat16 is recommended
|
||||
bnb_4bit_use_double_quant=False,
|
||||
bnb_4bit_quant_type='nf4',
|
||||
),
|
||||
)
|
||||
peft_model = PeftModel.from_pretrained(
|
||||
base_model,
|
||||
MODEL_ID,
|
||||
subfolder="loftq_init",
|
||||
is_trainable=True,
|
||||
)
|
||||
|
||||
# Do training with peft_model ...
|
||||
```
|
||||
|
||||
## LoftQ DIY
|
||||
|
||||
### Apply LoftQ and save
|
||||
We provide [quantize_save_load.py](quantize_save_load.py) as an example to apply LoftQ with
|
||||
different bits (`--bits`), ranks (`--rank`), and alternating steps (`--iter`, a hyper-parameter in LoftQ; see Algorithm 1 in the [LoftQ paper](https://arxiv.org/abs/2310.08659)). Currently, this example supports
|
||||
`llama-2`, `falcon`, `mistral`, `bart`, `t5`, `deberta`, `bert`, `roberta`.
|
||||
|
||||
Below is an example of obtaining 4bit LLAMA-2-7b with 16-rank LoRA adapters by 5 alternating steps.
|
||||
```sh
|
||||
SAVE_DIR="model_zoo/loftq/"
|
||||
python quantize_save_load.py \
|
||||
--model_name_or_path meta-llama/Llama-2-7b-hf \ # high-precision model id in HF
|
||||
--token HF_TOKEN \ # your HF token if the model is private, e.g., llama-2
|
||||
--bits 4 \
|
||||
--iter 5 \
|
||||
--rank 16 \
|
||||
--save_dir $SAVE_DIR
|
||||
```
|
||||
|
||||
The above command creates the model directory under `$SAVE_DIR`.
|
||||
Specifically, the model directory is named as
|
||||
|
||||
`MODEL_DIR = SAVE_DIR + f"{args.model_name_or_path.split('/')[-1]}-{args.bits}bit-{args.rank}rank"`
|
||||
|
||||
In this example, `MODEL_DIR="model_zoo/loftq/Llama-2-7b-hf-4bit-16rank"`, where the backbone is stored in `$MODEL_DIR`
|
||||
and the LoRA adapters are at the sub-folder `$MODEL_DIR/loftq_init`.
|
||||
|
||||
### Load and train
|
||||
Similar to loading from Huggingface Hub, we only need to change the `MODEL_ID` to the `MODEL_DIR`.
|
||||
|
||||
```python
|
||||
import torch
|
||||
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
|
||||
from peft import PeftModel
|
||||
|
||||
MODEL_DIR = "model_zoo/loftq/Llama-2-7b-hf-4bit-16rank"
|
||||
|
||||
base_model = AutoModelForCausalLM.from_pretrained(
|
||||
MODEL_DIR,
|
||||
torch_dtype=torch.bfloat16,
|
||||
quantization_config=BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_compute_dtype=torch.bfloat16,
|
||||
bnb_4bit_use_double_quant=False,
|
||||
bnb_4bit_quant_type='nf4',
|
||||
),
|
||||
)
|
||||
peft_model = PeftModel.from_pretrained(
|
||||
base_model,
|
||||
MODEL_DIR,
|
||||
subfolder="loftq_init",
|
||||
is_trainable=True,
|
||||
)
|
||||
# Do training with peft_model ...
|
||||
```
|
||||
|
||||
## LoftQ Fine-tuning
|
||||
|
||||
We also provide an example to fine-tune LoftQ on GSM8K.
|
||||
We load the quantized backbone and LoRA adapters from the [LoftQ Huggingface hub](https://huggingface.co/LoftQ).
|
||||
|
||||
```sh
|
||||
python train_gsm8k_llama.py \
|
||||
--model_name_or_path LoftQ/Llama-2-13b-hf-4bit-64rank \
|
||||
--output_dir exp_results/gsm8k/llama-2-13b/bit4-rank64/lr1e-4 \
|
||||
--learning_rate 1e-4 \
|
||||
--weight_decay 0.1 \
|
||||
--lr_scheduler_type cosine \
|
||||
--num_warmup_steps 100 \
|
||||
--seed 202 \
|
||||
--dataset_name gsm8k \
|
||||
--dataset_config main \
|
||||
--pad_to_max_length \
|
||||
--max_source_length 128 \
|
||||
--max_target_length 256 \
|
||||
--num_train_epochs 5 \
|
||||
--per_device_train_batch_size 4 \
|
||||
--per_device_eval_batch_size 4 \
|
||||
--gradient_accumulation_steps 4 \
|
||||
--with_tracking \
|
||||
--report_to tensorboard
|
||||
```
|
||||
|
||||
|
||||
## Appendix: Off-the-shelf Model List
|
||||
| Model Name | Bits | Ranks |
|
||||
| ----------- | ---- | ----- |
|
||||
| LLAMA-2-7b | 4 | 64 |
|
||||
| LLAMA-2-13b | 4 | 64 |
|
||||
| LLAMA-2-70b | 4 | 64 |
|
||||
| Mistral | 4 | 64 |
|
||||
| Mistral | 4 | 32 |
|
||||
| BART-large | 4 | 8 |
|
||||
| BART-large | 4 | 16 |
|
||||
| BART-large | 4 | 32 |
|
||||
| BART-large | 2 | 8 |
|
194  examples/loftq_finetuning/quantize_save_load.py  Normal file
@@ -0,0 +1,194 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2023-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import os
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from transformers import (
|
||||
AutoModelForCausalLM,
|
||||
AutoModelForSeq2SeqLM,
|
||||
AutoModelForSequenceClassification,
|
||||
AutoTokenizer,
|
||||
)
|
||||
|
||||
from peft import LoftQConfig, LoraConfig, TaskType, get_peft_model
|
||||
|
||||
|
||||
class Shell(nn.Module):
|
||||
def __init__(self, weight, bias=None):
|
||||
super().__init__()
|
||||
self.weight = nn.Parameter(weight, requires_grad=False)
|
||||
if bias is not None:
|
||||
self.bias = nn.Parameter(bias, requires_grad=False)
|
||||
|
||||
|
||||
def unwrap_model(model, sub_module_name=".base_layer"):
|
||||
sub_module_name_list = [k.split(sub_module_name)[0] for k in model.state_dict().keys() if sub_module_name in k]
|
||||
sub_module_name_set = set(sub_module_name_list)
|
||||
for name in sub_module_name_set:
|
||||
# get the parent of the submodule
|
||||
name_parent = ".".join(name.split(".")[:-1])
|
||||
name_child = name.split(".")[-1]
|
||||
sub_module = model.get_submodule(name_parent)
|
||||
print(sub_module)
|
||||
|
||||
# replace with shell
|
||||
child = getattr(sub_module, name_child)
|
||||
weight = getattr(child.base_layer, "weight", None)
|
||||
bias = getattr(child.base_layer, "bias", None)
|
||||
shell = Shell(weight, bias)
|
||||
|
||||
setattr(sub_module, name_child, shell)
|
||||
|
||||
print("You have unwrapped the model. Use it on your own risk.")
|
||||
|
||||
|
||||
def print_model(model, name):
|
||||
print("=" * 10 + name + "=" * 10)
|
||||
print(model)
|
||||
for name, param in model.named_parameters():
|
||||
if torch.is_tensor(param):
|
||||
if param.dtype in [torch.float32, torch.float16]:
|
||||
print(
|
||||
name,
|
||||
param.shape,
|
||||
param.device,
|
||||
param.dtype,
|
||||
param.requires_grad,
|
||||
param.mean().item(),
|
||||
param.max().item(),
|
||||
)
|
||||
else:
|
||||
print(name, param.shape, param.device, param.dtype, param.requires_grad)
|
||||
|
||||
|
||||
def arg_parse():
|
||||
parser = argparse.ArgumentParser(description="Quantize a model with LoftQ.")
|
||||
parser.add_argument(
|
||||
"--model_name_or_path",
|
||||
type=str,
|
||||
default=None,
|
||||
required=True,
|
||||
help="The name or path of the fp32/16 model.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--token",
|
||||
type=str,
|
||||
default=None,
|
||||
help="The access token to download model from HuggingFace Hub.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--bits",
|
||||
type=int,
|
||||
default=4,
|
||||
help="The quantized bits",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--iter",
|
||||
type=int,
|
||||
default=1,
|
||||
help="The alternating steps in LoftQ",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--rank",
|
||||
type=int,
|
||||
default=16,
|
||||
help="The rank of the LoRA adapter",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--save_dir",
|
||||
type=str,
|
||||
default="./model_zoo/loftq/",
|
||||
help="The rank of the LoRA adapter",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
return args
|
||||
|
||||
|
||||
def quantize_and_save():
|
||||
args = arg_parse()
|
||||
|
||||
# Download weights and configure LoRA
|
||||
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True)
|
||||
if any(name in args.model_name_or_path.lower() for name in ["llama", "mistral", "falcon"]):
|
||||
model = AutoModelForCausalLM.from_pretrained(args.model_name_or_path, token=args.token, trust_remote_code=True)
|
||||
task_type = TaskType.CAUSAL_LM
|
||||
target_modules = ["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "down_proj", "gate_proj"]
|
||||
|
||||
elif any(name in args.model_name_or_path.lower() for name in ["bart", "t5"]):
|
||||
model = AutoModelForSeq2SeqLM.from_pretrained(args.model_name_or_path, token=args.token)
|
||||
task_type = TaskType.SEQ_2_SEQ_LM
|
||||
target_modules = ["q_proj", "k_proj", "v_proj", "fc1", "fc2", "out_proj"]
|
||||
|
||||
elif any(name in args.model_name_or_path.lower() for name in ["deberta", "roberta", "bert"]):
|
||||
model = AutoModelForSequenceClassification.from_pretrained(args.model_name_or_path, token=args.token)
|
||||
task_type = TaskType.SEQ_CLS
|
||||
target_modules = ["query_proj", "key_proj", "value_proj", "dense"] # embeddings not supported by peft
|
||||
else:
|
||||
raise NotImplementedError("Other models not supported yet.")
|
||||
|
||||
# Config of LoftQ
|
||||
loftq_config = LoftQConfig(loftq_bits=args.bits, loftq_iter=args.iter)
|
||||
|
||||
lora_config = LoraConfig(
|
||||
task_type=task_type,
|
||||
inference_mode=True,
|
||||
r=args.rank,
|
||||
lora_alpha=16 if task_type is TaskType.CAUSAL_LM else args.rank,
|
||||
lora_dropout=0.1,
|
||||
target_modules=target_modules,
|
||||
init_lora_weights="loftq",
|
||||
loftq_config=loftq_config,
|
||||
)
|
||||
|
||||
# Obtain LoftQ model
|
||||
lora_model = get_peft_model(model, lora_config)
|
||||
base_model = lora_model.get_base_model()
|
||||
|
||||
# Save LoftQ model
|
||||
model_name = args.model_name_or_path.split("/")[-1] + f"-{args.bits}bit" + f"-{args.rank}rank"
|
||||
base_model_dir = os.path.join(args.save_dir, model_name)
|
||||
lora_model_dir = os.path.join(args.save_dir, model_name, "loft_init")
|
||||
|
||||
# save lora adapters first
|
||||
lora_model.base_model.peft_config[
|
||||
"default"
|
||||
].base_model_name_or_path = base_model_dir # This can be a local path or Hub model id
|
||||
lora_model.base_model.peft_config["default"].init_lora_weights = True # Don't apply LoftQ when loading again
|
||||
|
||||
lora_model.save_pretrained(lora_model_dir)
|
||||
print_model(lora_model, "lora_model")
|
||||
|
||||
# remove lora adapters and save the backbone
|
||||
unwrap_model(base_model)
|
||||
base_model.save_pretrained(base_model_dir)
|
||||
tokenizer.save_pretrained(base_model_dir)
|
||||
|
||||
print_model(base_model, "base_model")
|
||||
|
||||
return base_model_dir, lora_model_dir
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
base_dir, lora_dir = quantize_and_save()
|
||||
|
||||
# example command:
|
||||
# python quantize_save_load.py \
|
||||
# --model_name_or_path meta-llama/Llama-2-7b-hf \
|
||||
# --token XXX \
|
||||
# --bits 4 --iter 5 --rank 16 \
|
||||
# --save_dir ./model_zoo/loftq/
|
846  examples/loftq_finetuning/train_gsm8k_llama.py  Normal file
@@ -0,0 +1,846 @@
|
||||
# coding=utf-8
|
||||
# Copyright 2023-present the HuggingFace Inc. team.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import argparse
|
||||
import copy
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import random
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
import datasets
|
||||
import torch
|
||||
import transformers
|
||||
from accelerate import Accelerator, DistributedType
|
||||
from accelerate.logging import get_logger
|
||||
from accelerate.utils import set_seed
|
||||
from datasets import load_dataset
|
||||
from huggingface_hub import Repository, create_repo
|
||||
from torch.utils.data import DataLoader
|
||||
from tqdm.auto import tqdm
|
||||
from transformers import (
|
||||
CONFIG_MAPPING,
|
||||
MODEL_MAPPING,
|
||||
AutoConfig,
|
||||
AutoModelForCausalLM,
|
||||
AutoTokenizer,
|
||||
BitsAndBytesConfig,
|
||||
SchedulerType,
|
||||
default_data_collator,
|
||||
get_scheduler,
|
||||
)
|
||||
from transformers.utils import send_example_telemetry
|
||||
from transformers.utils.versions import require_version
|
||||
|
||||
from peft import PeftModel
|
||||
|
||||
|
||||
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
|
||||
# check_min_version("4.32.0.dev0")
|
||||
|
||||
logger = get_logger(__name__)
|
||||
|
||||
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
|
||||
|
||||
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
|
||||
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
|
||||
|
||||
|
||||
def parse_args():
|
||||
parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
|
||||
parser.add_argument(
|
||||
"--dataset_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help="The name of the dataset to use (via the datasets library).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--dataset_config_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help="The configuration name of the dataset to use (via the datasets library).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--train_file", type=str, default=None, help="A csv, txt or a json file containing the training data."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--validation_file", type=str, default=None, help="A csv, txt or a json file containing the validation data."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--validation_split_percentage",
|
||||
default=5,
|
||||
help="The percentage of the train set used as validation set in case there's no validation split",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--model_name_or_path",
|
||||
type=str,
|
||||
help="Path to pretrained model or model identifier from huggingface.co/models.",
|
||||
required=False,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--config_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Pretrained config name or path if not the same as model_name",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--tokenizer_name",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Pretrained tokenizer name or path if not the same as model_name",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--use_slow_tokenizer",
|
||||
action="store_true",
|
||||
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--per_device_train_batch_size",
|
||||
type=int,
|
||||
default=8,
|
||||
help="Batch size (per device) for the training dataloader.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--per_device_eval_batch_size",
|
||||
type=int,
|
||||
default=8,
|
||||
help="Batch size (per device) for the evaluation dataloader.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--learning_rate",
|
||||
type=float,
|
||||
default=5e-5,
|
||||
help="Initial learning rate (after the potential warmup period) to use.",
|
||||
)
|
||||
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
|
||||
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
|
||||
parser.add_argument(
|
||||
"--max_train_steps",
|
||||
type=int,
|
||||
default=None,
|
||||
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--gradient_accumulation_steps",
|
||||
type=int,
|
||||
default=1,
|
||||
help="Number of updates steps to accumulate before performing a backward/update pass.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--lr_scheduler_type",
|
||||
type=SchedulerType,
|
||||
default="linear",
|
||||
help="The scheduler type to use.",
|
||||
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
|
||||
)
|
||||
parser.add_argument(
|
||||
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
|
||||
)
|
||||
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
|
||||
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
|
||||
parser.add_argument(
|
||||
"--model_type",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Model type to use if training from scratch.",
|
||||
choices=MODEL_TYPES,
|
||||
)
|
||||
parser.add_argument(
|
||||
"--ignore_pad_token_for_loss",
|
||||
type=bool,
|
||||
default=True,
|
||||
help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max_source_length",
|
||||
type=int,
|
||||
default=128,
|
||||
help=(
|
||||
"The maximum total input sequence length after "
|
||||
"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--max_target_length",
|
||||
type=int,
|
||||
default=128,
|
||||
help=(
|
||||
"The maximum total sequence length for target text after "
|
||||
"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
|
||||
"during ``evaluate`` and ``predict``."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pad_to_max_length",
|
||||
action="store_true",
|
||||
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--preprocessing_num_workers",
|
||||
type=int,
|
||||
default=None,
|
||||
help="The number of processes to use for the preprocessing.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
|
||||
)
|
||||
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
|
||||
parser.add_argument(
|
||||
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
|
||||
)
|
||||
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
|
||||
parser.add_argument(
|
||||
"--trust_remote_code",
|
||||
type=bool,
|
||||
default=False,
|
||||
help=(
|
||||
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
|
||||
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
|
||||
"execute code present on the Hub on your local machine."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--checkpointing_steps",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--resume_from_checkpoint",
|
||||
type=str,
|
||||
default=None,
|
||||
help="If the training should continue from a checkpoint folder.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--with_tracking",
|
||||
action="store_true",
|
||||
help="Whether to enable experiment trackers for logging.",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--report_to",
|
||||
type=str,
|
||||
default="tensorboard",
|
||||
help=(
|
||||
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
|
||||
' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
|
||||
"Only applicable when `--with_tracking` is passed."
|
||||
),
|
||||
)
|
||||
parser.add_argument(
|
||||
"--low_cpu_mem_usage",
|
||||
action="store_true",
|
||||
help=(
|
||||
"It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
|
||||
"If passed, LLM loading time and RAM consumption will be benefited."
|
||||
),
|
||||
)
|
||||
##########################
|
||||
# Generation Config #
|
||||
##########################
|
||||
parser.add_argument(
|
||||
"--temperature",
|
||||
type=float,
|
||||
default=0.8,
|
||||
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
|
||||
)
|
||||
parser.add_argument("--k", type=int, default=40, help="Choose k candidate words")
|
||||
parser.add_argument("--p", type=float, default=0.95, help="The sum of probability of candidate words is 0.9 ")
|
||||
|
||||
##########################
|
||||
# Exp Args #
|
||||
##########################
|
||||
parser.add_argument(
|
||||
"--adapter_name_or_path",
|
||||
type=str,
|
||||
default=None,
|
||||
help=(
|
||||
"The LoRA adapter checkpoint. Set None if you want to fine-tune from LoftQ."
|
||||
"Specify a path if you want to evaluate."
|
||||
),
|
||||
)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Sanity checks
|
||||
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
|
||||
raise ValueError("Need either a dataset name or a training/validation file.")
|
||||
else:
|
||||
if args.train_file is not None:
|
||||
extension = args.train_file.split(".")[-1]
|
||||
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
|
||||
if args.validation_file is not None:
|
||||
extension = args.validation_file.split(".")[-1]
|
||||
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
|
||||
|
||||
if args.push_to_hub:
|
||||
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
|
||||
|
||||
return args
|
||||
|
||||
|
||||
def main():
|
||||
args = parse_args()
|
||||
|
||||
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
|
||||
# information sent is the one passed as arguments along with your Python/PyTorch versions.
|
||||
send_example_telemetry("run_clm_no_trainer", args)
|
||||
|
||||
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
|
||||
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
|
||||
# in the environment
|
||||
accelerator_log_kwargs = {}
|
||||
|
||||
if args.with_tracking:
|
||||
accelerator_log_kwargs["log_with"] = args.report_to
|
||||
accelerator_log_kwargs["project_dir"] = args.output_dir
|
||||
|
||||
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
|
||||
|
||||
# Make one log on every process with the configuration for debugging.
|
||||
logging.basicConfig(
|
||||
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
|
||||
datefmt="%m/%d/%Y %H:%M:%S",
|
||||
level=logging.INFO,
|
||||
)
|
||||
logger.info(accelerator.state, main_process_only=False)
|
||||
if accelerator.is_local_main_process:
|
||||
datasets.utils.logging.set_verbosity_warning()
|
||||
transformers.utils.logging.set_verbosity_info()
|
||||
else:
|
||||
datasets.utils.logging.set_verbosity_error()
|
||||
transformers.utils.logging.set_verbosity_error()
|
||||
|
||||
# If passed along, set the training seed now.
|
||||
if args.seed is not None:
|
||||
set_seed(args.seed)
|
||||
|
||||
# Handle the repository creation
|
||||
if accelerator.is_main_process:
|
||||
if args.push_to_hub:
|
||||
            # Retrieve or infer repo_name
|
||||
repo_name = args.hub_model_id
|
||||
if repo_name is None:
|
||||
repo_name = Path(args.output_dir).absolute().name
|
||||
# Create repo and retrieve repo_id
|
||||
repo_id = create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id
|
||||
# Clone repo locally
|
||||
repo = Repository(args.output_dir, clone_from=repo_id, token=args.hub_token)
|
||||
|
||||
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
|
||||
if "step_*" not in gitignore:
|
||||
gitignore.write("step_*\n")
|
||||
if "epoch_*" not in gitignore:
|
||||
gitignore.write("epoch_*\n")
|
||||
elif args.output_dir is not None:
|
||||
os.makedirs(args.output_dir, exist_ok=True)
|
||||
accelerator.wait_for_everyone()
|
||||
|
||||
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
|
||||
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
|
||||
# (the dataset will be downloaded automatically from the datasets Hub).
|
||||
#
|
||||
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
|
||||
# 'text' is found. You can easily tweak this behavior (see below).
|
||||
#
|
||||
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
|
||||
# download the dataset.
|
||||
if args.dataset_name is not None:
|
||||
# Downloading and loading a dataset from the hub.
|
||||
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
|
||||
if "validation" not in raw_datasets.keys():
|
||||
raw_datasets["validation"] = load_dataset(
|
||||
args.dataset_name,
|
||||
args.dataset_config_name,
|
||||
split=f"train[:{args.validation_split_percentage}%]",
|
||||
)
|
||||
raw_datasets["train"] = load_dataset(
|
||||
args.dataset_name,
|
||||
args.dataset_config_name,
|
||||
split=f"train[{args.validation_split_percentage}%:]",
|
||||
)
|
||||
else:
|
||||
data_files = {}
|
||||
dataset_args = {}
|
||||
if args.train_file is not None:
|
||||
data_files["train"] = args.train_file
|
||||
if args.validation_file is not None:
|
||||
data_files["validation"] = args.validation_file
|
||||
extension = args.train_file.split(".")[-1]
|
||||
if extension == "txt":
|
||||
extension = "text"
|
||||
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
|
||||
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
|
||||
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
|
||||
if "validation" not in raw_datasets.keys():
|
||||
raw_datasets["validation"] = load_dataset(
|
||||
extension,
|
||||
data_files=data_files,
|
||||
split=f"train[:{args.validation_split_percentage}%]",
|
||||
**dataset_args,
|
||||
)
|
||||
raw_datasets["train"] = load_dataset(
|
||||
extension,
|
||||
data_files=data_files,
|
||||
split=f"train[{args.validation_split_percentage}%:]",
|
||||
**dataset_args,
|
||||
)
|
||||
|
||||
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
|
||||
# https://huggingface.co/docs/datasets/loading_datasets.html.
|
||||
|
||||
# Load pretrained model and tokenizer
|
||||
#
|
||||
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
|
||||
# download model & vocab.
|
||||
if args.config_name:
|
||||
config = AutoConfig.from_pretrained(
|
||||
args.config_name,
|
||||
trust_remote_code=args.trust_remote_code,
|
||||
)
|
||||
elif args.model_name_or_path:
|
||||
config = AutoConfig.from_pretrained(
|
||||
args.model_name_or_path,
|
||||
trust_remote_code=args.trust_remote_code,
|
||||
)
|
||||
else:
|
||||
config = CONFIG_MAPPING[args.model_type]()
|
||||
logger.warning("You are instantiating a new config instance from scratch.")
|
||||
|
||||
if args.tokenizer_name:
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
|
||||
)
|
||||
elif args.model_name_or_path:
|
||||
tokenizer = AutoTokenizer.from_pretrained(
|
||||
args.model_name_or_path,
|
||||
use_fast=not args.use_slow_tokenizer,
|
||||
trust_remote_code=args.trust_remote_code,
|
||||
)
|
||||
else:
|
||||
raise ValueError(
|
||||
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
|
||||
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
|
||||
)
|
||||
|
||||
##########################
|
||||
# Tokenizer #
|
||||
##########################
|
||||
tokenizer.pad_token_id = 0 # unk. we want this to be different from the eos token
|
||||
tokenizer.padding_side = "left" # Allow batched inference
|
||||
tokenizer.truncation_side = "left"
|
||||
|
||||
if args.model_name_or_path:
|
||||
model = AutoModelForCausalLM.from_pretrained(
|
||||
args.model_name_or_path,
|
||||
from_tf=bool(".ckpt" in args.model_name_or_path),
|
||||
config=config,
|
||||
low_cpu_mem_usage=True,
|
||||
quantization_config=BitsAndBytesConfig(
|
||||
load_in_4bit=True,
|
||||
bnb_4bit_use_double_quant=False,
|
||||
bnb_4bit_quant_type="nf4",
|
||||
bnb_4bit_compute_dtype=config.torch_dtype,
|
||||
),
|
||||
)
|
||||
else:
|
||||
logger.info("Training new model from scratch")
|
||||
model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code)
|
||||
|
||||
##########################
|
||||
# Peft Model #
|
||||
##########################
|
||||
if args.adapter_name_or_path is None:
|
||||
model = PeftModel.from_pretrained(model, args.model_name_or_path, subfolder="loftq_init", is_trainable=True)
|
||||
else:
|
||||
model = PeftModel.from_pretrained(model, args.adapter_name_or_path, is_trainable=True)
|
||||
model.print_trainable_parameters()
|
||||
|
||||
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
|
||||
# on a small vocab and want a smaller embedding size, remove this test.
|
||||
embedding_size = model.get_input_embeddings().weight.shape[0]
|
||||
if len(tokenizer) > embedding_size:
|
||||
model.resize_token_embeddings(len(tokenizer))
|
||||
|
||||
# Preprocessing the datasets.
|
||||
# First we tokenize all the texts.
|
||||
##########################
|
||||
# GSM8K dataset #
|
||||
##########################
|
||||
|
||||
# Preprocessing the datasets.
|
||||
# First we tokenize all the texts.
|
||||
column_names = raw_datasets["train"].column_names
|
||||
|
||||
# Get the column names for source/target.
|
||||
source_column, target_column = "question", "answer"
|
||||
|
||||
# Temporarily set max_target_length for training.
|
||||
padding = "max_length" if args.pad_to_max_length else False
|
||||
task_prompt = "\nAnswer the above question. First think step by step and then answer the final number.\n"
|
||||
|
||||
def prompt_process(sent_1, sent_2, prompt_1="", prompt_2="", prompt_3=""):
|
||||
sent_2 = sent_2.replace("####", "The final answer is")
|
||||
return prompt_1 + sent_1 + prompt_2 + sent_2 + prompt_3
|
||||
|
||||
def preprocess_function_train(examples):
|
||||
sources = examples[source_column]
|
||||
targets = examples[target_column]
|
||||
|
||||
inputs = [prompt_process(source, target, prompt_2=task_prompt) for (source, target) in zip(sources, targets)]
|
||||
|
||||
model_inputs = tokenizer(
|
||||
inputs,
|
||||
max_length=args.max_source_length + args.max_target_length,
|
||||
padding=padding,
|
||||
truncation=True,
|
||||
return_tensors="pt",
|
||||
)
|
||||
|
||||
labels = copy.deepcopy(model_inputs)
|
||||
|
||||
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
|
||||
# padding in the loss.
|
||||
if padding == "max_length" and args.ignore_pad_token_for_loss:
|
||||
# get the length of the target tokens. -1 to kick out the <BOS> token
|
||||
target_tokens = tokenizer(targets, padding=False)
|
||||
target_len = [len(label) - 1 for label in target_tokens["input_ids"]]
|
||||
|
||||
# don't calculate the loss from source and padding (left padding)
|
||||
for i in range(len(labels["input_ids"])):
|
||||
labels["input_ids"][i, : -target_len[i]] = -100
|
||||
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
return model_inputs
|
||||
|
||||
def preprocess_function_test(examples):
|
||||
sources = examples[source_column]
|
||||
labels = examples[target_column]
|
||||
|
||||
inputs = [source + task_prompt for source in sources]
|
||||
|
||||
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
|
||||
labels = tokenizer(labels, max_length=args.max_target_length, padding=padding, truncation=True)
|
||||
|
||||
model_inputs["labels"] = labels["input_ids"]
|
||||
|
||||
return model_inputs
|
||||
|
||||
with accelerator.main_process_first():
|
||||
train_dataset = raw_datasets["train"].map(
|
||||
preprocess_function_train,
|
||||
batched=True,
|
||||
num_proc=args.preprocessing_num_workers,
|
||||
remove_columns=column_names,
|
||||
load_from_cache_file=not args.overwrite_cache,
|
||||
desc="Running tokenizer on training dataset",
|
||||
)
|
||||
|
||||
eval_dataset = raw_datasets["test"].map(
|
||||
preprocess_function_test,
|
||||
batched=True,
|
||||
num_proc=args.preprocessing_num_workers,
|
||||
remove_columns=column_names,
|
||||
load_from_cache_file=not args.overwrite_cache,
|
||||
desc="Running tokenizer on test dataset",
|
||||
)
|
||||
|
||||
# Log a few random samples from the set:
|
||||
for index in random.sample(range(len(train_dataset)), 2):
|
||||
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
|
||||
for index in random.sample(range(len(eval_dataset)), 2):
|
||||
logger.info(f"Sample {index} of the validation set: {eval_dataset[index]}.")
|
||||
|
||||
# DataLoaders creation:
|
||||
train_dataloader = DataLoader(
|
||||
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
|
||||
)
|
||||
eval_dataloader = DataLoader(
|
||||
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
|
||||
)
|
||||
|
||||
# Optimizer
|
||||
# Split weights in two groups, one with weight decay and the other not.
|
||||
no_decay = ["bias", "layer_norm.weight"]
|
||||
optimizer_grouped_parameters = [
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and "lora" in n],
|
||||
"weight_decay": args.weight_decay,
|
||||
},
|
||||
{
|
||||
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
|
||||
"weight_decay": 0.0,
|
||||
},
|
||||
]
|
||||
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
|
||||
|
||||
# Scheduler and math around the number of training steps.
|
||||
overrode_max_train_steps = False
|
||||
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
|
||||
if args.max_train_steps is None:
|
||||
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
|
||||
overrode_max_train_steps = True
|
||||
|
||||
lr_scheduler = get_scheduler(
|
||||
name=args.lr_scheduler_type,
|
||||
optimizer=optimizer,
|
||||
num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,
|
||||
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
|
||||
)
|
||||
|
||||
# Prepare everything with our `accelerator`.
|
||||
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
    if accelerator.distributed_type == DistributedType.TPU:
        model.tie_weights()

    # We need to recalculate our total training steps as the size of the training dataloader may have changed.
    num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
    if overrode_max_train_steps:
        args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
    # Afterwards we recalculate our number of training epochs
    args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)

    # Figure out how many steps we should save the Accelerator states
    checkpointing_steps = args.checkpointing_steps
    if checkpointing_steps is not None and checkpointing_steps.isdigit():
        checkpointing_steps = int(checkpointing_steps)

    # We need to initialize the trackers we use, and also store our configuration.
    # The trackers initialize automatically on the main process.
    if args.with_tracking:
        experiment_config = vars(args)
        # TensorBoard cannot log Enums, need the raw value
        experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
        accelerator.init_trackers("clm_no_trainer", experiment_config)

    # Train!
    total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps

    logger.info("***** Running training *****")
    logger.info(f"  Num examples = {len(train_dataset)}")
    logger.info(f"  Num Epochs = {args.num_train_epochs}")
    logger.info(f"  Instantaneous batch size per device = {args.per_device_train_batch_size}")
    logger.info(f"  Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
    logger.info(f"  Gradient Accumulation steps = {args.gradient_accumulation_steps}")
    logger.info(f"  Total optimization steps = {args.max_train_steps}")
    # Only show the progress bar once on each machine.
    progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
    completed_steps = 0
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            checkpoint_path = args.resume_from_checkpoint
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
            checkpoint_path = path
            path = os.path.basename(checkpoint_path)

        accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
        accelerator.load_state(path)
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
            completed_steps = starting_epoch * num_update_steps_per_epoch
        else:
            # need to multiply by `gradient_accumulation_steps` to reflect real steps
            resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
            completed_steps = resume_step // args.gradient_accumulation_steps

    # update the progress_bar if we resumed from a checkpoint
    progress_bar.update(completed_steps)

    for epoch in range(starting_epoch, args.num_train_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We skip the first `n` batches in the dataloader when resuming from a checkpoint
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
        else:
            active_dataloader = train_dataloader
        for step, batch in enumerate(active_dataloader):
            with accelerator.accumulate(model):
                outputs = model(**batch)
                loss = outputs.loss
                # We keep track of the loss at each epoch
                if args.with_tracking:
                    total_loss += loss.detach().float()
                accelerator.backward(loss)
                if completed_steps % 50 == 0:
                    accelerator.print(f"Epoch: {epoch} | Step: {completed_steps} | Loss: {loss}")
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            # Checks if the accelerator has performed an optimization step behind the scenes
            if accelerator.sync_gradients:
                progress_bar.update(1)
                completed_steps += 1

            if isinstance(checkpointing_steps, int):
                if completed_steps % checkpointing_steps == 0:
                    output_dir = f"step_{completed_steps}"
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
            if completed_steps >= args.max_train_steps:
                break

        model.eval()
        gen_kwargs = {
            "max_new_tokens": args.max_target_length,
            "temperature": args.temperature,
            "top_k": args.k,
            "top_p": args.p,
            "do_sample": True,
        }
        ans_pred_list = []
        ans_gold_list = []
        for step, batch in enumerate(eval_dataloader):
            with torch.no_grad():
                gen_kwargs["input_ids"] = batch["input_ids"]
                gen_kwargs["attention_mask"] = batch["attention_mask"]
                generated_tokens = accelerator.unwrap_model(model).generate(**gen_kwargs)

            pred_tokens = generated_tokens[:, args.max_source_length :]
            pred_tokens = accelerator.pad_across_processes(pred_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            gold_tokens = batch["labels"]

            if not args.pad_to_max_length:
                # If we did not pad to max length, we need to pad the labels too
                gold_tokens = accelerator.pad_across_processes(
                    batch["labels"], dim=1, pad_index=tokenizer.pad_token_id
                )

            pred_tokens, gold_tokens = accelerator.gather_for_metrics((pred_tokens, gold_tokens))
            pred_tokens, gold_tokens = pred_tokens.cpu().numpy(), gold_tokens.cpu().numpy()

            if isinstance(pred_tokens, tuple):
                pred_tokens = pred_tokens[0]
            decoded_pred = tokenizer.batch_decode(pred_tokens, skip_special_tokens=True)
            decoded_gold = tokenizer.batch_decode(gold_tokens, skip_special_tokens=True)

            # Extract the numbers in sentences
            accelerator.print(decoded_pred)
            ans_pred_list += [extract_answer_number(sentence_pred) for sentence_pred in decoded_pred]
            ans_gold_list += [extract_answer_number(sentence_gold) for sentence_gold in decoded_gold]

        accelerator.print(ans_pred_list)
        accelerator.print(ans_gold_list)
        accuracy = compute_accuracy(ans_gold_list, ans_pred_list)

        logger.info(f"epoch {epoch}: accuracy: {accuracy}")

        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": accuracy,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                    "step": completed_steps,
                },
                step=completed_steps,
            )

        if args.push_to_hub and epoch < args.num_train_epochs - 1:
            accelerator.wait_for_everyone()
            unwrapped_model = accelerator.unwrap_model(model)
            unwrapped_model.save_pretrained(
                args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
            )
            if accelerator.is_main_process:
                tokenizer.save_pretrained(args.output_dir)
                repo.push_to_hub(
                    commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True
                )

        if args.checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)

    if args.with_tracking:
        accelerator.end_training()

    if args.output_dir is not None:
        accelerator.wait_for_everyone()
        unwrapped_model = accelerator.unwrap_model(model)
        unwrapped_model.save_pretrained(
            args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
        )
        if accelerator.is_main_process:
            tokenizer.save_pretrained(args.output_dir)
            if args.push_to_hub:
                repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True)


PATTERN_NUMBER = re.compile(r"-?\d+\.?\d*")


def extract_answer_number(sentence: str) -> float:
    sentence = sentence.replace(",", "")
    pred = PATTERN_NUMBER.findall(sentence)
    if not pred:
        return float("inf")
    segment = sentence.split("The final answer is ")
    if len(segment) > 1:
        pred_answer = segment[1]
        pred_answer = PATTERN_NUMBER.findall(pred_answer)
        if len(pred_answer) > 0:
            pred_answer = pred_answer[0]
        else:
            pred_answer = float(pred[-1])
    else:
        pred_answer = float(pred[-1])

    if isinstance(pred_answer, str):
        try:
            pred_answer = float(pred_answer)
        except ValueError:
            pred_answer = float("inf")
    return pred_answer


def compute_accuracy(pred: list, gold: list):
    acc = 0.0
    for p, g in zip(pred, gold):
        if p == g:
            acc += 1

    return acc / len(pred)


if __name__ == "__main__":
    main()
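As a quick sanity check of the two helpers defined at the bottom of this script, the snippet below runs extract_answer_number and compute_accuracy on made-up sentences; the sample strings are illustrative and assume the definitions above are in scope.

# Illustrative check of the helpers above; the sample sentences are made up.
sample_pred = "Natalia sold 48 clips in April and 24 in May. The final answer is 72"
sample_gold = "The final answer is 72"

pred = extract_answer_number(sample_pred)   # 72.0, the number after "The final answer is "
gold = extract_answer_number(sample_gold)   # 72.0
print(compute_accuracy([gold], [pred]))     # 1.0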
examples/lora_dreambooth/convert_kohya_ss_sd_lora_to_peft.py (new file, 175 lines)
@@ -0,0 +1,175 @@
import argparse
import os
from collections import Counter
from dataclasses import dataclass
from typing import Dict, Optional

import safetensors
import torch
from diffusers import UNet2DConditionModel
from transformers import CLIPTextModel

from peft import LoraConfig, get_peft_model, get_peft_model_state_dict, set_peft_model_state_dict


# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661
UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"]
UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"]
LORA_PREFIX_UNET = "lora_unet"
LORA_PREFIX_TEXT_ENCODER = "lora_te"


@dataclass
class LoRAInfo:
    kohya_key: str
    peft_key: str
    alpha: Optional[float] = None
    rank: Optional[int] = None
    lora_A: Optional[torch.Tensor] = None
    lora_B: Optional[torch.Tensor] = None

    def peft_state_dict(self) -> Dict[str, torch.Tensor]:
        if self.lora_A is None or self.lora_B is None:
            raise ValueError("Both lora_A and lora_B must be provided")
        return {f"{self.peft_key}.lora_A.weight": self.lora_A, f"{self.peft_key}.lora_B.weight": self.lora_B}


def construct_peft_loraconfig(info: Dict[str, LoRAInfo]) -> LoraConfig:
    """Constructs LoraConfig from data extracted from kohya checkpoint

    Args:
        info (Dict[str, LoRAInfo]): Information extracted from kohya checkpoint

    Returns:
        LoraConfig: config for constructing LoRA
    """

    # Unpack all ranks and alphas
    ranks = {x[0]: x[1].rank for x in info.items()}
    alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()}

    # Determine which modules need to be transformed
    target_modules = list(info.keys())

    # Determine most common rank and alpha
    r = Counter(ranks.values()).most_common(1)[0][0]
    lora_alpha = Counter(alphas.values()).most_common(1)[0][0]

    # Determine which modules have different rank and alpha
    rank_pattern = dict(filter(lambda x: x[1] != r, ranks.items()))
    alpha_pattern = dict(filter(lambda x: x[1] != lora_alpha, alphas.items()))

    config = LoraConfig(
        r=r,
        lora_alpha=lora_alpha,
        target_modules=target_modules,
        lora_dropout=0.0,
        bias="none",
        init_lora_weights=False,
        rank_pattern=rank_pattern,
        alpha_pattern=alpha_pattern,
    )

    return config


def combine_peft_state_dict(info: Dict[str, LoRAInfo]) -> Dict[str, torch.Tensor]:
    result = {}
    for key_name, key_info in info.items():
        result[f"base_model.model.{key_name}.lora_A.weight"] = key_info.lora_A
        result[f"base_model.model.{key_name}.lora_B.weight"] = key_info.lora_B
    return result


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use")

    parser.add_argument(
        "--kohya_lora_path", default=None, type=str, required=True, help="Path to kohya_ss trained LoRA"
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    args = parser.parse_args()

    # Load all models that we need to add adapter to
    text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder")
    unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet")

    # Construct possible mapping from kohya keys to peft keys
    models_keys = {}
    for model, model_key, model_name in [
        (text_encoder, LORA_PREFIX_TEXT_ENCODER, "text_encoder"),
        (unet, LORA_PREFIX_UNET, "unet"),
    ]:
        models_keys.update(
            {
                f"{model_key}.{peft_key}".replace(".", "_"): peft_key
                for peft_key in (x[0] for x in model.named_modules())
            }
        )

    # Store conversion info (model_type -> peft_key -> LoRAInfo)
    lora_info: Dict[str, Dict[str, LoRAInfo]] = {
        "text_encoder": {},
        "unet": {},
    }

    # Open kohya_ss checkpoint
    with safetensors.safe_open(args.kohya_lora_path, framework="pt", device="cpu") as f:
        # Extract information about LoRA structure
        metadata = f.metadata()

        # Iterate through available info and unpack all the values
        for key in f.keys():
            kohya_key, kohya_type = key.split(".")[:2]

            # Find which model this key belongs to
            if kohya_key.startswith(LORA_PREFIX_TEXT_ENCODER):
                model_type = "text_encoder"
            elif kohya_key.startswith(LORA_PREFIX_UNET):
                model_type = "unet"
            else:
                raise ValueError(f"Cannot determine model for key: {key}")

            # Find corresponding peft key
            if kohya_key not in models_keys:
                raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}")
            peft_key = models_keys[kohya_key]

            if peft_key not in lora_info[model_type]:
                lora_info[model_type][peft_key] = LoRAInfo(kohya_key=kohya_key, peft_key=peft_key)

            if kohya_type == "alpha":
                lora_info[model_type][peft_key].alpha = f.get_tensor(key).item()
            elif kohya_type == "lora_down":
                tensor = f.get_tensor(key)
                lora_info[model_type][peft_key].lora_A = tensor
                lora_info[model_type][peft_key].rank = tensor.shape[0]
            elif kohya_type == "lora_up":
                tensor = f.get_tensor(key)
                lora_info[model_type][peft_key].lora_B = tensor
                lora_info[model_type][peft_key].rank = tensor.shape[1]
            else:
                raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}")

    # Process each model
    for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]:
        config = construct_peft_loraconfig(lora_info[model_name])
        model = get_peft_model(model, config)

        keys_peft = list(get_peft_model_state_dict(model).keys())
        keys_new = list(combine_peft_state_dict(lora_info[model_name]).keys())

        set_peft_model_state_dict(model, combine_peft_state_dict(lora_info[model_name]))

        if args.half:
            model.to(torch.float16)

        # Save model to disk
        model.save_pretrained(os.path.join(args.dump_path, model_name))
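For completeness, here is a minimal sketch of how the converted adapters could be loaded back, assuming the save layout used above (dump_path/text_encoder and dump_path/unet). The checkpoint id and the "converted_lora" path are placeholders, not values taken from the script.

# Illustrative only: load the adapters written by the script above back onto the base models.
import os

from diffusers import UNet2DConditionModel
from transformers import CLIPTextModel

from peft import PeftModel

sd_checkpoint = "runwayml/stable-diffusion-v1-5"  # placeholder base checkpoint
dump_path = "converted_lora"                      # placeholder --dump_path used when converting

text_encoder = CLIPTextModel.from_pretrained(sd_checkpoint, subfolder="text_encoder")
text_encoder = PeftModel.from_pretrained(text_encoder, os.path.join(dump_path, "text_encoder"))

unet = UNet2DConditionModel.from_pretrained(sd_checkpoint, subfolder="unet")
unet = PeftModel.from_pretrained(unet, os.path.join(dump_path, "unet"))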
examples/lora_dreambooth/convert_peft_sd_lora_to_kohya_ss.py (new file, 101 lines)
@@ -0,0 +1,101 @@
import argparse
import os
from typing import Dict

import torch
from diffusers import UNet2DConditionModel
from safetensors.torch import save_file
from transformers import CLIPTextModel

from peft import PeftModel, get_peft_model_state_dict


# Default kohya_ss LoRA replacement modules
# https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L664
LORA_PREFIX_UNET = "lora_unet"
LORA_PREFIX_TEXT_ENCODER = "lora_te"
LORA_ADAPTER_NAME = "default"


def get_module_kohya_state_dict(
    module: PeftModel, prefix: str, dtype: torch.dtype, adapter_name: str = LORA_ADAPTER_NAME
) -> Dict[str, torch.Tensor]:
    kohya_ss_state_dict = {}
    for peft_key, weight in get_peft_model_state_dict(module, adapter_name=adapter_name).items():
        kohya_key = peft_key.replace("base_model.model", prefix)
        kohya_key = kohya_key.replace("lora_A", "lora_down")
        kohya_key = kohya_key.replace("lora_B", "lora_up")
        kohya_key = kohya_key.replace(".", "_", kohya_key.count(".") - 2)
        kohya_ss_state_dict[kohya_key] = weight.to(dtype)

        # Set alpha parameter
        if "lora_down" in kohya_key:
            alpha_key = f'{kohya_key.split(".")[0]}.alpha'
            kohya_ss_state_dict[alpha_key] = torch.tensor(module.peft_config[adapter_name].lora_alpha).to(dtype)

    return kohya_ss_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--sd_checkpoint",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )

    parser.add_argument(
        "--sd_checkpoint_revision",
        type=str,
        default=None,
        required=False,
        help="Revision of pretrained model identifier from huggingface.co/models.",
    )

    parser.add_argument("--peft_lora_path", default=None, type=str, required=True, help="Path to peft trained LoRA")

    parser.add_argument(
        "--dump_path",
        default=None,
        type=str,
        required=True,
        help="Path to the output safetensors file for use with webui.",
    )

    parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
    args = parser.parse_args()

    # Store kohya_ss state dict
    kohya_ss_state_dict = {}
    dtype = torch.float16 if args.half else torch.float32

    # Load Text Encoder LoRA model
    text_encoder_peft_lora_path = os.path.join(args.peft_lora_path, "text_encoder")
    if os.path.exists(text_encoder_peft_lora_path):
        text_encoder = CLIPTextModel.from_pretrained(
            args.sd_checkpoint, subfolder="text_encoder", revision=args.sd_checkpoint_revision
        )
        text_encoder = PeftModel.from_pretrained(
            text_encoder, text_encoder_peft_lora_path, adapter_name=LORA_ADAPTER_NAME
        )
        kohya_ss_state_dict.update(
            get_module_kohya_state_dict(text_encoder, LORA_PREFIX_TEXT_ENCODER, dtype, LORA_ADAPTER_NAME)
        )

    # Load UNet LoRA model
    unet_peft_lora_path = os.path.join(args.peft_lora_path, "unet")
    if os.path.exists(unet_peft_lora_path):
        unet = UNet2DConditionModel.from_pretrained(
            args.sd_checkpoint, subfolder="unet", revision=args.sd_checkpoint_revision
        )
        unet = PeftModel.from_pretrained(unet, unet_peft_lora_path, adapter_name=LORA_ADAPTER_NAME)
        kohya_ss_state_dict.update(get_module_kohya_state_dict(unet, LORA_PREFIX_UNET, dtype, LORA_ADAPTER_NAME))

    # Save state dict
    save_file(
        kohya_ss_state_dict,
        args.dump_path,
    )
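The key translation in get_module_kohya_state_dict is easiest to see in isolation. The sketch below repeats just the renaming chain on an example PEFT key; the example key is illustrative, since real keys depend on the wrapped model's module names.

def peft_to_kohya_key(peft_key: str, prefix: str) -> str:
    # Same renaming chain as get_module_kohya_state_dict, without the weights.
    kohya_key = peft_key.replace("base_model.model", prefix)
    kohya_key = kohya_key.replace("lora_A", "lora_down")
    kohya_key = kohya_key.replace("lora_B", "lora_up")
    # Replace all but the last two dots, so only ".lora_down.weight" / ".lora_up.weight" keeps dots.
    return kohya_key.replace(".", "_", kohya_key.count(".") - 2)


example = "base_model.model.text_model.encoder.layers.0.self_attn.q_proj.lora_A.weight"
print(peft_to_kohya_key(example, "lora_te"))
# lora_te_text_model_encoder_layers_0_self_attn_q_proj.lora_down.weight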
@@ -1,10 +1,11 @@
transformers
accelerate
loralib
evaluate
tqdm
datasets
diffusers
Pillow
torchvision
huggingface_hub
safetensors
wandb
@@ -7,6 +7,7 @@ import math
 import os
 import threading
 import warnings
+from contextlib import nullcontext
 from pathlib import Path
 from typing import Optional
 
@@ -213,6 +214,17 @@ def parse_args(input_args=None):
         help="Bias type for Lora. Can be 'none', 'all' or 'lora_only', only used if use_lora and `train_text_encoder` are True",
     )
+    parser.add_argument(
+        "--num_dataloader_workers", type=int, default=1, help="Num of workers for the training dataloader."
+    )
+    parser.add_argument(
+        "--no_tracemalloc",
+        default=False,
+        action="store_true",
+        help="Flag to stop memory allocation tracing during training. This could speed up training on Windows.",
+    )
     parser.add_argument(
         "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
     )
@@ -329,6 +341,18 @@ def parse_args(input_args=None):
             ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
         ),
     )
+    parser.add_argument(
+        "--wandb_key",
+        type=str,
+        default=None,
+        help=("If report to option is set to wandb, api-key for wandb used for login to wandb "),
+    )
+    parser.add_argument(
+        "--wandb_project_name",
+        type=str,
+        default=None,
+        help=("If report to option is set to wandb, project name in wandb for log tracking "),
+    )
     parser.add_argument(
         "--mixed_precision",
         type=str,
@@ -569,9 +593,13 @@ def main(args):
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         mixed_precision=args.mixed_precision,
         log_with=args.report_to,
-        logging_dir=logging_dir,
+        project_dir=logging_dir,
     )
+    if args.report_to == "wandb":
+        import wandb
+
+        wandb.login(key=args.wandb_key)
+        wandb.init(project=args.wandb_project_name)
     # Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
     # This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
     # TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
@@ -783,7 +811,7 @@ def main(args):
         batch_size=args.train_batch_size,
         shuffle=True,
         collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
-        num_workers=1,
+        num_workers=args.num_dataloader_workers,
     )
 
     # Scheduler and math around the number of training steps.
@@ -877,12 +905,14 @@ def main(args):
         unet.train()
         if args.train_text_encoder:
             text_encoder.train()
-        with TorchTracemalloc() as tracemalloc:
+        with TorchTracemalloc() if not args.no_tracemalloc else nullcontext() as tracemalloc:
             for step, batch in enumerate(train_dataloader):
                 # Skip steps until we reach the resumed step
                 if args.resume_from_checkpoint and epoch == first_epoch and step < resume_step:
                     if step % args.gradient_accumulation_steps == 0:
                         progress_bar.update(1)
+                        if args.report_to == "wandb":
+                            accelerator.print(progress_bar)
                     continue
 
                 with accelerator.accumulate(unet):
@@ -948,6 +978,8 @@ def main(args):
                 # Checks if the accelerator has performed an optimization step behind the scenes
                 if accelerator.sync_gradients:
                     progress_bar.update(1)
+                    if args.report_to == "wandb":
+                        accelerator.print(progress_bar)
                     global_step += 1
 
                 # if global_step % args.checkpointing_steps == 0:
@@ -1014,23 +1046,29 @@ def main(args):
             if global_step >= args.max_train_steps:
                 break
         # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
-        accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
-        accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
-        accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
-        accelerator.print(
-            "GPU Total Peak Memory consumed during the train (max): {}".format(
-                tracemalloc.peaked + b2mb(tracemalloc.begin)
-            )
-        )
-
-        accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin)))
-        accelerator.print("CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used))
-        accelerator.print("CPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.cpu_peaked))
-        accelerator.print(
-            "CPU Total Peak Memory consumed during the train (max): {}".format(
-                tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)
-            )
-        )
+        if not args.no_tracemalloc:
+            accelerator.print("GPU Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
+            accelerator.print("GPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
+            accelerator.print("GPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
+            accelerator.print(
+                "GPU Total Peak Memory consumed during the train (max): {}".format(
+                    tracemalloc.peaked + b2mb(tracemalloc.begin)
+                )
+            )
+
+            accelerator.print("CPU Memory before entering the train : {}".format(b2mb(tracemalloc.cpu_begin)))
+            accelerator.print(
+                "CPU Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.cpu_used)
+            )
+            accelerator.print(
+                "CPU Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.cpu_peaked)
+            )
+            accelerator.print(
+                "CPU Total Peak Memory consumed during the train (max): {}".format(
                    tracemalloc.cpu_peaked + b2mb(tracemalloc.cpu_begin)
+                )
+            )
 
     # Create the pipeline using using the trained modules and save it.
     accelerator.wait_for_everyone()
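The --no_tracemalloc change above relies on a small pattern: swapping contextlib.nullcontext() in for the tracing context manager when tracing is disabled. A minimal sketch, with Tracer as a hypothetical stand-in for TorchTracemalloc:

from contextlib import nullcontext


class Tracer:
    # Hypothetical stand-in for TorchTracemalloc, for illustration only.
    def __enter__(self):
        print("tracing started")
        return self

    def __exit__(self, *exc):
        print("tracing stopped")
        return False


for enabled in (True, False):
    # Same pattern as the diff: the conditional picks the context manager before `with` enters it.
    with Tracer() if enabled else nullcontext() as tracer:
        # `tracer` is a Tracer instance when enabled, None when nullcontext() is used.
        print("enabled:", enabled, "tracer:", tracer)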