1112lee committed · Commit 9d6cb8e · verified · 1 Parent: 13ea1af

nice-model
This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +5 -0
  2. README.md +54 -0
  3. adapter_config.json +37 -0
  4. adapter_model.bin +3 -0
  5. input/commonsense/commonsense_170k.json +3 -0
  6. input/commonsensetest/dataset/ARC-Challenge/test.json +0 -0
  7. input/commonsensetest/dataset/ARC-Challenge/train.json +0 -0
  8. input/commonsensetest/dataset/ARC-Easy/test.json +0 -0
  9. input/commonsensetest/dataset/ARC-Easy/train.json +0 -0
  10. input/commonsensetest/dataset/boolq/test.json +0 -0
  11. input/commonsensetest/dataset/boolq/train.json +0 -0
  12. input/commonsensetest/dataset/hellaswag/test.json +3 -0
  13. input/commonsensetest/dataset/hellaswag/train.json +3 -0
  14. input/commonsensetest/dataset/openbookqa/test.json +0 -0
  15. input/commonsensetest/dataset/openbookqa/train.json +0 -0
  16. input/commonsensetest/dataset/piqa/test.json +0 -0
  17. input/commonsensetest/dataset/piqa/train.json +0 -0
  18. input/commonsensetest/dataset/social_i_qa/test.json +0 -0
  19. input/commonsensetest/dataset/social_i_qa/train.json +3 -0
  20. input/commonsensetest/dataset/winogrande/test.json +0 -0
  21. input/commonsensetest/dataset/winogrande/train.json +3 -0
  22. lib/kaggle/gcp.py +1 -0
  23. training_args.bin +3 -0
  24. working/peft/.github/ISSUE_TEMPLATE/bug-report.yml +70 -0
  25. working/peft/.github/ISSUE_TEMPLATE/feature-request.yml +30 -0
  26. working/peft/.github/workflows/build_docker_images.yml +217 -0
  27. working/peft/.github/workflows/build_documentation.yml +20 -0
  28. working/peft/.github/workflows/build_pr_documentation.yml +17 -0
  29. working/peft/.github/workflows/integrations_tests.yml +82 -0
  30. working/peft/.github/workflows/nightly-bnb.yml +139 -0
  31. working/peft/.github/workflows/nightly.yml +108 -0
  32. working/peft/.github/workflows/stale.yml +27 -0
  33. working/peft/.github/workflows/test-docker-build.yml +59 -0
  34. working/peft/.github/workflows/tests-main.yml +28 -0
  35. working/peft/.github/workflows/tests.yml +53 -0
  36. working/peft/.github/workflows/torch_compile_tests.yml +42 -0
  37. working/peft/.github/workflows/upload_pr_documentation.yml +16 -0
  38. working/peft/.gitignore +141 -0
  39. working/peft/.pre-commit-config.yaml +13 -0
  40. working/peft/LICENSE +201 -0
  41. working/peft/Makefile +61 -0
  42. working/peft/README.md +158 -0
  43. working/peft/__init__.py +98 -0
  44. working/peft/__pycache__/__init__.cpython-310.pyc +0 -0
  45. working/peft/__pycache__/auto.cpython-310.pyc +0 -0
  46. working/peft/__pycache__/config.cpython-310.pyc +0 -0
  47. working/peft/__pycache__/import_utils.cpython-310.pyc +0 -0
  48. working/peft/__pycache__/mapping.cpython-310.pyc +0 -0
  49. working/peft/__pycache__/mixed_model.cpython-310.pyc +0 -0
  50. working/peft/__pycache__/peft_model.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -33,3 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ input/commonsense/commonsense_170k.json filter=lfs diff=lfs merge=lfs -text
+ input/commonsensetest/dataset/hellaswag/test.json filter=lfs diff=lfs merge=lfs -text
+ input/commonsensetest/dataset/hellaswag/train.json filter=lfs diff=lfs merge=lfs -text
+ input/commonsensetest/dataset/social_i_qa/train.json filter=lfs diff=lfs merge=lfs -text
+ input/commonsensetest/dataset/winogrande/train.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,54 @@
+ ---
+ license: mit
+ library_name: peft
+ tags:
+ - generated_from_trainer
+ base_model: openai-community/gpt2-medium
+ model-index:
+ - name: kaggle
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # kaggle
+
+ This model is a fine-tuned version of [openai-community/gpt2-medium](https://huggingface.co/openai-community/gpt2-medium) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 2
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 1
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.11.2.dev0
+ - Transformers 4.41.1
+ - Pytorch 2.0.0
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
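The card above is the auto-generated stub for an AdaLoRA adapter on GPT-2 Medium (see `adapter_config.json` below). As a quick sanity check that the checkpoint is usable, the adapter can be loaded back onto its base model with PEFT's standard loader. This is a minimal sketch, assuming the files in this commit are laid out as a regular PEFT checkpoint; the repo ID `1112lee/kaggle` is a hypothetical placeholder for wherever this commit actually lives.

```python
# Minimal loading sketch; "1112lee/kaggle" is a hypothetical repo ID, substitute the real path.
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("openai-community/gpt2-medium")
tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2-medium")

# PeftModel picks up adapter_config.json and adapter_model.bin from the checkpoint.
model = PeftModel.from_pretrained(base, "1112lee/kaggle")
model.eval()

inputs = tokenizer("Question: what do people use to cut paper?", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```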
adapter_config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": null,
+   "beta1": 0.85,
+   "beta2": 0.85,
+   "bias": "none",
+   "deltaT": 1,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "init_r": 32,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.2,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "orth_reg_weight": 0.5,
+   "peft_type": "ADALORA",
+   "r": 8,
+   "rank_pattern": null,
+   "revision": null,
+   "target_modules": [
+     "c_attn"
+   ],
+   "target_r": 8,
+   "task_type": "CAUSAL_LM",
+   "tfinal": 0,
+   "tinit": 0,
+   "total_step": null,
+   "use_dora": false,
+   "use_rslora": false
+ }
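For reference, the JSON above deserializes to PEFT's `AdaLoraConfig`. Below is a sketch of the equivalent programmatic configuration; field names follow PEFT's AdaLoRA API, values mirror the file, and anything not listed keeps its default.

```python
# Sketch: an AdaLoraConfig that serializes to roughly the adapter_config.json above.
from peft import AdaLoraConfig, TaskType

config = AdaLoraConfig(
    task_type=TaskType.CAUSAL_LM,
    target_modules=["c_attn"],  # GPT-2's fused QKV projection
    init_r=32,                  # starting rank before AdaLoRA prunes ranks
    target_r=8,                 # average target rank after budget allocation
    lora_alpha=16,
    lora_dropout=0.2,
    beta1=0.85,                 # EMA factors for the sensitivity scores
    beta2=0.85,
    tinit=0,                    # warmup steps before rank pruning starts
    tfinal=0,                   # final fine-tuning steps after pruning ends
    deltaT=1,                   # interval between budget reallocations
    orth_reg_weight=0.5,        # orthogonality regularization strength
)
```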
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e372ee34a024eea072a5ca62c2c20eb3d491d4ff0adbf06239e5db887564f6e3
+ size 12613193
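The binary above is checked in as a Git LFS pointer: the repository keeps only this three-line stub (spec version, SHA-256 of the blob, byte size) while the ~12 MB of adapter weights live in LFS storage. The same pattern repeats for the large JSON datasets below. As a small illustration (not part of the commit), the stub format is trivially machine-readable:

```python
# Illustrative only: parse the Git LFS pointer stub shown above.
pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:e372ee34a024eea072a5ca62c2c20eb3d491d4ff0adbf06239e5db887564f6e3
size 12613193"""

meta = dict(line.split(" ", 1) for line in pointer.splitlines())
assert meta["version"].endswith("/spec/v1")
print(meta["oid"])        # content hash used to fetch the real blob
print(int(meta["size"]))  # 12613193 bytes (~12 MB of adapter weights)
```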
input/commonsense/commonsense_170k.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4651a3e707fbe28be03f486f96fcf57f8c38b2173f3e38812003108970793ac8
+ size 96529722
input/commonsensetest/dataset/ARC-Challenge/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/ARC-Challenge/train.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/ARC-Easy/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/ARC-Easy/train.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/boolq/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/boolq/train.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/hellaswag/test.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3ea7e99795e63a01f2f5e9ddbc4dca964d9ebb22f04201e64d1ac043be51ab61
+ size 11099046
input/commonsensetest/dataset/hellaswag/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:729aeafa3f600650a92e1c992a888528ac8a1ccd562408736eee66bed2183b1a
+ size 43072851
input/commonsensetest/dataset/openbookqa/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/openbookqa/train.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/piqa/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/piqa/train.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/social_i_qa/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/social_i_qa/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1ca995d6055a6be053bb54b4b4c7abe4cd0323940079071932c1f9129d589b17
+ size 14379571
input/commonsensetest/dataset/winogrande/test.json ADDED
The diff for this file is too large to render.
input/commonsensetest/dataset/winogrande/train.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:154d4fc3ef6b065aff1b738150f3fbd8d497f8e32d7aaae114991a2ad912ae79
+ size 24772666
lib/kaggle/gcp.py ADDED
@@ -0,0 +1 @@
+ from kaggle_gcp import *
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:792f636757229e42f17c7ae4b58724debe0276e945bb45e65c50e964f8b23d52
+ size 4603
working/peft/.github/ISSUE_TEMPLATE/bug-report.yml ADDED
@@ -0,0 +1,70 @@
+ name: "\U0001F41B Bug Report"
+ description: Submit a bug report to help us improve the library
+ body:
+   - type: textarea
+     id: system-info
+     attributes:
+       label: System Info
+       description: Please share your relevant system information with us
+       placeholder: peft & accelerate & transformers version, platform, python version, ...
+     validations:
+       required: true
+
+   - type: textarea
+     id: who-can-help
+     attributes:
+       label: Who can help?
+       description: |
+         Your issue will be replied to more quickly if you can figure out the right person to tag with @.
+         If you know how to use git blame, that is the easiest way, otherwise, here is a rough guide of **who to tag**.
+
+         All issues are read by one of the core maintainers, so if you don't know who to tag, just leave this blank and
+         a core maintainer will ping the right person.
+
+         Please tag fewer than 3 people.
+
+         Library: @pacman100 @younesbelkada @benjaminbossan @sayakpaul
+
+         Documentation: @stevhliu
+
+       placeholder: "@Username ..."
+
+   - type: checkboxes
+     id: information-scripts-examples
+     attributes:
+       label: Information
+       description: 'The problem arises when using:'
+       options:
+         - label: "The official example scripts"
+         - label: "My own modified scripts"
+
+   - type: checkboxes
+     id: information-tasks
+     attributes:
+       label: Tasks
+       description: "The tasks I am working on are:"
+       options:
+         - label: "An officially supported task in the `examples` folder"
+         - label: "My own task or dataset (give details below)"
+
+   - type: textarea
+     id: reproduction
+     validations:
+       required: true
+     attributes:
+       label: Reproduction
+       description: |
+         Please provide a code sample that reproduces the problem you ran into. It can be a Colab link or just a code snippet.
+         Please provide the simplest reproducer possible so that we can quickly fix the issue. When you paste
+         the error message, please include the full traceback.
+
+       placeholder: |
+         Reproducer:
+
+   - type: textarea
+     id: expected-behavior
+     validations:
+       required: true
+     attributes:
+       label: Expected behavior
+       description: "A clear and concise description of what you would expect to happen."
working/peft/.github/ISSUE_TEMPLATE/feature-request.yml ADDED
@@ -0,0 +1,30 @@
+ name: "\U0001F680 Feature request"
+ description: Submit a proposal/request for a new feature
+ labels: [ "feature" ]
+ body:
+   - type: textarea
+     id: feature-request
+     validations:
+       required: true
+     attributes:
+       label: Feature request
+       description: |
+         A clear and concise description of the feature proposal. Please provide a link to the paper and code in case they exist.
+
+   - type: textarea
+     id: motivation
+     validations:
+       required: true
+     attributes:
+       label: Motivation
+       description: |
+         Please outline the motivation for the proposal. Is your feature request related to a problem?
+
+   - type: textarea
+     id: contribution
+     validations:
+       required: true
+     attributes:
+       label: Your contribution
+       description: |
+         Is there any way that you could help, e.g. by submitting a PR?
working/peft/.github/workflows/build_docker_images.yml ADDED
@@ -0,0 +1,217 @@
+ name: Build Docker images (scheduled)
+
+ on:
+   workflow_dispatch:
+   workflow_call:
+   schedule:
+     - cron: "0 1 * * *"
+
+ concurrency:
+   group: docker-image-builds
+   cancel-in-progress: false
+
+ env:
+   CI_SLACK_CHANNEL: ${{ secrets.CI_DOCKER_CHANNEL }}
+
+ jobs:
+   latest-cpu:
+     name: "Latest Peft CPU [dev]"
+     runs-on: ubuntu-latest
+     steps:
+       - name: Cleanup disk
+         run: |
+           sudo ls -l /usr/local/lib/
+           sudo ls -l /usr/share/
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+           sudo rm -rf /usr/local/lib/android
+           sudo rm -rf /usr/share/dotnet
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v1
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Login to DockerHub
+         uses: docker/login-action@v2
+         with:
+           username: ${{ secrets.DOCKERHUB_USERNAME }}
+           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+       - name: Build and Push CPU
+         uses: docker/build-push-action@v4
+         with:
+           context: ./docker/peft-cpu
+           push: true
+           tags: huggingface/peft-cpu
+
+       - name: Post to Slack
+         if: always()
+         uses: huggingface/hf-workflows/.github/actions/post-slack@main
+         with:
+           slack_channel: "C06LKJB31RU"
+           title: 🤗 Results of the PEFT-CPU docker build
+           status: ${{ job.status }}
+           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+
+   latest-cuda:
+     name: "Latest Peft GPU [dev]"
+     runs-on: ubuntu-latest
+     steps:
+       - name: Cleanup disk
+         run: |
+           sudo ls -l /usr/local/lib/
+           sudo ls -l /usr/share/
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+           sudo rm -rf /usr/local/lib/android
+           sudo rm -rf /usr/share/dotnet
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v1
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Login to DockerHub
+         uses: docker/login-action@v1
+         with:
+           username: ${{ secrets.DOCKERHUB_USERNAME }}
+           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+       - name: Build and Push GPU
+         uses: docker/build-push-action@v4
+         with:
+           context: ./docker/peft-gpu
+           push: true
+           tags: huggingface/peft-gpu
+
+       - name: Post to Slack
+         if: always()
+         uses: huggingface/hf-workflows/.github/actions/post-slack@main
+         with:
+           slack_channel: "C06LKJB31RU"
+           title: 🤗 Results of the PEFT-GPU docker build
+           status: ${{ job.status }}
+           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+
+   latest-cuda-bnb-source:
+     name: "Latest Peft GPU + bnb source [dev]"
+     runs-on: ubuntu-latest
+     steps:
+       - name: Cleanup disk
+         run: |
+           sudo ls -l /usr/local/lib/
+           sudo ls -l /usr/share/
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+           sudo rm -rf /usr/local/lib/android
+           sudo rm -rf /usr/share/dotnet
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v1
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Login to DockerHub
+         uses: docker/login-action@v1
+         with:
+           username: ${{ secrets.DOCKERHUB_USERNAME }}
+           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+       - name: Build and Push GPU
+         uses: docker/build-push-action@v4
+         with:
+           context: ./docker/peft-gpu-bnb-source
+           push: true
+           tags: huggingface/peft-gpu-bnb-source
+
+       - name: Post to Slack
+         if: always()
+         uses: huggingface/hf-workflows/.github/actions/post-slack@main
+         with:
+           slack_channel: "C06LKJB31RU"
+           title: 🤗 Results of the PEFT-GPU (bnb source / HF latest) docker build
+           status: ${{ job.status }}
+           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+
+   latest-cuda-bnb-source-latest:
+     name: "Latest Peft GPU + bnb source [accelerate / peft / transformers latest]"
+     runs-on: ubuntu-latest
+     steps:
+       - name: Cleanup disk
+         run: |
+           sudo ls -l /usr/local/lib/
+           sudo ls -l /usr/share/
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+           sudo rm -rf /usr/local/lib/android
+           sudo rm -rf /usr/share/dotnet
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v1
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Login to DockerHub
+         uses: docker/login-action@v1
+         with:
+           username: ${{ secrets.DOCKERHUB_USERNAME }}
+           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+       - name: Build and Push GPU
+         uses: docker/build-push-action@v4
+         with:
+           context: ./docker/peft-gpu-bnb-latest
+           push: true
+           tags: huggingface/peft-gpu-bnb-latest
+
+       - name: Post to Slack
+         if: always()
+         uses: huggingface/hf-workflows/.github/actions/post-slack@main
+         with:
+           slack_channel: "C06LKJB31RU"
+           title: 🤗 Results of the PEFT-GPU (bnb source / HF source) docker build
+           status: ${{ job.status }}
+           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+
+   latest-cuda-bnb-source-multi:
+     name: "Latest Peft GPU + bnb (multi-backend) source [accelerate / peft / transformers source]"
+     runs-on: ubuntu-latest
+     steps:
+       - name: Cleanup disk
+         run: |
+           sudo ls -l /usr/local/lib/
+           sudo ls -l /usr/share/
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+           sudo rm -rf /usr/local/lib/android
+           sudo rm -rf /usr/share/dotnet
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v1
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Login to DockerHub
+         uses: docker/login-action@v1
+         with:
+           username: ${{ secrets.DOCKERHUB_USERNAME }}
+           password: ${{ secrets.DOCKERHUB_PASSWORD }}
+
+       - name: Build and Push GPU
+         uses: docker/build-push-action@v4
+         with:
+           context: ./docker/peft-gpu-bnb-multi-source
+           push: true
+           tags: huggingface/peft-gpu-bnb-multi-source
+
+       - name: Post to Slack
+         if: always()
+         uses: huggingface/hf-workflows/.github/actions/post-slack@main
+         with:
+           slack_channel: "C06LKJB31RU"
+           title: 🤗 Results of the PEFT-GPU (bnb source multi-backend / HF latest) docker build
+           status: ${{ job.status }}
+           slack_token: ${{ secrets.SLACK_CIFEEDBACK_BOT_TOKEN }}
+
+
working/peft/.github/workflows/build_documentation.yml ADDED
@@ -0,0 +1,20 @@
+ name: Build documentation
+
+ on:
+   push:
+     branches:
+       - main
+       - doc-builder*
+       - v*-release
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/build_main_documentation.yml@main
+     with:
+       commit_sha: ${{ github.sha }}
+       package: peft
+       notebook_folder: peft_docs
+       custom_container: huggingface/transformers-doc-builder
+     secrets:
+       token: ${{ secrets.HUGGINGFACE_PUSH }}
+       hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
working/peft/.github/workflows/build_pr_documentation.yml ADDED
@@ -0,0 +1,17 @@
+ name: Build PR Documentation
+
+ on:
+   pull_request:
+
+ concurrency:
+   group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
+   cancel-in-progress: true
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/build_pr_documentation.yml@main
+     with:
+       commit_sha: ${{ github.event.pull_request.head.sha }}
+       pr_number: ${{ github.event.number }}
+       package: peft
+       custom_container: huggingface/transformers-doc-builder
working/peft/.github/workflows/integrations_tests.yml ADDED
@@ -0,0 +1,82 @@
+ name: integration tests
+
+ on:
+   workflow_dispatch:
+     inputs:
+       branch:
+         description: 'Branch to test on'
+         required: true
+
+ jobs:
+   run_transformers_integration_tests:
+     strategy:
+       fail-fast: false
+       matrix:
+         transformers-version: ['main', 'latest']
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           ref: ${{ github.event.inputs.branch }}
+           repository: ${{ github.event.pull_request.head.repo.full_name }}
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.10"
+           cache: "pip"
+           cache-dependency-path: "setup.py"
+       - name: print environment variables
+         run: |
+           echo "env.CI_BRANCH = ${{ env.CI_BRANCH }}"
+           echo "env.CI_SHA = ${{ env.CI_SHA }}"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           python -m pip install .[test]
+           if [ "${{ matrix.transformers-version }}" == "main" ]; then
+             pip install -U git+https://github.com/huggingface/transformers.git
+           else
+             echo "Nothing to do as transformers latest already installed"
+           fi
+
+       - name: Test transformers integration
+         run: |
+           cd .. && git clone https://github.com/huggingface/transformers.git && cd transformers/ && git rev-parse HEAD
+           RUN_SLOW=1 pytest tests/peft_integration/test_peft_integration.py
+   run_diffusers_integration_tests:
+     strategy:
+       fail-fast: false
+       matrix:
+         # For now diffusers integration is not on PyPI
+         diffusers-version: ['main']
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           ref: ${{ github.event.inputs.branch }}
+           repository: ${{ github.event.pull_request.head.repo.full_name }}
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.10"
+           cache: "pip"
+           cache-dependency-path: "setup.py"
+       - name: print environment variables
+         run: |
+           echo "env.CI_BRANCH = ${{ env.CI_BRANCH }}"
+           echo "env.CI_SHA = ${{ env.CI_SHA }}"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           python -m pip install .[test]
+
+           if [ "${{ matrix.diffusers-version }}" == "main" ]; then
+             pip install -U git+https://github.com/huggingface/diffusers.git
+           else
+             echo "Nothing to do as diffusers latest already installed"
+           fi
+
+       - name: Test diffusers integration
+         run: |
+           cd .. && git clone https://github.com/huggingface/diffusers.git && cd diffusers/ && git rev-parse HEAD
+           pytest tests/lora/test_lora_layers_peft.py
working/peft/.github/workflows/nightly-bnb.yml ADDED
@@ -0,0 +1,139 @@
+ name: BNB from source self-hosted runner with slow tests (scheduled)
+
+ on:
+   workflow_dispatch:
+   schedule:
+     - cron: "0 2 * * *"
+
+ env:
+   RUN_SLOW: "yes"
+   IS_GITHUB_CI: "1"
+   # To be able to run tests on CUDA 12.2
+   NVIDIA_DISABLE_REQUIRE: "1"
+   SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
+
+
+ jobs:
+   run_all_tests_single_gpu:
+     strategy:
+       fail-fast: false
+       matrix:
+         docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
+     runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
+     env:
+       CUDA_VISIBLE_DEVICES: "0"
+       TEST_TYPE: "single_gpu_${{ matrix.docker-image-name }}"
+     container:
+       image: ${{ matrix.docker-image-name }}
+       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+     defaults:
+       run:
+         shell: bash
+     steps:
+       - uses: actions/checkout@v3
+       - name: Pip install
+         run: |
+           source activate peft
+           pip install -e . --no-deps
+           pip install pytest-reportlog pytest-cov parameterized datasets scipy einops
+           pip install "pytest>=7.2.0,<8.0.0" # see: https://github.com/huggingface/transformers/blob/ce4fff0be7f6464d713f7ac3e0bbaafbc6959ae5/setup.py#L148C6-L148C26
+           mkdir transformers-clone && git clone https://github.com/huggingface/transformers.git transformers-clone # rename to transformers clone to avoid modules conflict
+           if [ "${{ matrix.docker-image-name }}" == "huggingface/peft-gpu-bnb-latest:latest" ]; then
+             cd transformers-clone
+             transformers_version=$(pip show transformers | grep '^Version:' | cut -d ' ' -f2 | sed 's/\.dev0//')
+             echo "Checking out tag for Transformers version: v$transformers_version"
+             git fetch --tags
+             git checkout tags/v$transformers_version
+             cd ..
+           fi
+       - name: Run examples on single GPU
+         if: always()
+         run: |
+           source activate peft
+           make tests_examples_single_gpu_bnb
+
+       - name: Run core tests on single GPU
+         if: always()
+         run: |
+           source activate peft
+           make tests_core_single_gpu_bnb
+
+       - name: Run BNB regression tests on single GPU
+         if: always()
+         run: |
+           source activate peft
+           make tests_gpu_bnb_regression
+
+       - name: Run transformers tests on single GPU
+         if: always()
+         run: |
+           source activate peft
+           make transformers_tests
+
+       - name: Generate Report
+         if: always()
+         run: |
+           pip install slack_sdk tabulate
+           python scripts/log_reports.py --slack_channel_name bnb-daily-ci-collab >> $GITHUB_STEP_SUMMARY
+
+   run_all_tests_multi_gpu:
+     strategy:
+       fail-fast: false
+       matrix:
+         docker-image-name: ["huggingface/peft-gpu-bnb-source:latest", "huggingface/peft-gpu-bnb-latest:latest", "huggingface/peft-gpu-bnb-multi-source:latest"]
+     runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
+     env:
+       CUDA_VISIBLE_DEVICES: "0,1"
+       TEST_TYPE: "multi_gpu_${{ matrix.docker-image-name }}"
+     container:
+       image: ${{ matrix.docker-image-name }}
+       options: --gpus all --shm-size "16gb" --ipc host -v /mnt/cache/.cache/huggingface:/mnt/cache/
+     defaults:
+       run:
+         shell: bash
+     steps:
+       - uses: actions/checkout@v3
+       - name: Pip install
+         run: |
+           source activate peft
+           pip install -e . --no-deps
+           pip install pytest-reportlog pytest-cov parameterized datasets scipy einops
+           pip install "pytest>=7.2.0,<8.0.0" # see: https://github.com/huggingface/transformers/blob/ce4fff0be7f6464d713f7ac3e0bbaafbc6959ae5/setup.py#L148C6-L148C26
+           mkdir transformers-clone && git clone https://github.com/huggingface/transformers.git transformers-clone
+           if [ "${{ matrix.docker-image-name }}" == "huggingface/peft-gpu-bnb-latest:latest" ]; then
+             cd transformers-clone
+             transformers_version=$(pip show transformers | grep '^Version:' | cut -d ' ' -f2 | sed 's/\.dev0//')
+             echo "Checking out tag for Transformers version: v$transformers_version"
+             git fetch --tags
+             git checkout tags/v$transformers_version
+             cd ..
+           fi
+
+       - name: Run core GPU tests on multi-gpu
+         if: always()
+         run: |
+           source activate peft
+
+       - name: Run examples on multi GPU
+         if: always()
+         run: |
+           source activate peft
+           make tests_examples_multi_gpu_bnb
+
+       - name: Run core tests on multi GPU
+         if: always()
+         run: |
+           source activate peft
+           make tests_core_multi_gpu_bnb
+
+       - name: Run transformers tests on multi GPU
+         if: always()
+         run: |
+           source activate peft
+           make transformers_tests
+
+       - name: Generate Report
+         if: always()
+         run: |
+           pip install slack_sdk tabulate
+           python scripts/log_reports.py --slack_channel_name bnb-daily-ci-collab >> $GITHUB_STEP_SUMMARY
working/peft/.github/workflows/nightly.yml ADDED
@@ -0,0 +1,108 @@
+ name: Self-hosted runner with slow tests (scheduled)
+
+ on:
+   workflow_dispatch:
+   schedule:
+     - cron: "0 2 * * *"
+
+ env:
+   RUN_SLOW: "yes"
+   IS_GITHUB_CI: "1"
+   # To be able to run tests on CUDA 12.2
+   NVIDIA_DISABLE_REQUIRE: "1"
+   SLACK_API_TOKEN: ${{ secrets.SLACK_API_TOKEN }}
+
+
+ jobs:
+   run_all_tests_single_gpu:
+     strategy:
+       fail-fast: false
+     runs-on: [self-hosted, single-gpu, nvidia-gpu, t4, ci]
+     env:
+       CUDA_VISIBLE_DEVICES: "0"
+       TEST_TYPE: "single_gpu"
+     container:
+       image: huggingface/peft-gpu:latest
+       options: --gpus all --shm-size "16gb" -e NVIDIA_DISABLE_REQUIRE=true
+     defaults:
+       run:
+         shell: bash
+     steps:
+       - uses: actions/checkout@v3
+       - name: Pip install
+         run: |
+           source activate peft
+           pip install -e . --no-deps
+           pip install pytest-reportlog
+
+       - name: Run common tests on single GPU
+         run: |
+           source activate peft
+           make tests_common_gpu
+
+       - name: Run examples on single GPU
+         run: |
+           source activate peft
+           make tests_examples_single_gpu
+
+       - name: Run core tests on single GPU
+         run: |
+           source activate peft
+           make tests_core_single_gpu
+
+       - name: Run regression tests on single GPU
+         run: |
+           source activate peft
+           make tests_regression
+
+       - name: Generate Report
+         if: always()
+         run: |
+           pip install slack_sdk tabulate
+           python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
+
+   run_all_tests_multi_gpu:
+     strategy:
+       fail-fast: false
+     runs-on: [self-hosted, multi-gpu, nvidia-gpu, t4, ci]
+     env:
+       CUDA_VISIBLE_DEVICES: "0,1"
+       TEST_TYPE: "multi_gpu"
+     container:
+       image: huggingface/peft-gpu:latest
+       options: --gpus all --shm-size "16gb" -e NVIDIA_DISABLE_REQUIRE=true
+     defaults:
+       run:
+         shell: bash
+     steps:
+       - uses: actions/checkout@v3
+       - name: Pip install
+         run: |
+           source activate peft
+           pip install -e . --no-deps
+           pip install pytest-reportlog
+
+       - name: Run core GPU tests on multi-gpu
+         run: |
+           source activate peft
+
+       - name: Run common tests on multi GPU
+         run: |
+           source activate peft
+           make tests_common_gpu
+
+       - name: Run examples on multi GPU
+         run: |
+           source activate peft
+           make tests_examples_multi_gpu
+
+       - name: Run core tests on multi GPU
+         run: |
+           source activate peft
+           make tests_core_multi_gpu
+
+       - name: Generate Report
+         if: always()
+         run: |
+           pip install slack_sdk tabulate
+           python scripts/log_reports.py >> $GITHUB_STEP_SUMMARY
working/peft/.github/workflows/stale.yml ADDED
@@ -0,0 +1,27 @@
+ name: Stale Bot
+
+ on:
+   schedule:
+     - cron: "0 15 * * *"
+
+ jobs:
+   close_stale_issues:
+     name: Close Stale Issues
+     if: github.repository == 'huggingface/peft'
+     runs-on: ubuntu-latest
+     env:
+       GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+     steps:
+       - uses: actions/checkout@v3
+
+       - name: Setup Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: 3.8
+
+       - name: Install requirements
+         run: |
+           pip install PyGithub
+       - name: Close stale issues
+         run: |
+           python scripts/stale.py
working/peft/.github/workflows/test-docker-build.yml ADDED
@@ -0,0 +1,59 @@
+ name: Test Docker images (on PR)
+
+ on:
+   pull_request:
+     paths:
+       # Run only when DockerFile files are modified
+       - "docker/**"
+ jobs:
+   get_changed_files:
+     name: "Build all modified docker images"
+     runs-on: ubuntu-latest
+     outputs:
+       matrix: ${{ steps.set-matrix.outputs.matrix }}
+     steps:
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Get changed files
+         id: changed-files
+         uses: tj-actions/changed-files@1c8e6069583811afb28f97afeaf8e7da80c6be5c #v42
+         with:
+           files: docker/**
+           json: "true"
+       - name: Run step if only the files listed above change
+         if: steps.changed-files.outputs.any_changed == 'true'
+         id: set-matrix
+         env:
+           ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
+         run: |
+           echo "matrix=${{ steps.changed-files.outputs.all_changed_files }}" >> $GITHUB_OUTPUT
+   build_modified_files:
+     needs: get_changed_files
+     name: Build Docker images on modified files
+     runs-on: ubuntu-latest
+     if: ${{ needs.get_changed_files.outputs.matrix != '' }}
+     strategy:
+       fail-fast: false
+       matrix:
+         docker-file: ${{ fromJson(needs.get_changed_files.outputs.matrix) }}
+     steps:
+       - name: Cleanup disk
+         run: |
+           sudo ls -l /usr/local/lib/
+           sudo ls -l /usr/share/
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+           sudo rm -rf /usr/local/lib/android
+           sudo rm -rf /usr/share/dotnet
+           sudo du -sh /usr/local/lib/
+           sudo du -sh /usr/share/
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v1
+       - name: Check out code
+         uses: actions/checkout@v3
+       - name: Build Docker image
+         uses: docker/build-push-action@v4
+         with:
+           file: ${{ matrix.docker-file }}
+           context: .
+           push: false
working/peft/.github/workflows/tests-main.yml ADDED
@@ -0,0 +1,28 @@
+ name: tests on transformers main
+
+ on:
+   push:
+     branches: [main]
+     paths-ignore:
+       - 'docs/**'
+
+ jobs:
+   tests:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python 3.11
+         uses: actions/setup-python@v4
+         with:
+           python-version: 3.11
+           cache: "pip"
+           cache-dependency-path: "setup.py"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           # cpu version of pytorch
+           pip install -U git+https://github.com/huggingface/transformers.git
+           pip install -e .[test]
+       - name: Test with pytest
+         run: |
+           make test
working/peft/.github/workflows/tests.yml ADDED
@@ -0,0 +1,53 @@
+ name: tests
+
+ on:
+   push:
+     branches: [main]
+     paths-ignore:
+       - 'docs/**'
+   pull_request:
+     paths-ignore:
+       - 'docs/**'
+
+ jobs:
+   check_code_quality:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.8"
+           cache: "pip"
+           cache-dependency-path: "setup.py"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           pip install .[dev]
+       - name: Check quality
+         run: |
+           make quality
+
+   tests:
+     needs: check_code_quality
+     strategy:
+       matrix:
+         python-version: ["3.8", "3.9", "3.10", "3.11"]
+         os: ["ubuntu-latest", "macos-12", "windows-latest"]
+     runs-on: ${{ matrix.os }}
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+           cache: "pip"
+           cache-dependency-path: "setup.py"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           # cpu version of pytorch
+           pip install -e .[test]
+       - name: Test with pytest
+         run: |
+           make test
working/peft/.github/workflows/torch_compile_tests.yml ADDED
@@ -0,0 +1,42 @@
+ name: torch compile tests
+
+ on:
+   workflow_dispatch:
+     inputs:
+       branch:
+         description: 'Branch to test on'
+         required: true
+       pytorch_nightly:
+         description: 'Whether to use PyTorch nightly (true/false)'
+         required: false
+         default: false
+
+ jobs:
+   run_tests_with_compile:
+     runs-on: ubuntu-latest
+     env:
+       PEFT_DEBUG_WITH_TORCH_COMPILE: 1
+     steps:
+       - uses: actions/checkout@v4
+         with:
+           ref: ${{ github.event.inputs.branch }}
+           repository: ${{ github.event.pull_request.head.repo.full_name }}
+       - name: Set up Python
+         uses: actions/setup-python@v4
+         with:
+           python-version: "3.10"
+           cache: "pip"
+           cache-dependency-path: "setup.py"
+       - name: Install dependencies
+         run: |
+           python -m pip install --upgrade pip
+           python -m pip install .[test]
+           python -m pip install bitsandbytes
+           if [ "${{ github.event.inputs.pytorch_nightly }}" = "true" ]; then
+             python -m pip install --upgrade --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
+           fi
+       - name: Test compile with pytest
+         run: |
+           echo "PEFT_DEBUG_WITH_TORCH_COMPILE=$PEFT_DEBUG_WITH_TORCH_COMPILE"
+           git status
+           make tests_torch_compile
working/peft/.github/workflows/upload_pr_documentation.yml ADDED
@@ -0,0 +1,16 @@
+ name: Upload PR Documentation
+
+ on:
+   workflow_run:
+     workflows: ["Build PR Documentation"]
+     types:
+       - completed
+
+ jobs:
+   build:
+     uses: huggingface/doc-builder/.github/workflows/upload_pr_documentation.yml@main
+     with:
+       package_name: peft
+     secrets:
+       hf_token: ${{ secrets.HF_DOC_BUILD_PUSH }}
+       comment_bot_token: ${{ secrets.COMMENT_BOT_TOKEN }}
working/peft/.gitignore ADDED
@@ -0,0 +1,141 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ pip-wheel-metadata/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # VSCode
+ .vscode
+
+ # IntelliJ
+ .idea
+
+ # Mac .DS_Store
+ .DS_Store
+
+ # More test things
+ wandb
working/peft/.pre-commit-config.yaml ADDED
@@ -0,0 +1,13 @@
+ repos:
+   - repo: https://github.com/astral-sh/ruff-pre-commit
+     rev: v0.2.1
+     hooks:
+       - id: ruff
+         args:
+           - --fix
+       - id: ruff-format
+   - repo: https://github.com/pre-commit/pre-commit-hooks
+     rev: v4.5.0
+     hooks:
+       - id: check-merge-conflict
+       - id: check-yaml
working/peft/LICENSE ADDED
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
working/peft/Makefile ADDED
@@ -0,0 +1,61 @@
+ .PHONY: quality style test docs
+
+ check_dirs := src tests examples docs scripts docker
+
+ # Check that source code meets quality standards
+
+ # this target runs checks on all files
+ quality:
+ 	ruff $(check_dirs)
+ 	ruff format --check $(check_dirs)
+ 	doc-builder style src/peft tests docs/source --max_len 119 --check_only
+
+ # Format source code automatically and check if there are any problems left that need manual fixing
+ style:
+ 	ruff $(check_dirs) --fix
+ 	ruff format $(check_dirs)
+ 	doc-builder style src/peft tests docs/source --max_len 119
+
+ test:
+ 	python -m pytest -n 3 tests/ $(if $(IS_GITHUB_CI),--report-log "ci_tests.log",)
+
+ tests_examples_multi_gpu:
+ 	python -m pytest -m multi_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)
+
+ tests_examples_single_gpu:
+ 	python -m pytest -m single_gpu_tests tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)
+
+ tests_core_multi_gpu:
+ 	python -m pytest -m multi_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)
+
+ tests_core_single_gpu:
+ 	python -m pytest -m single_gpu_tests tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)
+
+ tests_common_gpu:
+ 	python -m pytest tests/test_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_decoder.log",)
+ 	python -m pytest tests/test_encoder_decoder_models.py $(if $(IS_GITHUB_CI),--report-log "common_encoder_decoder.log",)
+
+ tests_examples_multi_gpu_bnb:
+ 	python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "multi_gpu_examples.log",)
+
+ tests_examples_single_gpu_bnb:
+ 	python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_gpu_examples.py $(if $(IS_GITHUB_CI),--report-log "single_gpu_examples.log",)
+
+ tests_core_multi_gpu_bnb:
+ 	python -m pytest -m "multi_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_multi_gpu.log",)
+
+ tests_core_single_gpu_bnb:
+ 	python -m pytest -m "single_gpu_tests and bitsandbytes" tests/test_common_gpu.py $(if $(IS_GITHUB_CI),--report-log "core_single_gpu.log",)
+
+ tests_gpu_bnb_regression:
+ 	python -m pytest tests/bnb/test_bnb_regression.py $(if $(IS_GITHUB_CI),--report-log "bnb_regression_gpu.log",)
+
+ # For testing transformers tests for bnb runners
+ transformers_tests:
+ 	RUN_SLOW=1 python -m pytest transformers-clone/tests/quantization/bnb $(if $(IS_GITHUB_CI),--report-log "transformers_tests.log",)
+
+ tests_regression:
+ 	python -m pytest -s --regression tests/regression/ $(if $(IS_GITHUB_CI),--report-log "regression_tests.log",)
+
+ tests_torch_compile:
+ 	python -m pytest tests/test_torch_compile.py $(if $(IS_GITHUB_CI),--report-log "compile_tests.log",)
working/peft/README.md ADDED
@@ -0,0 +1,158 @@
+ <!---
+ Copyright 2023 The HuggingFace Team. All rights reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+ <h1 align="center"> <p>🤗 PEFT</p></h1>
+ <h3 align="center">
+     <p>State-of-the-art Parameter-Efficient Fine-Tuning (PEFT) methods</p>
+ </h3>
+
+ Fine-tuning large pretrained models is often prohibitively costly due to their scale. Parameter-Efficient Fine-Tuning (PEFT) methods enable efficient adaptation of large pretrained models to various downstream applications by only fine-tuning a small number of (extra) model parameters instead of all the model's parameters. This significantly decreases the computational and storage costs. Recent state-of-the-art PEFT techniques achieve performance comparable to fully fine-tuned models.
+
+ PEFT is integrated with Transformers for easy model training and inference, Diffusers for conveniently managing different adapters, and Accelerate for distributed training and inference for really big models.
+
+ > [!TIP]
+ > Visit the [PEFT](https://huggingface.co/PEFT) organization to read about the PEFT methods implemented in the library and to see notebooks demonstrating how to apply these methods to a variety of downstream tasks. Click the "Watch repos" button on the organization page to be notified of newly implemented methods and notebooks!
+
+ Check the PEFT Adapters API Reference section for a list of supported PEFT methods, and read the [Adapters](https://huggingface.co/docs/peft/en/conceptual_guides/adapter), [Soft prompts](https://huggingface.co/docs/peft/en/conceptual_guides/prompting), and [IA3](https://huggingface.co/docs/peft/en/conceptual_guides/ia3) conceptual guides to learn more about how these methods work.
+
+ ## Quickstart
+
+ Install PEFT from pip:
+
+ ```bash
+ pip install peft
+ ```
+
+ Prepare a model for training with a PEFT method such as LoRA by wrapping the base model and PEFT configuration with `get_peft_model`. For the bigscience/mt0-large model, you're only training 0.19% of the parameters!
+
+ ```python
+ from transformers import AutoModelForSeq2SeqLM
+ from peft import get_peft_model, LoraConfig, TaskType
+
+ model_name_or_path = "bigscience/mt0-large"
+
+ peft_config = LoraConfig(
+     task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1
+ )
+
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path)
+ model = get_peft_model(model, peft_config)
+ model.print_trainable_parameters()
+ "trainable params: 2359296 || all params: 1231940608 || trainable%: 0.19151053100118282"
+ ```
+
+ To load a PEFT model for inference:
+
+ ```py
+ from peft import AutoPeftModelForCausalLM
+ from transformers import AutoTokenizer
+ import torch
+
+ model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora").to("cuda")
+ tokenizer = AutoTokenizer.from_pretrained("facebook/opt-350m")
+
+ model.eval()
+ inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt")
+
+ outputs = model.generate(input_ids=inputs["input_ids"].to("cuda"), max_new_tokens=50)
+ print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+
+ "Preheat the oven to 350 degrees and place the cookie dough in the center of the oven. In a large bowl, combine the flour, baking powder, baking soda, salt, and cinnamon. In a separate bowl, combine the egg yolks, sugar, and vanilla."
+ ```
+
+ ## Why you should use PEFT
+
+ There are many benefits of using PEFT, but the main one is the huge savings in compute and storage, making PEFT applicable to many different use cases.
+
+ ### High performance on consumer hardware
+
+ Consider the memory requirements for training the following models on the [ought/raft/twitter_complaints](https://huggingface.co/datasets/ought/raft/viewer/twitter_complaints) dataset with an A100 80GB GPU and more than 64GB of CPU RAM.
+
+ | Model | Full Finetuning | PEFT-LoRA PyTorch | PEFT-LoRA DeepSpeed with CPU Offloading |
+ | --------- | ---- | ---- | ---- |
+ | bigscience/T0_3B (3B params) | 47.14GB GPU / 2.96GB CPU | 14.4GB GPU / 2.96GB CPU | 9.8GB GPU / 17.8GB CPU |
+ | bigscience/mt0-xxl (12B params) | OOM GPU | 56GB GPU / 3GB CPU | 22GB GPU / 52GB CPU |
+ | bigscience/bloomz-7b1 (7B params) | OOM GPU | 32GB GPU / 3.8GB CPU | 18.1GB GPU / 35GB CPU |
+
+ With LoRA you can finetune a 12B parameter model that would've otherwise run out of memory on the 80GB GPU, and comfortably fit and train a 3B parameter model. When you look at the 3B parameter model's performance, it is comparable to a fully finetuned model at a fraction of the GPU memory.
+
+ | Submission Name | Accuracy |
+ | --------- | ---- |
+ | Human baseline (crowdsourced) | 0.897 |
+ | Flan-T5 | 0.892 |
+ | lora-t0-3b | 0.863 |
+
+ > [!TIP]
+ > The bigscience/T0_3B model performance isn't optimized in the table above. You can squeeze even more performance out of it by playing around with the input instruction templates, LoRA hyperparameters, and other training-related hyperparameters. The final checkpoint size of this model is just 19MB compared to 11GB of the full bigscience/T0_3B model. Learn more about the advantages of finetuning with PEFT in this [blog post](https://www.philschmid.de/fine-tune-flan-t5-peft).
+
+ ### Quantization
+
+ Quantization is another method for reducing the memory requirements of a model by representing the data in a lower precision. It can be combined with PEFT methods to make it even easier to train and load LLMs for inference.
+
+ * Learn how to finetune [meta-llama/Llama-2-7b-hf](https://huggingface.co/meta-llama/Llama-2-7b-hf) with QLoRA and the [TRL](https://huggingface.co/docs/trl/index) library on a 16GB GPU in the [Finetune LLMs on your own consumer hardware using tools from PyTorch and Hugging Face ecosystem](https://pytorch.org/blog/finetune-llms/) blog post.
+ * Learn how to finetune an [openai/whisper-large-v2](https://huggingface.co/openai/whisper-large-v2) model for multilingual automatic speech recognition with LoRA and 8-bit quantization in this [notebook](https://colab.research.google.com/drive/1DOkD_5OUjFa0r5Ik3SgywJLJtEo2qLxO?usp=sharing) (see this [notebook](https://colab.research.google.com/drive/1vhF8yueFqha3Y3CpTHN6q9EVcII9EYzs?usp=sharing) instead for an example of streaming a dataset).
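+
+ As a concrete starting point, here is a minimal sketch of combining 4-bit quantization with LoRA (the QLoRA recipe). The model choice and hyperparameters are illustrative, and it assumes `bitsandbytes` is installed and a CUDA GPU is available:
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+ from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
+
+ # Load the base model with 4-bit weights (illustrative model choice)
+ bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
+ model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m", quantization_config=bnb_config)
+
+ # Cast norms/embeddings to fp32 and enable gradient checkpointing for stable k-bit training
+ model = prepare_model_for_kbit_training(model)
+
+ # Attach LoRA adapters; only these small matrices are trained
+ model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM", r=8, lora_alpha=32, lora_dropout=0.1))
+ model.print_trainable_parameters()
+ ```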
+
+ ### Save compute and storage
+
+ PEFT can help you save storage by avoiding full finetuning of models on each downstream task or dataset. In many cases, you're only finetuning a very small fraction of a model's parameters and each checkpoint is only a few MBs in size (instead of GBs). These smaller PEFT adapters demonstrate performance comparable to a fully finetuned model. If you have many datasets, you can save a lot of storage with a PEFT model and not have to worry about catastrophic forgetting or overfitting the backbone or base model.
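+
+ To illustrate the checkpoint size claim, here is a minimal sketch of saving and re-loading just the adapter weights; the directory name is illustrative, and `model` is assumed to be a PEFT-wrapped model as in the quickstart:
+
+ ```python
+ # Saves only adapter_config.json and the adapter weights (a few MB), not the base model
+ model.save_pretrained("mt0-large-lora")
+
+ # Later, re-attach the adapter to the original base model
+ from transformers import AutoModelForSeq2SeqLM
+ from peft import PeftModel
+
+ base_model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large")
+ model = PeftModel.from_pretrained(base_model, "mt0-large-lora")
+ ```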
+
+ ## PEFT integrations
+
+ PEFT is widely supported across the Hugging Face ecosystem because of the massive efficiency it brings to training and inference.
+
+ ### Diffusers
+
+ The iterative diffusion process consumes a lot of memory, which can make training difficult. PEFT can help reduce both the memory requirements and the storage size of the final model checkpoint. For example, consider the memory required for training a Stable Diffusion model with LoRA on an A100 80GB GPU with more than 64GB of CPU RAM. The final model checkpoint size is only 8.8MB!
+
+ | Model | Full Finetuning | PEFT-LoRA | PEFT-LoRA with Gradient Checkpointing |
+ | --------- | ---- | ---- | ---- |
+ | CompVis/stable-diffusion-v1-4 | 27.5GB GPU / 3.97GB CPU | 15.5GB GPU / 3.84GB CPU | 8.12GB GPU / 3.77GB CPU |
+
+ > [!TIP]
+ > Take a look at the [examples/lora_dreambooth/train_dreambooth.py](examples/lora_dreambooth/train_dreambooth.py) training script to try training your own Stable Diffusion model with LoRA, and play around with the [smangrul/peft-lora-sd-dreambooth](https://huggingface.co/spaces/smangrul/peft-lora-sd-dreambooth) Space which is running on a T4 instance. Learn more about the PEFT integration in Diffusers in this [tutorial](https://huggingface.co/docs/peft/main/en/tutorial/peft_integrations#diffusers).
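+
+ Once trained, such a LoRA checkpoint can be loaded straight into a Diffusers pipeline. A minimal sketch, assuming a checkpoint directory produced by a LoRA DreamBooth run (the path and prompt are illustrative):
+
+ ```python
+ import torch
+ from diffusers import StableDiffusionPipeline
+
+ pipe = StableDiffusionPipeline.from_pretrained(
+     "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16
+ ).to("cuda")
+ pipe.load_lora_weights("path/to/lora_checkpoint")  # hypothetical local path
+ image = pipe("a photo of a sks dog in a bucket").images[0]
+ ```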
+
+ ### Accelerate
+
+ [Accelerate](https://huggingface.co/docs/accelerate/index) is a library for distributed training and inference on various training setups and hardware (GPUs, TPUs, Apple Silicon, etc.). PEFT models work with Accelerate out of the box, making it convenient to train really large models or use them for inference on consumer hardware with limited resources.
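+
+ For example, Accelerate's big model inference is available through the usual `from_pretrained` interface. A minimal sketch, reusing the checkpoint from the quickstart's inference example and assuming `accelerate` is installed:
+
+ ```py
+ from peft import AutoPeftModelForCausalLM
+
+ # device_map="auto" lets Accelerate shard the model across available GPUs and CPU RAM
+ model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora", device_map="auto")
+ ```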
+
+ ### TRL
+
+ PEFT can also be applied to training LLMs with RLHF components such as the ranker and policy. Get started by reading:
+
+ * [Fine-tune a Mistral-7b model with Direct Preference Optimization](https://towardsdatascience.com/fine-tune-a-mistral-7b-model-with-direct-preference-optimization-708042745aac) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library to learn more about the Direct Preference Optimization (DPO) method and how to apply it to an LLM.
+ * [Fine-tuning 20B LLMs with RLHF on a 24GB consumer GPU](https://huggingface.co/blog/trl-peft) with PEFT and the [TRL](https://huggingface.co/docs/trl/index) library, and then try out the [gpt2-sentiment_peft.ipynb](https://github.com/huggingface/trl/blob/main/examples/notebooks/gpt2-sentiment.ipynb) notebook to optimize GPT2 to generate positive movie reviews.
+ * [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama) with PEFT, and then try out the [stack_llama/scripts](https://github.com/huggingface/trl/tree/main/examples/research_projects/stack_llama/scripts) for supervised finetuning, reward modeling, and RL finetuning.
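+
+ In practice, the integration usually amounts to passing a PEFT config to a TRL trainer. A minimal sketch of supervised finetuning with `SFTTrainer`; the model and dataset choices are illustrative, and it assumes the `SFTTrainer` API contemporary with this README:
+
+ ```python
+ from datasets import load_dataset
+ from trl import SFTTrainer
+ from peft import LoraConfig
+
+ dataset = load_dataset("imdb", split="train")
+ peft_config = LoraConfig(task_type="CAUSAL_LM", r=16, lora_alpha=32, lora_dropout=0.05)
+
+ # SFTTrainer wraps the base model with the LoRA adapter internally
+ trainer = SFTTrainer(
+     model="facebook/opt-350m",
+     train_dataset=dataset,
+     dataset_text_field="text",
+     peft_config=peft_config,
+ )
+ trainer.train()
+ ```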
+
+ ## Model support
+
+ Use this [Space](https://stevhliu-peft-methods.hf.space) or check out the [docs](https://huggingface.co/docs/peft/main/en/index) to find which models officially support a PEFT method out of the box. Even if you don't see a model listed there, you can manually configure the model config to enable PEFT for the model. Read the [New transformers architecture](https://huggingface.co/docs/peft/main/en/developer_guides/custom_models#new-transformers-architectures) guide to learn how.
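+
+ For an architecture without built-in support, the usual fix is to name the layers to adapt yourself via `target_modules`. A minimal sketch; the module names are illustrative and depend on the model's own layer naming:
+
+ ```python
+ from peft import LoraConfig
+
+ # Point LoRA at the new architecture's attention projections by module name
+ config = LoraConfig(target_modules=["q_proj", "v_proj"], r=8, lora_alpha=32)
+ ```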
+
+ ## Contribute
+
+ If you would like to contribute to PEFT, please check out our [contribution guide](https://huggingface.co/docs/peft/developer_guides/contributing).
+
+ ## Citing 🤗 PEFT
+
+ To use 🤗 PEFT in your publication, please cite it by using the following BibTeX entry.
+
+ ```bibtex
+ @Misc{peft,
+   title = {PEFT: State-of-the-art Parameter-Efficient Fine-Tuning methods},
+   author = {Sourab Mangrulkar and Sylvain Gugger and Lysandre Debut and Younes Belkada and Sayak Paul and Benjamin Bossan},
+   howpublished = {\url{https://github.com/huggingface/peft}},
+   year = {2022}
+ }
+ ```
working/peft/__init__.py ADDED
@@ -0,0 +1,98 @@
+ # flake8: noqa
+ # There's no way to ignore "F401 '...' imported but unused" warnings in this
+ # module while preserving other warnings, so don't check this module at all.
+
+ # coding=utf-8
+ # Copyright 2023-present the HuggingFace Inc. team.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ __version__ = "0.11.2.dev0"
+
+ from .auto import (
+     AutoPeftModel,
+     AutoPeftModelForCausalLM,
+     AutoPeftModelForSequenceClassification,
+     AutoPeftModelForSeq2SeqLM,
+     AutoPeftModelForTokenClassification,
+     AutoPeftModelForQuestionAnswering,
+     AutoPeftModelForFeatureExtraction,
+ )
+ from .mapping import (
+     MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
+     PEFT_TYPE_TO_CONFIG_MAPPING,
+     get_peft_config,
+     get_peft_model,
+     inject_adapter_in_model,
+ )
+ from .mixed_model import PeftMixedModel
+ from .peft_model import (
+     PeftModel,
+     PeftModelForCausalLM,
+     PeftModelForSeq2SeqLM,
+     PeftModelForSequenceClassification,
+     PeftModelForTokenClassification,
+     PeftModelForQuestionAnswering,
+     PeftModelForFeatureExtraction,
+     get_layer_status,
+     get_model_status,
+ )
+ from .tuners import (
+     AdaptionPromptConfig,
+     AdaptionPromptModel,
+     LoraConfig,
+     LoftQConfig,
+     LoraModel,
+     LoHaConfig,
+     LoHaModel,
+     LoKrConfig,
+     LoKrModel,
+     IA3Config,
+     IA3Model,
+     AdaLoraConfig,
+     AdaLoraModel,
+     BOFTConfig,
+     BOFTModel,
+     PrefixEncoder,
+     PrefixTuningConfig,
+     PromptEmbedding,
+     PromptEncoder,
+     PromptEncoderConfig,
+     PromptEncoderReparameterizationType,
+     PromptTuningConfig,
+     PromptTuningInit,
+     MultitaskPromptTuningConfig,
+     MultitaskPromptTuningInit,
+     OFTConfig,
+     OFTModel,
+     PolyConfig,
+     PolyModel,
+     LNTuningConfig,
+     LNTuningModel,
+     VeraConfig,
+     VeraModel,
+ )
+ from .utils import (
+     TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
+     PeftType,
+     TaskType,
+     bloom_model_postprocess_past_key_value,
+     get_peft_model_state_dict,
+     prepare_model_for_kbit_training,
+     replace_lora_weights_loftq,
+     set_peft_model_state_dict,
+     shift_tokens_right,
+     load_peft_weights,
+     cast_mixed_precision_params,
+ )
+ from .config import PeftConfig, PromptLearningConfig
working/peft/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.34 kB)
working/peft/__pycache__/auto.cpython-310.pyc ADDED
Binary file (4.88 kB)
working/peft/__pycache__/config.cpython-310.pyc ADDED
Binary file (8.77 kB)
working/peft/__pycache__/import_utils.cpython-310.pyc ADDED
Binary file (2.15 kB)
working/peft/__pycache__/mapping.cpython-310.pyc ADDED
Binary file (5.8 kB)
working/peft/__pycache__/mixed_model.cpython-310.pyc ADDED
Binary file (14.8 kB)
working/peft/__pycache__/peft_model.cpython-310.pyc ADDED
Binary file (76.6 kB)