Commit bc240f5
Parent(s): 329631e

Upload folder using huggingface_hub

Files changed:
- README.md +4 -40
- adapter_config.json +1 -26
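
Per the commit message, this snapshot was pushed with `huggingface_hub`'s folder upload. As a minimal sketch (the local path is hypothetical and the exact call is not recorded in the commit), a commit like this is typically produced by:

```python
from huggingface_hub import HfApi

api = HfApi()  # assumes you are authenticated, e.g. via `huggingface-cli login`
api.upload_folder(
    folder_path="./bloom-7b1-clpp-ar",  # hypothetical local directory
    repo_id="atsuki-yamaguchi/bloom-7b1-clpp-ar",
    repo_type="model",
)
# When no commit_message is given, upload_folder defaults to
# "Upload folder using huggingface_hub", the message seen on this commit.
```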
README.md CHANGED
@@ -1,46 +1,9 @@
 ---
 library_name: peft
-license: mit
-language:
-- ar
 ---
-
-===
-
-## How to use
-```python
-from peft import AutoPeftModelForCausalLM
-from transformers import AutoTokenizer
-
-model = AutoPeftModelForCausalLM.from_pretrained(
-    "atsuki-yamaguchi/bloom-7b1-clpp-ar"
-)
-
-# w/ GPU
-model = AutoPeftModelForCausalLM.from_pretrained(
-    "atsuki-yamaguchi/bloom-7b1-clpp-ar",
-    device_map="auto",
-    load_in_8bit=True,
-)
-```
-
-## Citation
-```
-@article{yamaguchi2024empirical,
-    title={An Empirical Study on Cross-lingual Vocabulary Adaptation for Efficient Generative {LLM} Inference},
-    author={Atsuki Yamaguchi and Aline Villavicencio and Nikolaos Aletras},
-    journal={ArXiv},
-    year={2024},
-    volume={abs/2402.10712},
-    url={https://arxiv.org/abs/2402.10712}
-}
-```
-
-## Link
-For more details, please visit https://github.com/gucci-j/llm-cva
+## Training procedure
 
 
-## Training procedure
 The following `bitsandbytes` quantization config was used during training:
 - quant_method: bitsandbytes
 - load_in_8bit: True
@@ -52,6 +15,7 @@ The following `bitsandbytes` quantization config was used during training:
 - bnb_4bit_quant_type: fp4
 - bnb_4bit_use_double_quant: False
 - bnb_4bit_compute_dtype: float32
-
 ### Framework versions
-
+
+
+- PEFT 0.5.0
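
The trimmed README keeps only the training-time quantization settings. As a minimal sketch (an assumption, not code shipped in this repo), the listed values correspond to a `transformers.BitsAndBytesConfig` like the following; the removed usage snippet achieved the same effect by passing `load_in_8bit=True` directly to `from_pretrained`:

```python
import torch
from transformers import BitsAndBytesConfig

# Assumed reconstruction of the settings listed in the README. The adapter
# was trained with 8-bit loading, so the bnb_4bit_* fields are library
# defaults and have no effect here.
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    bnb_4bit_quant_type="fp4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.float32,
)
```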
adapter_config.json CHANGED
@@ -1,26 +1 @@
-{
-  "auto_mapping": null,
-  "base_model_name_or_path": "atsuki-yamaguchi/bloom-7b1-clpp-ar",
-  "bias": "none",
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
-  "modules_to_save": [
-    "lm_head",
-    "word_embeddings"
-  ],
-  "peft_type": "LORA",
-  "r": 8,
-  "revision": null,
-  "target_modules": [
-    "query_key_value",
-    "dense",
-    "dense_h_to_4h",
-    "dense_4h_to_h"
-  ],
-  "task_type": "CAUSAL_LM"
-}
+{"auto_mapping": null, "base_model_name_or_path": "atsuki-yamaguchi/bloom-7b1-clpp-ar", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layers_pattern": null, "layers_to_transform": null, "lora_alpha": 32, "lora_dropout": 0.05, "modules_to_save": ["lm_head", "word_embeddings"], "peft_type": "LORA", "r": 8, "revision": null, "target_modules": ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"], "task_type": "CAUSAL_LM"}