Tirendaz committed
Commit 2ff62ab
1 Parent(s): a49a9cd

Upload model

Files changed (3):
  1. README.md +0 -38
  2. adapter_config.json +8 -5
  3. adapter_model.safetensors +2 -2
README.md CHANGED
@@ -217,42 +217,4 @@ The following `bitsandbytes` quantization config was used during training:
 ### Framework versions


-- PEFT 0.6.2
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- quant_method: bitsandbytes
-- load_in_8bit: False
-- load_in_4bit: True
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: nf4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: bfloat16
-
-### Framework versions
-
-
-- PEFT 0.6.2
-## Training procedure
-
-
-The following `bitsandbytes` quantization config was used during training:
-- quant_method: bitsandbytes
-- load_in_8bit: False
-- load_in_4bit: True
-- llm_int8_threshold: 6.0
-- llm_int8_skip_modules: None
-- llm_int8_enable_fp32_cpu_offload: False
-- llm_int8_has_fp16_weight: False
-- bnb_4bit_quant_type: nf4
-- bnb_4bit_use_double_quant: False
-- bnb_4bit_compute_dtype: bfloat16
-
-### Framework versions
-
-
 - PEFT 0.6.2
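The quantization block this commit de-duplicates (one copy remains in the README) describes a 4-bit NF4 setup. As a hedged illustration only, a `transformers` `BitsAndBytesConfig` matching those fields might look like the sketch below; the field values come from the README list, and the int8 fields shown there correspond to library defaults.

```python
# Sketch only: a BitsAndBytesConfig mirroring the README's quantization
# fields. Values are copied from the README list; nothing else is assumed.
import torch
from transformers import BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_8bit=False,                      # load_in_8bit: False
    load_in_4bit=True,                       # load_in_4bit: True
    llm_int8_threshold=6.0,                  # llm_int8_threshold: 6.0
    llm_int8_skip_modules=None,              # llm_int8_skip_modules: None
    llm_int8_enable_fp32_cpu_offload=False,  # llm_int8_enable_fp32_cpu_offload: False
    llm_int8_has_fp16_weight=False,          # llm_int8_has_fp16_weight: False
    bnb_4bit_quant_type="nf4",               # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=False,         # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.bfloat16,   # bnb_4bit_compute_dtype: bfloat16
)
```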
adapter_config.json CHANGED
@@ -9,18 +9,21 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "lora_alpha": 16,
-  "lora_dropout": 0.1,
+  "lora_dropout": 0.05,
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 64,
+  "r": 8,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
+    "lm_head",
+    "q_proj",
+    "up_proj",
+    "down_proj",
     "v_proj",
-    "o_proj",
     "gate_proj",
-    "q_proj"
+    "k_proj",
+    "o_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
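For reference, a hedged sketch of the `peft` `LoraConfig` that would serialize to the updated values above; only the fields visible in this diff are taken from the commit, and unstated fields keep their library defaults.

```python
# Sketch only: a peft LoraConfig matching the updated adapter_config.json.
# All values below are copied from the diff.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                # rank, reduced from 64
    lora_alpha=16,      # unchanged
    lora_dropout=0.05,  # reduced from 0.1
    target_modules=[
        "lm_head", "q_proj", "up_proj", "down_proj",
        "v_proj", "gate_proj", "k_proj", "o_proj",
    ],
    task_type="CAUSAL_LM",
)
```

Lowering `r` from 64 to 8 shrinks each LoRA matrix pair roughly eightfold, which is consistent with the much smaller adapter file in the next diff.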
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:edeacf260f8a14d9b56ad699f051606977247af6bf8aa2caa6e692b441cd8ed0
-size 369142184
+oid sha256:b53fa764a5c88813feac666935cc4d8e7f2bd00acd92327b7b3084081ab091ac
+size 85100592
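The new LFS pointer records a drop from 369,142,184 to 85,100,592 bytes, in line with the rank reduction above. To use the uploaded adapter, one would typically attach it to a quantized base model; a minimal sketch follows, in which both repo ids are placeholders (this commit does not name them) and `bnb_config` is the quantization config sketched earlier.

```python
# Sketch only: loading the adapter on a 4-bit base model. Both repo ids
# below are placeholders; this commit does not identify either repository.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained(
    "your-base-model-id",            # placeholder
    quantization_config=bnb_config,  # the BitsAndBytesConfig sketched above
    device_map="auto",
)
model = PeftModel.from_pretrained(base, "your-username/your-adapter-repo")  # placeholder
```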