monson committed
Commit be0bd1a
1 Parent(s): abb0bc9

Upload model

Files changed (3)
  1. README.md +3 -3
  2. adapter_config.json +5 -5
  3. adapter_model.safetensors +2 -2
README.md CHANGED
@@ -210,9 +210,9 @@ The following `bitsandbytes` quantization config was used during training:
  - llm_int8_skip_modules: None
  - llm_int8_enable_fp32_cpu_offload: False
  - llm_int8_has_fp16_weight: False
- - bnb_4bit_quant_type: fp4
- - bnb_4bit_use_double_quant: False
- - bnb_4bit_compute_dtype: float32
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16

  ### Framework versions

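For anyone reproducing the setup, the updated values map onto a `transformers.BitsAndBytesConfig` roughly as follows. This is a minimal sketch: `load_in_4bit=True` is an assumption inferred from the presence of the 4-bit options, not something shown in the diff.

```python
import torch
from transformers import BitsAndBytesConfig

# Sketch of the post-commit quantization config.
# load_in_4bit=True is assumed; it does not appear in the diff itself.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",              # was "fp4"
    bnb_4bit_use_double_quant=True,         # was False
    bnb_4bit_compute_dtype=torch.bfloat16,  # was torch.float32
)
```

NF4 with double quantization and a bfloat16 compute dtype is the combination recommended by the QLoRA paper, which is likely why all three values moved together in this commit.
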
adapter_config.json CHANGED
@@ -16,13 +16,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "v_proj",
-    "up_proj",
+    "k_proj",
     "gate_proj",
+    "up_proj",
     "down_proj",
-    "q_proj",
-    "k_proj",
-    "o_proj"
+    "v_proj",
+    "o_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
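
Functionally this hunk is a no-op: both sides list the same seven projection modules, only in a different serialization order, and PEFT matches `target_modules` by module name rather than by position. A minimal sketch of the equivalent `peft.LoraConfig` follows; the rank, alpha, and other fields outside this hunk are left at their defaults here.

```python
from peft import LoraConfig

# Same module set before and after the commit; order does not matter to PEFT.
# Fields not visible in the hunk (r, lora_alpha, dropout, ...) use defaults.
lora_config = LoraConfig(
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",  # attention projections
        "gate_proj", "up_proj", "down_proj",     # MLP projections
    ],
)
```

Targeting all attention and MLP projections, rather than just `q_proj`/`v_proj`, is the "all linear layers" setup the QLoRA authors found to work best for 4-bit fine-tuning.
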
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:84dbf77a44ac1f7eec64126c499ac0cfc8428700d3a2801827a8ce7e1847e9c8
- size 42002136
+ oid sha256:f64c1dd5291e221753a25aeecc279db155dc1e7eb9c6fc431d036b5d21a30229
+ size 83945296