jondurbin committed
Commit 4c3040d
1 Parent(s): 58f61d4

Upload folder using huggingface_hub

Files changed (3):
  1. README.md +44 -1
  2. adapter_config.json +26 -0
  3. adapter_model.safetensors +3 -0
README.md CHANGED
@@ -1,3 +1,46 @@
  ---
- license: llama2
+ library_name: peft
  ---
+ ## Training procedure
+
+ The following `bitsandbytes` quantization config was used during training:
+ - quant_method: bitsandbytes
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: True
+ - bnb_4bit_compute_dtype: bfloat16
+
+ ### Framework versions
+
+ - PEFT 0.5.0
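For reference, a minimal sketch (not part of this commit) of how the quantization settings above map onto a `transformers` `BitsAndBytesConfig` when loading the base model; the model path is taken from `adapter_config.json` below, and settings that match library defaults (e.g. `llm_int8_threshold: 6.0`) are left implicit:

```python
# Minimal sketch: load the base model with the 4-bit NF4 settings
# listed in the README. Assumes transformers + bitsandbytes installed.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                      # load_in_4bit: True
    bnb_4bit_quant_type="nf4",              # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=True,         # bnb_4bit_use_double_quant: True
    bnb_4bit_compute_dtype=torch.bfloat16,  # bnb_4bit_compute_dtype: bfloat16
)

base_model = AutoModelForCausalLM.from_pretrained(
    "/workspace/llama-2-70b-hf",  # base_model_name_or_path from adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",
)
```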
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "/workspace/llama-2-70b-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 32.0,
+   "lora_dropout": 0.03,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "revision": null,
+   "target_modules": [
+     "v_proj",
+     "up_proj",
+     "k_proj",
+     "q_proj",
+     "down_proj",
+     "o_proj",
+     "gate_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
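For context, a minimal sketch (not part of this commit) of the equivalent `peft` usage: rebuilding this adapter configuration as a `LoraConfig` and attaching the published adapter weights to the 4-bit `base_model` from the sketch above. The repository id is a hypothetical placeholder:

```python
# Minimal sketch, assuming `base_model` from the previous example.
from peft import LoraConfig, PeftModel

# Illustrative LoraConfig mirroring adapter_config.json: rank-16 LoRA
# on every attention and MLP projection of the Llama-2 architecture.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32.0,
    lora_dropout=0.03,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=[
        "q_proj", "k_proj", "v_proj", "o_proj",
        "up_proj", "down_proj", "gate_proj",
    ],
)

# Attach the trained adapter; "jondurbin/adapter-repo" is a placeholder
# for this repository's id on the Hub.
model = PeftModel.from_pretrained(base_model, "jondurbin/adapter-repo")
model.eval()
```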
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cc71ff5677ab5080f4a038e1fb63ab9a1bacf78ae44ffbd8f1bf8c428c2c72d
+ size 828526568
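As a usage note, a minimal sketch (not part of this commit) of verifying a downloaded `adapter_model.safetensors` against the sha256 `oid` recorded in the LFS pointer above:

```python
# Minimal sketch: stream the file through sha256 and compare with the
# oid from the git-LFS pointer. Assumes the file sits in the cwd.
import hashlib

EXPECTED = "2cc71ff5677ab5080f4a038e1fb63ab9a1bacf78ae44ffbd8f1bf8c428c2c72d"

h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == EXPECTED, "checksum mismatch: incomplete LFS download?"
```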