wesley7137 committed
Commit 05f4acb
1 Parent(s): 96c9752

Upload 11 files

README.md ADDED
@@ -0,0 +1,32 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: False
+ - load_in_4bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ - llm_int8_has_fp16_weight: False
+ - bnb_4bit_quant_type: nf4
+ - bnb_4bit_use_double_quant: False
+ - bnb_4bit_compute_dtype: float16
+ ### Framework versions
+
+ - PEFT 0.4.0.dev0
+
+ - PEFT 0.4.0.dev0
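
For reference, a minimal sketch of how the settings above map onto `transformers`' `BitsAndBytesConfig` when loading the base model named in `adapter_config.json` below. `device_map="auto"` and `trust_remote_code=True` are assumptions, not recorded anywhere in this commit.

```python
# Sketch only: rebuilds the 4-bit NF4 setup listed in the README above.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # load_in_4bit: True
    bnb_4bit_quant_type="nf4",             # bnb_4bit_quant_type: nf4
    bnb_4bit_use_double_quant=False,       # bnb_4bit_use_double_quant: False
    bnb_4bit_compute_dtype=torch.float16,  # bnb_4bit_compute_dtype: float16
)

base_model = AutoModelForCausalLM.from_pretrained(
    "ybelkada/falcon-7b-sharded-bf16",  # base model per adapter_config.json
    quantization_config=bnb_config,
    device_map="auto",       # assumption, not stored in this repo
    trust_remote_code=True,  # Falcon-7B required custom modeling code at the time
)
```
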
adapter_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "base_model_name_or_path": "ybelkada/falcon-7b-sharded-bf16",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "revision": null,
+   "target_modules": [
+     "query_key_value",
+     "dense",
+     "dense_h_to_4h",
+     "dense_4h_to_h"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
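
The JSON above maps one-to-one onto a `peft.LoraConfig`. Below is a sketch of that config plus adapter loading; `"path/to/this/repo"` is a placeholder for wherever these files are downloaded, and `base_model` is the 4-bit model from the sketch in the README section.

```python
# Sketch: the LoraConfig implied by adapter_config.json (useful for re-training).
from peft import LoraConfig, PeftModel

lora_config = LoraConfig(
    r=64,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"],
)

# For inference, PEFT reads adapter_config.json itself; only the path is needed.
model = PeftModel.from_pretrained(base_model, "path/to/this/repo")  # placeholder path
model.eval()
```
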
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8c448ca1d4d024d3834bc0500206b126fb14f6fe00da23ef479baf7441c29d23
+ size 522284877
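
`adapter_model.bin`, `optimizer.pt`, `rng_state.pth`, `scheduler.pt`, and `training_args.bin` are stored as Git LFS pointers; each `oid` is the SHA-256 of the real file's contents, so a download can be checked with a short script (shown for `adapter_model.bin`, assuming the file sits in the working directory).

```python
# Sketch: verify a Git LFS download against the oid in its pointer file.
import hashlib

h = hashlib.sha256()
with open("adapter_model.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

# Expected: 8c448ca1d4d024d3834bc0500206b126fb14f6fe00da23ef479baf7441c29d23
print(h.hexdigest())
```
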
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73aaa523b20937e24c3ac630bc370a132ae9eb420f79ae9d39a3109e03d530a2
+ size 1044539909
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c4614bef8d0c76e0e44e010604810a857e98dd94c60e241bdcdc972ef87c36e
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4202ccb0f2af9a168fa8c61fa3aea5d65d04bab329ea2372802c650783fb8432
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "additional_special_tokens": [
+     ">>TITLE<<",
+     ">>ABSTRACT<<",
+     ">>INTRODUCTION<<",
+     ">>SUMMARY<<",
+     ">>COMMENT<<",
+     ">>ANSWER<<",
+     ">>QUESTION<<",
+     ">>DOMAIN<<",
+     ">>PREFIX<<",
+     ">>SUFFIX<<",
+     ">>MIDDLE<<"
+   ],
+   "eos_token": "<|endoftext|>",
+   "pad_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "add_prefix_space": false,
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_max_length": 2048,
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
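
`special_tokens_map.json`, `tokenizer.json`, and `tokenizer_config.json` are the three files `AutoTokenizer` reads. A sketch of loading them, with the repo path again a placeholder:

```python
# Sketch: load the fast tokenizer defined by the three tokenizer files above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

# special_tokens_map.json reuses the EOS token for padding.
print(tokenizer.eos_token, tokenizer.pad_token)  # both should be <|endoftext|>
```
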
trainer_state.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.2742857142857143,
+   "global_step": 480,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.02,
+       "learning_rate": 0.0002,
+       "loss": 3.7468,
+       "step": 30
+     },
+     {
+       "epoch": 0.03,
+       "learning_rate": 0.0002,
+       "loss": 3.4221,
+       "step": 60
+     },
+     {
+       "epoch": 0.05,
+       "learning_rate": 0.0002,
+       "loss": 3.2415,
+       "step": 90
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 0.0002,
+       "loss": 3.087,
+       "step": 120
+     },
+     {
+       "epoch": 0.09,
+       "learning_rate": 0.0002,
+       "loss": 3.0862,
+       "step": 150
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 0.0002,
+       "loss": 2.8754,
+       "step": 180
+     },
+     {
+       "epoch": 0.12,
+       "learning_rate": 0.0002,
+       "loss": 2.8564,
+       "step": 210
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 0.0002,
+       "loss": 2.7123,
+       "step": 240
+     },
+     {
+       "epoch": 0.15,
+       "learning_rate": 0.0002,
+       "loss": 2.605,
+       "step": 270
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.0002,
+       "loss": 2.6557,
+       "step": 300
+     },
+     {
+       "epoch": 0.19,
+       "learning_rate": 0.0002,
+       "loss": 2.5594,
+       "step": 330
+     },
+     {
+       "epoch": 0.21,
+       "learning_rate": 0.0002,
+       "loss": 2.4986,
+       "step": 360
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 0.0002,
+       "loss": 2.4444,
+       "step": 390
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 0.0002,
+       "loss": 2.356,
+       "step": 420
+     },
+     {
+       "epoch": 0.26,
+       "learning_rate": 0.0002,
+       "loss": 2.4237,
+       "step": 450
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 0.0002,
+       "loss": 2.2358,
+       "step": 480
+     }
+   ],
+   "max_steps": 500,
+   "num_train_epochs": 1,
+   "total_flos": 8.124998979944448e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
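
`trainer_state.json` is plain JSON, so the logged loss trajectory (3.75 down to 2.24 over 480 of 500 steps, at a constant learning rate of 2e-4) can be inspected without any `transformers` dependency:

```python
# Sketch: print the loss curve recorded in trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")
```
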
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b664491645b0e57986449ca5d91be384564d3588746d6dc133503dcce5be5215
+ size 3899