zhaozitian committed
Commit a9b1c69
1 Parent(s): d787840

Upload 13 files

README.md CHANGED
@@ -1,3 +1,22 @@
  ---
- license: cc-by-sa-4.0
+ library_name: peft
  ---
+ ## Training procedure
+
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+
+ The following `bitsandbytes` quantization config was used during training:
+ - load_in_8bit: True
+ - llm_int8_threshold: 6.0
+ - llm_int8_skip_modules: None
+ - llm_int8_enable_fp32_cpu_offload: False
+ ### Framework versions
+
+ - PEFT 0.5.0.dev0
+
+ - PEFT 0.5.0.dev0
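
As a usage note (not part of this commit), the quantization settings listed in the README correspond to a `BitsAndBytesConfig` in `transformers`. A minimal sketch, assuming the Hub ID `meta-llama/Llama-2-13b-chat-hf` for the base model that the adapter config references by local path:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Mirrors the bitsandbytes settings from the README above.
quant_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_threshold=6.0,
    llm_int8_skip_modules=None,
    llm_int8_enable_fp32_cpu_offload=False,
)

# Assumed Hub ID; the repo's adapter_config.json points at a local copy
# ("../llama/Llama-2-13b-chat-hf").
base_model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Llama-2-13b-chat-hf",
    quantization_config=quant_config,
    device_map="auto",
)
```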
adapter_config.json ADDED
@@ -0,0 +1,21 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "../llama/Llama-2-13b-chat-hf",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "revision": null,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
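
The adapter config above is a standard PEFT LoRA setup (r=8, alpha=16, dropout 0.05, LoRA applied to `q_proj`/`v_proj`). A minimal sketch, continuing from the loading sketch above, of recreating that config for training and of attaching the uploaded adapter for inference; `path/to/this/repo` is a placeholder for wherever the adapter files are downloaded:

```python
from peft import LoraConfig, PeftModel, get_peft_model

# Recreate the training-time config recorded in adapter_config.json.
lora_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)
trainable_model = get_peft_model(base_model, lora_config)

# Or, for inference, load the uploaded adapter weights onto the base model.
model = PeftModel.from_pretrained(base_model, "path/to/this/repo")
model.eval()
```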
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5e1621f48d9ad8feb1d6d31050275f0aafd080c5c07153301fe2f48411f4406
+ size 443
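
The binary weights themselves live in Git LFS; what the repository tracks is the small pointer stub shown above (`version`, `oid`, `size` key-value lines). A minimal sketch of reading such a stub:

```python
def parse_lfs_pointer(text: str) -> dict:
    """Parse a Git LFS pointer stub into its key-value fields."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

# e.g. parse_lfs_pointer(open("adapter_model.bin").read())
# -> {"version": "https://git-lfs.github.com/spec/v1",
#     "oid": "sha256:e5e1...", "size": "443"}
```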
checkpoint-200/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd252023ad377ab471696a0353253ad4f3c6446c45bafbbafd2171307ba620d8
+ size 52562757
checkpoint-200/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:356ded462a5c026117412149a3c38fa2a19d3ada9a86225c129661b4a2d90cf2
+ size 26271757
checkpoint-200/rng_state_0.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:819bbebd4638bc94da4de230bc6ce2f470d518a082e944aedd33a78f501377f9
+ size 17655
checkpoint-200/rng_state_1.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fb1711896b18fae6b3b01415cfe8630f68197edc43417b873cad79e315270bd
+ size 17655
checkpoint-200/rng_state_2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87213c71bd5423f9e0ce464f07994f133bfb98c7344d36f4951e6665a01e0435
+ size 17655
checkpoint-200/rng_state_3.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:152127eb111c9847a65aeef6260b412925df7be5584ebe72e4c3f5f0ab72570d
+ size 17655
checkpoint-200/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fccf0f9be1bb8f24861e4393745b3e09cc2687125a69e3757955fb0f0925ea5
+ size 557
checkpoint-200/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6aadae1c1fe293674b5a6ac7680b50aac3799a07d912e3567ecc67f1214efb1b
+ size 627
checkpoint-200/trainer_state.json ADDED
@@ -0,0 +1,144 @@
+ {
+   "best_metric": 0.7962789535522461,
+   "best_model_checkpoint": "../outputs/13b_chat_lora_0/checkpoint-200",
+   "epoch": 0.6825938566552902,
+   "global_step": 200,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 2.9999999999999997e-05,
+       "loss": 1.591,
+       "step": 10
+     },
+     {
+       "epoch": 0.07,
+       "learning_rate": 5.9999999999999995e-05,
+       "loss": 1.5586,
+       "step": 20
+     },
+     {
+       "epoch": 0.1,
+       "learning_rate": 8.999999999999999e-05,
+       "loss": 1.3909,
+       "step": 30
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 0.00011999999999999999,
+       "loss": 1.1395,
+       "step": 40
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00015,
+       "loss": 0.9936,
+       "step": 50
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 0.00017999999999999998,
+       "loss": 0.939,
+       "step": 60
+     },
+     {
+       "epoch": 0.24,
+       "learning_rate": 0.00020999999999999998,
+       "loss": 0.9151,
+       "step": 70
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 0.00023999999999999998,
+       "loss": 0.8975,
+       "step": 80
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 0.00027,
+       "loss": 0.8636,
+       "step": 90
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 0.0003,
+       "loss": 0.8319,
+       "step": 100
+     },
+     {
+       "epoch": 0.38,
+       "learning_rate": 0.00028445595854922277,
+       "loss": 0.8163,
+       "step": 110
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 0.00026891191709844557,
+       "loss": 0.814,
+       "step": 120
+     },
+     {
+       "epoch": 0.44,
+       "learning_rate": 0.00025336787564766836,
+       "loss": 0.8116,
+       "step": 130
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 0.0002378238341968912,
+       "loss": 0.8058,
+       "step": 140
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 0.00022227979274611396,
+       "loss": 0.8039,
+       "step": 150
+     },
+     {
+       "epoch": 0.55,
+       "learning_rate": 0.00020673575129533678,
+       "loss": 0.8035,
+       "step": 160
+     },
+     {
+       "epoch": 0.58,
+       "learning_rate": 0.00019119170984455958,
+       "loss": 0.7976,
+       "step": 170
+     },
+     {
+       "epoch": 0.61,
+       "learning_rate": 0.00017564766839378237,
+       "loss": 0.7947,
+       "step": 180
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 0.00016010362694300517,
+       "loss": 0.7911,
+       "step": 190
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 0.00014455958549222797,
+       "loss": 0.7928,
+       "step": 200
+     },
+     {
+       "epoch": 0.68,
+       "eval_loss": 0.7962789535522461,
+       "eval_runtime": 39.6388,
+       "eval_samples_per_second": 50.456,
+       "eval_steps_per_second": 1.589,
+       "step": 200
+     }
+   ],
+   "max_steps": 293,
+   "num_train_epochs": 1,
+   "total_flos": 6.32024838701056e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
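
The `learning_rate` values in `log_history` imply a linear warmup from 0 to 3e-4 over the first 100 steps, followed by a linear decay toward 0 at `max_steps` = 293. A small sketch (an inference from the logs, not a file in this commit) that reproduces the logged values:

```python
def lr_at_step(step, peak_lr=3e-4, warmup_steps=100, max_steps=293):
    # Linear warmup, then linear decay to zero at max_steps.
    if step < warmup_steps:
        return peak_lr * step / warmup_steps
    return peak_lr * max(0.0, (max_steps - step) / (max_steps - warmup_steps))

print(lr_at_step(100))  # 3e-4, the logged peak at step 100
print(lr_at_step(110))  # ~2.8446e-4, matching the value logged at step 110
```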
checkpoint-200/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3edcce272f33c7ece5a84e4ff5374199ab781df059da624092e0bb97918b44d
+ size 3899