tanatapanun committed (verified)
Commit 0b7999e · 1 Parent(s): b79fcdc

Training in progress, step 500

model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f63de6b452244fcd86ba168d9250529c4b9dd34587098ff5ff9e98d94046ff95
+ oid sha256:13588ccaca7863933ec8339195f9f63fde9751d81648643a441823588be97d2b
  size 560837164
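
This change only swaps the Git LFS pointer: the repository tracks model.safetensors by its sha256 and byte size, not the weights themselves. After downloading, the local file can be checked against the pointer's oid. A minimal sketch (the path is an assumption for a local copy in the working directory):

```python
# Minimal sketch: compare a downloaded LFS object against the sha256 oid in its pointer.
# The local path below is an assumption; point it at wherever model.safetensors was fetched.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)  # hash in 1 MiB chunks to avoid loading ~560 MB at once
    return h.hexdigest()

expected = "13588ccaca7863933ec8339195f9f63fde9751d81648643a441823588be97d2b"
print(sha256_of("model.safetensors") == expected)
```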
runs/Jan24_19-31-27_william-gpu-3090-10-8vlnc/events.out.tfevents.1706124688.william-gpu-3090-10-8vlnc.4125.1 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:44ecf8202a5876fec5fd107f6b2e338f7837bfb1d2b15aa799b457d309932130
- size 14488
+ oid sha256:0f2283067947411da78f3146d700d05c23a931616f510cddc2f42e8a86d67ca0
+ size 15695
runs/Jan24_20-07-13_william-gpu-3090-10-8vlnc/events.out.tfevents.1706126835.william-gpu-3090-10-8vlnc.4362.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7bda0bd8c5c851d769c859d73f9b890378407891f1ce61afff79b7d3df8729a1
+ size 6407
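
The files under runs/ are TensorBoard event logs written during training. A sketch for reading them with TensorBoard's EventAccumulator, assuming the tensorboard package is installed; which scalar tags appear depends on what the Trainer actually logged:

```python
# Sketch: list and dump the scalars stored in the TensorBoard run directory added above.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jan24_20-07-13_william-gpu-3090-10-8vlnc")
acc.Reload()  # parse the events.out.tfevents.* file(s) in the run directory

scalar_tags = acc.Tags()["scalars"]
print(scalar_tags)
for tag in scalar_tags:
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```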
tmp-checkpoint-5000/config.json ADDED
@@ -0,0 +1,75 @@
+ {
+   "_name_or_path": "GanjinZero/biobart-base",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 0,
+   "classif_dropout": 0.1,
+   "classifier_dropout": 0.0,
+   "d_model": 768,
+   "decoder_attention_heads": 12,
+   "decoder_ffn_dim": 3072,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 6,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 12,
+   "encoder_ffn_dim": 3072,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 6,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_length": 256,
+   "max_position_embeddings": 1500,
+   "model_type": "bart",
+   "no_repeat_ngram_size": 3,
+   "normalize_before": false,
+   "normalize_embedding": true,
+   "num_beams": 4,
+   "num_hidden_layers": 6,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "length_penalty": 1.0,
+       "max_length": 128,
+       "min_length": 12,
+       "num_beams": 4
+     },
+     "summarization_cnn": {
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "num_beams": 4
+     },
+     "summarization_xsum": {
+       "length_penalty": 1.0,
+       "max_length": 62,
+       "min_length": 11,
+       "num_beams": 6
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.36.2",
+   "use_cache": true,
+   "vocab_size": 50265
+ }
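
config.json above describes a BART-base-sized encoder-decoder (6 encoder and 6 decoder layers, d_model 768, vocab_size 50265) initialized from GanjinZero/biobart-base. A minimal sketch for loading it with transformers, assuming the checkpoint directory has been copied locally under the name used in this commit:

```python
# Sketch: load the architecture and weights described by this checkpoint's config.json.
# "tmp-checkpoint-5000" is the directory name from this commit; adjust the path as needed.
from transformers import AutoConfig, AutoModelForSeq2SeqLM

config = AutoConfig.from_pretrained("tmp-checkpoint-5000")
print(config.model_type, config.d_model, config.encoder_layers, config.decoder_layers)

model = AutoModelForSeq2SeqLM.from_pretrained("tmp-checkpoint-5000")  # reads model.safetensors
print(sum(p.numel() for p in model.parameters()))  # ~140M parameters (~560 MB of float32 weights)
```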
tmp-checkpoint-5000/generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 0,
+   "decoder_start_token_id": 2,
+   "early_stopping": true,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "max_length": 256,
+   "no_repeat_ngram_size": 3,
+   "num_beams": 4,
+   "pad_token_id": 1,
+   "transformers_version": "4.36.2"
+ }
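
generation_config.json fixes the decoding defaults: beam search with 4 beams, max_length 256, and no repeated 3-grams. model.generate() picks these up automatically when the checkpoint is loaded. A sketch, again assuming a local copy of the checkpoint; the input sentence is only a placeholder:

```python
# Sketch: generate with the defaults stored in generation_config.json (4 beams, max_length 256).
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

checkpoint = "tmp-checkpoint-5000"  # local checkpoint directory from this commit
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

inputs = tokenizer("placeholder input text for the fine-tuned model", return_tensors="pt")
output_ids = model.generate(**inputs)  # generation_config.json supplies the decoding parameters
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```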
tmp-checkpoint-5000/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tmp-checkpoint-5000/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d2d66fab2834e74b4cd13d35b2cfcad39d0308cb263884004288bda37df8c53c
+ size 560837164
tmp-checkpoint-5000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43f8314bc414d836f6f1805d0fa0b07dd1540077459979bfdd6e57c3149e0ab4
+ size 1121426543
tmp-checkpoint-5000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3a969f69ace2a9fde6bc6a6c6a4d8cb016999f22fc9568d6f8b45381cc6b53d
+ size 14503
tmp-checkpoint-5000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3150b7c15891230da7b0f4bb1993e9da93664827e9fb35fc57ca9e26eb3fb9de
+ size 623
tmp-checkpoint-5000/special_tokens_map.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "cls_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "mask_token": {
+     "content": "<mask>",
+     "lstrip": true,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "sep_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tmp-checkpoint-5000/tokenizer_config.json ADDED
@@ -0,0 +1,56 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "50264": {
+       "content": "<mask>",
+       "lstrip": true,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<s>",
+   "clean_up_tokenization_spaces": true,
+   "cls_token": "<s>",
+   "eos_token": "</s>",
+   "errors": "replace",
+   "mask_token": "<mask>",
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "sep_token": "</s>",
+   "tokenizer_class": "BartTokenizer",
+   "unk_token": "<unk>"
+ }
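
Together with vocab.json and merges.txt, the two token maps above load as a standard BartTokenizer with RoBERTa-style special tokens (<s>, </s>, <pad>, <unk>, and <mask> at id 50264). A small sketch, assuming the same local checkpoint directory; the sample sentence is illustrative only:

```python
# Sketch: load the tokenizer saved in this checkpoint and check its special tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("tmp-checkpoint-5000")
print(type(tokenizer).__name__)  # typically BartTokenizerFast (or BartTokenizer)
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.pad_token, tokenizer.mask_token)

ids = tokenizer("sample sentence").input_ids
print(ids[0] == tokenizer.bos_token_id, ids[-1] == tokenizer.eos_token_id)  # <s> ... </s> wrapping
```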
tmp-checkpoint-5000/trainer_state.json ADDED
@@ -0,0 +1,289 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 16.611295681063122,
+   "eval_steps": 500,
+   "global_step": 5000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 7.59,
+       "eval_loss": 2.54074764251709,
+       "eval_rouge1": 0.0058,
+       "eval_rouge2": 0.0,
+       "eval_rougeL": 0.0055,
+       "eval_rougeLsum": 0.0054,
+       "eval_runtime": 8.576,
+       "eval_samples_per_second": 11.66,
+       "eval_steps_per_second": 2.915,
+       "step": 301
+     },
+     {
+       "epoch": 1.66,
+       "learning_rate": 8.305647840531562e-05,
+       "loss": 5.2836,
+       "step": 500
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 52.76,
+       "eval_loss": 1.0132805109024048,
+       "eval_rouge1": 0.1011,
+       "eval_rouge2": 0.0238,
+       "eval_rougeL": 0.0761,
+       "eval_rougeLsum": 0.0751,
+       "eval_runtime": 45.9141,
+       "eval_samples_per_second": 2.178,
+       "eval_steps_per_second": 0.544,
+       "step": 602
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 9.85,
+       "eval_loss": 0.923646867275238,
+       "eval_rouge1": 0.0549,
+       "eval_rouge2": 0.0088,
+       "eval_rougeL": 0.052,
+       "eval_rougeLsum": 0.0518,
+       "eval_runtime": 15.2679,
+       "eval_samples_per_second": 6.55,
+       "eval_steps_per_second": 1.637,
+       "step": 903
+     },
+     {
+       "epoch": 3.32,
+       "learning_rate": 9.265411590992987e-05,
+       "loss": 0.9572,
+       "step": 1000
+     },
+     {
+       "epoch": 4.0,
+       "eval_gen_len": 24.49,
+       "eval_loss": 0.8768377900123596,
+       "eval_rouge1": 0.0825,
+       "eval_rouge2": 0.0209,
+       "eval_rougeL": 0.0586,
+       "eval_rougeLsum": 0.058,
+       "eval_runtime": 18.7194,
+       "eval_samples_per_second": 5.342,
+       "eval_steps_per_second": 1.336,
+       "step": 1204
+     },
+     {
+       "epoch": 4.98,
+       "learning_rate": 8.342561830933924e-05,
+       "loss": 0.7658,
+       "step": 1500
+     },
+     {
+       "epoch": 5.0,
+       "eval_gen_len": 26.0,
+       "eval_loss": 0.8518986701965332,
+       "eval_rouge1": 0.1302,
+       "eval_rouge2": 0.0282,
+       "eval_rougeL": 0.1024,
+       "eval_rougeLsum": 0.1023,
+       "eval_runtime": 18.4105,
+       "eval_samples_per_second": 5.432,
+       "eval_steps_per_second": 1.358,
+       "step": 1505
+     },
+     {
+       "epoch": 6.0,
+       "eval_gen_len": 45.62,
+       "eval_loss": 0.8450735211372375,
+       "eval_rouge1": 0.1689,
+       "eval_rouge2": 0.0362,
+       "eval_rougeL": 0.134,
+       "eval_rougeLsum": 0.1345,
+       "eval_runtime": 26.7906,
+       "eval_samples_per_second": 3.733,
+       "eval_steps_per_second": 0.933,
+       "step": 1806
+     },
+     {
+       "epoch": 6.64,
+       "learning_rate": 7.419712070874863e-05,
+       "loss": 0.5785,
+       "step": 2000
+     },
+     {
+       "epoch": 7.0,
+       "eval_gen_len": 38.25,
+       "eval_loss": 0.8455685377120972,
+       "eval_rouge1": 0.1548,
+       "eval_rouge2": 0.0338,
+       "eval_rougeL": 0.121,
+       "eval_rougeLsum": 0.12,
+       "eval_runtime": 22.2546,
+       "eval_samples_per_second": 4.493,
+       "eval_steps_per_second": 1.123,
+       "step": 2107
+     },
+     {
+       "epoch": 8.0,
+       "eval_gen_len": 29.5,
+       "eval_loss": 0.8484403491020203,
+       "eval_rouge1": 0.1394,
+       "eval_rouge2": 0.0365,
+       "eval_rougeL": 0.1093,
+       "eval_rougeLsum": 0.1095,
+       "eval_runtime": 19.9659,
+       "eval_samples_per_second": 5.009,
+       "eval_steps_per_second": 1.252,
+       "step": 2408
+     },
+     {
+       "epoch": 8.31,
+       "learning_rate": 6.4968623108158e-05,
+       "loss": 0.4638,
+       "step": 2500
+     },
+     {
+       "epoch": 9.0,
+       "eval_gen_len": 32.69,
+       "eval_loss": 0.8562068343162537,
+       "eval_rouge1": 0.1665,
+       "eval_rouge2": 0.0483,
+       "eval_rougeL": 0.13,
+       "eval_rougeLsum": 0.1299,
+       "eval_runtime": 21.4271,
+       "eval_samples_per_second": 4.667,
+       "eval_steps_per_second": 1.167,
+       "step": 2709
+     },
+     {
+       "epoch": 9.97,
+       "learning_rate": 5.574012550756738e-05,
+       "loss": 0.359,
+       "step": 3000
+     },
+     {
+       "epoch": 10.0,
+       "eval_gen_len": 25.89,
+       "eval_loss": 0.878343403339386,
+       "eval_rouge1": 0.1471,
+       "eval_rouge2": 0.0421,
+       "eval_rougeL": 0.1211,
+       "eval_rougeLsum": 0.1192,
+       "eval_runtime": 17.1365,
+       "eval_samples_per_second": 5.836,
+       "eval_steps_per_second": 1.459,
+       "step": 3010
+     },
+     {
+       "epoch": 11.0,
+       "eval_gen_len": 32.17,
+       "eval_loss": 0.874755322933197,
+       "eval_rouge1": 0.1564,
+       "eval_rouge2": 0.0363,
+       "eval_rougeL": 0.1208,
+       "eval_rougeLsum": 0.1183,
+       "eval_runtime": 19.0439,
+       "eval_samples_per_second": 5.251,
+       "eval_steps_per_second": 1.313,
+       "step": 3311
+     },
+     {
+       "epoch": 11.63,
+       "learning_rate": 4.651162790697675e-05,
+       "loss": 0.2639,
+       "step": 3500
+     },
+     {
+       "epoch": 12.0,
+       "eval_gen_len": 39.23,
+       "eval_loss": 0.8847034573554993,
+       "eval_rouge1": 0.1727,
+       "eval_rouge2": 0.0524,
+       "eval_rougeL": 0.1349,
+       "eval_rougeLsum": 0.1347,
+       "eval_runtime": 23.6964,
+       "eval_samples_per_second": 4.22,
+       "eval_steps_per_second": 1.055,
+       "step": 3612
+     },
+     {
+       "epoch": 13.0,
+       "eval_gen_len": 41.93,
+       "eval_loss": 0.8969386219978333,
+       "eval_rouge1": 0.156,
+       "eval_rouge2": 0.0349,
+       "eval_rougeL": 0.1209,
+       "eval_rougeLsum": 0.1204,
+       "eval_runtime": 24.7305,
+       "eval_samples_per_second": 4.044,
+       "eval_steps_per_second": 1.011,
+       "step": 3913
+     },
+     {
+       "epoch": 13.29,
+       "learning_rate": 3.728313030638612e-05,
+       "loss": 0.1897,
+       "step": 4000
+     },
+     {
+       "epoch": 14.0,
+       "eval_gen_len": 38.77,
+       "eval_loss": 0.9095367193222046,
+       "eval_rouge1": 0.1606,
+       "eval_rouge2": 0.0457,
+       "eval_rougeL": 0.1237,
+       "eval_rougeLsum": 0.1237,
+       "eval_runtime": 22.2241,
+       "eval_samples_per_second": 4.5,
+       "eval_steps_per_second": 1.125,
+       "step": 4214
+     },
+     {
+       "epoch": 14.95,
+       "learning_rate": 2.8054632705795497e-05,
+       "loss": 0.1431,
+       "step": 4500
+     },
+     {
+       "epoch": 15.0,
+       "eval_gen_len": 31.47,
+       "eval_loss": 0.9321858882904053,
+       "eval_rouge1": 0.1326,
+       "eval_rouge2": 0.0355,
+       "eval_rougeL": 0.0999,
+       "eval_rougeLsum": 0.0999,
+       "eval_runtime": 18.5828,
+       "eval_samples_per_second": 5.381,
+       "eval_steps_per_second": 1.345,
+       "step": 4515
+     },
+     {
+       "epoch": 16.0,
+       "eval_gen_len": 38.33,
+       "eval_loss": 0.9329277276992798,
+       "eval_rouge1": 0.1574,
+       "eval_rouge2": 0.0436,
+       "eval_rougeL": 0.1197,
+       "eval_rougeLsum": 0.1196,
+       "eval_runtime": 23.3814,
+       "eval_samples_per_second": 4.277,
+       "eval_steps_per_second": 1.069,
+       "step": 4816
+     },
+     {
+       "epoch": 16.61,
+       "learning_rate": 1.882613510520487e-05,
+       "loss": 0.0998,
+       "step": 5000
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 6020,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 500,
+   "total_flos": 1.782050070528e+16,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
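
trainer_state.json is plain JSON, so the evaluation history it records (per-epoch loss and ROUGE, evaluated every 500 steps) can be summarized directly. A minimal sketch, assuming the file is read from the local checkpoint path used in this commit:

```python
# Sketch: pull the evaluation curve out of trainer_state.json and report the best ROUGE-L step.
import json

with open("tmp-checkpoint-5000/trainer_state.json") as f:
    state = json.load(f)

evals = [entry for entry in state["log_history"] if "eval_loss" in entry]
best = max(evals, key=lambda entry: entry["eval_rougeL"])
print(f"evaluations logged: {len(evals)}, global_step: {state['global_step']}")
print(f"best eval_rougeL {best['eval_rougeL']} at step {best['step']} (epoch {best['epoch']})")
```

In this particular log, eval_loss bottoms out at step 1806 (epoch 6) while eval_rougeL peaks later, at 0.1349 around step 3612 (epoch 12).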
tmp-checkpoint-5000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f72711b887cbed0cffe0244e68691c5b1fa4c683b86522656866bfd1bc7d5d60
+ size 4463
tmp-checkpoint-5000/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f72711b887cbed0cffe0244e68691c5b1fa4c683b86522656866bfd1bc7d5d60
+ oid sha256:47e72807479ad7a934ee17b52d43f1c0c409dfed1616f888d3a719f361abefbf
  size 4463
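
training_args.bin is a pickled transformers.TrainingArguments object rather than a tensor file, so it is read with torch.load. A sketch, assuming transformers is importable in the loading environment; on recent PyTorch versions weights_only=False must be passed explicitly because the file contains arbitrary pickled objects:

```python
# Sketch: inspect the pickled TrainingArguments behind training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)  # requires transformers to be installed
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
```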