oguuzhansahin committed on
Commit
7de53d1
1 Parent(s): d4f485d

Upload ./ with huggingface_hub

config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "_name_or_path": "google/flan-t5-large",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2816,
+   "d_kv": 64,
+   "d_model": 1024,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 24,
+   "num_heads": 16,
+   "num_layers": 24,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.1",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
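
The config above identifies this checkpoint as a T5ForConditionalGeneration fine-tuned from google/flan-t5-large. A minimal usage sketch follows; the repo id is an assumption inferred from the committer name and the checkpoint path in trainer_state.json (not confirmed by this commit), the tokenizer is taken from the base model because no tokenizer files appear in this upload, and the "summarize: " prompt prefix is likewise an assumption.

```python
# Minimal sketch, not the author's documented usage. Repo id and prompt prefix are assumptions.
from transformers import AutoTokenizer, T5ForConditionalGeneration

repo_id = "oguuzhansahin/flan-t5-large-samsum"  # assumed repo id; substitute the real one or a local clone
tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-large")  # base tokenizer; this commit ships no tokenizer files
model = T5ForConditionalGeneration.from_pretrained(repo_id)

dialogue = (
    "Amanda: I baked cookies. Do you want some?\n"
    "Jerry: Sure!\n"
    "Amanda: I'll bring you some tomorrow :-)"
)
inputs = tokenizer("summarize: " + dialogue, return_tensors="pt", truncation=True)
summary_ids = model.generate(**inputs, max_new_tokens=60)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```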
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.26.1"
+ }
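
generation_config.json is picked up automatically by generate(), but it can also be loaded and inspected explicitly. A short sketch, using the same assumed repo id as above and a transformers release recent enough to provide GenerationConfig (the file itself was written by 4.26.1):

```python
# Sketch only: the repo id is an assumption.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("oguuzhansahin/flan-t5-large-samsum")
print(gen_config.decoder_start_token_id, gen_config.eos_token_id, gen_config.pad_token_id)  # expected: 0 1 0
```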
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2dff9b9cf27fc796d85111642ce107a09bd5df8b74931dddf1942d2e0f7cc298
+ size 6265537309
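
The three added lines are a Git LFS pointer, not the optimizer state itself; the same applies to the other large binaries in this commit. A clone made without git-lfs leaves these small pointer files in place of the real data. A small sketch for reading the pointer fields, assuming the file on disk is still the un-smudged pointer:

```python
# Parses a Git LFS pointer file: one "key value" pair per line (version / oid / size).
def parse_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

print(parse_lfs_pointer("optimizer.pt"))
# e.g. {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:2dff9b...', 'size': '6265537309'}
```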
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2a79bf58146f2b0eb690838a3ebd3f2e28766002e396a30985194f1171d4127
+ size 3132793669
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b0da9c41f3a2274f2b1446341f810a1a6d2df648639b4402e936cac2fa7c4cc
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7954effb3ff235e7d009f2c81769140d27337b36d3cfd3a13afd2e649f203f74
+ size 627
trainer_state.json ADDED
@@ -0,0 +1,297 @@
+ {
+   "best_metric": 1.2028405666351318,
+   "best_model_checkpoint": "flan-t5-large-samsum/checkpoint-3683",
+   "epoch": 5.0,
+   "global_step": 18415,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.14,
+       "learning_rate": 4.8642411077925605e-05,
+       "loss": 1.2079,
+       "step": 500
+     },
+     {
+       "epoch": 0.27,
+       "learning_rate": 4.728482215585121e-05,
+       "loss": 1.2091,
+       "step": 1000
+     },
+     {
+       "epoch": 0.41,
+       "learning_rate": 4.592723323377681e-05,
+       "loss": 1.1932,
+       "step": 1500
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.456964431170242e-05,
+       "loss": 1.1904,
+       "step": 2000
+     },
+     {
+       "epoch": 0.68,
+       "learning_rate": 4.321205538962802e-05,
+       "loss": 1.1767,
+       "step": 2500
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 4.1854466467553626e-05,
+       "loss": 1.1794,
+       "step": 3000
+     },
+     {
+       "epoch": 0.95,
+       "learning_rate": 4.049687754547923e-05,
+       "loss": 1.1814,
+       "step": 3500
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 16.73870573870574,
+       "eval_loss": 1.2028405666351318,
+       "eval_rouge1": 48.847,
+       "eval_rouge2": 25.4282,
+       "eval_rougeL": 41.7343,
+       "eval_rougeLsum": 44.9999,
+       "eval_runtime": 280.4507,
+       "eval_samples_per_second": 2.92,
+       "eval_steps_per_second": 0.731,
+       "step": 3683
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 3.913928862340484e-05,
+       "loss": 1.0624,
+       "step": 4000
+     },
+     {
+       "epoch": 1.22,
+       "learning_rate": 3.7781699701330434e-05,
+       "loss": 1.0085,
+       "step": 4500
+     },
+     {
+       "epoch": 1.36,
+       "learning_rate": 3.6424110779256044e-05,
+       "loss": 0.9909,
+       "step": 5000
+     },
+     {
+       "epoch": 1.49,
+       "learning_rate": 3.506652185718165e-05,
+       "loss": 1.0098,
+       "step": 5500
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 3.370893293510725e-05,
+       "loss": 1.0163,
+       "step": 6000
+     },
+     {
+       "epoch": 1.76,
+       "learning_rate": 3.235134401303285e-05,
+       "loss": 1.0424,
+       "step": 6500
+     },
+     {
+       "epoch": 1.9,
+       "learning_rate": 3.099375509095846e-05,
+       "loss": 1.0294,
+       "step": 7000
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 17.39072039072039,
+       "eval_loss": 1.2175439596176147,
+       "eval_rouge1": 49.175,
+       "eval_rouge2": 25.9148,
+       "eval_rougeL": 41.729,
+       "eval_rougeLsum": 45.2583,
+       "eval_runtime": 283.3808,
+       "eval_samples_per_second": 2.89,
+       "eval_steps_per_second": 0.723,
+       "step": 7366
+     },
+     {
+       "epoch": 2.04,
+       "learning_rate": 2.963616616888406e-05,
+       "loss": 1.0013,
+       "step": 7500
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 2.8278577246809667e-05,
+       "loss": 0.8928,
+       "step": 8000
+     },
+     {
+       "epoch": 2.31,
+       "learning_rate": 2.692098832473527e-05,
+       "loss": 0.8853,
+       "step": 8500
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 2.5563399402660876e-05,
+       "loss": 0.8968,
+       "step": 9000
+     },
+     {
+       "epoch": 2.58,
+       "learning_rate": 2.420581048058648e-05,
+       "loss": 0.8965,
+       "step": 9500
+     },
+     {
+       "epoch": 2.72,
+       "learning_rate": 2.2848221558512085e-05,
+       "loss": 0.8974,
+       "step": 10000
+     },
+     {
+       "epoch": 2.85,
+       "learning_rate": 2.1490632636437688e-05,
+       "loss": 0.8862,
+       "step": 10500
+     },
+     {
+       "epoch": 2.99,
+       "learning_rate": 2.013304371436329e-05,
+       "loss": 0.9026,
+       "step": 11000
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 17.076923076923077,
+       "eval_loss": 1.2396090030670166,
+       "eval_rouge1": 49.1776,
+       "eval_rouge2": 25.5811,
+       "eval_rougeL": 41.6807,
+       "eval_rougeLsum": 44.9973,
+       "eval_runtime": 281.6416,
+       "eval_samples_per_second": 2.908,
+       "eval_steps_per_second": 0.728,
+       "step": 11049
+     },
+     {
+       "epoch": 3.12,
+       "learning_rate": 1.8775454792288897e-05,
+       "loss": 0.8011,
+       "step": 11500
+     },
+     {
+       "epoch": 3.26,
+       "learning_rate": 1.74178658702145e-05,
+       "loss": 0.7933,
+       "step": 12000
+     },
+     {
+       "epoch": 3.39,
+       "learning_rate": 1.6060276948140106e-05,
+       "loss": 0.8209,
+       "step": 12500
+     },
+     {
+       "epoch": 3.53,
+       "learning_rate": 1.4702688026065709e-05,
+       "loss": 0.8106,
+       "step": 13000
+     },
+     {
+       "epoch": 3.67,
+       "learning_rate": 1.3345099103991313e-05,
+       "loss": 0.8001,
+       "step": 13500
+     },
+     {
+       "epoch": 3.8,
+       "learning_rate": 1.1987510181916916e-05,
+       "loss": 0.8196,
+       "step": 14000
+     },
+     {
+       "epoch": 3.94,
+       "learning_rate": 1.062992125984252e-05,
+       "loss": 0.808,
+       "step": 14500
+     },
+     {
+       "epoch": 4.0,
+       "eval_gen_len": 17.313797313797313,
+       "eval_loss": 1.2748357057571411,
+       "eval_rouge1": 49.3102,
+       "eval_rouge2": 25.9028,
+       "eval_rougeL": 42.1036,
+       "eval_rougeLsum": 45.485,
+       "eval_runtime": 281.9584,
+       "eval_samples_per_second": 2.905,
+       "eval_steps_per_second": 0.727,
+       "step": 14732
+     },
+     {
+       "epoch": 4.07,
+       "learning_rate": 9.272332337768125e-06,
+       "loss": 0.7884,
+       "step": 15000
+     },
+     {
+       "epoch": 4.21,
+       "learning_rate": 7.91474341569373e-06,
+       "loss": 0.7615,
+       "step": 15500
+     },
+     {
+       "epoch": 4.34,
+       "learning_rate": 6.557154493619333e-06,
+       "loss": 0.752,
+       "step": 16000
+     },
+     {
+       "epoch": 4.48,
+       "learning_rate": 5.199565571544936e-06,
+       "loss": 0.748,
+       "step": 16500
+     },
+     {
+       "epoch": 4.62,
+       "learning_rate": 3.8419766494705405e-06,
+       "loss": 0.7518,
+       "step": 17000
+     },
+     {
+       "epoch": 4.75,
+       "learning_rate": 2.4843877273961445e-06,
+       "loss": 0.7522,
+       "step": 17500
+     },
+     {
+       "epoch": 4.89,
+       "learning_rate": 1.1267988053217486e-06,
+       "loss": 0.7482,
+       "step": 18000
+     },
+     {
+       "epoch": 5.0,
+       "eval_gen_len": 17.304029304029303,
+       "eval_loss": 1.3044484853744507,
+       "eval_rouge1": 49.1547,
+       "eval_rouge2": 25.5204,
+       "eval_rougeL": 41.9049,
+       "eval_rougeLsum": 45.2342,
+       "eval_runtime": 283.7828,
+       "eval_samples_per_second": 2.886,
+       "eval_steps_per_second": 0.722,
+       "step": 18415
+     }
+   ],
+   "max_steps": 18415,
+   "num_train_epochs": 5,
+   "total_flos": 2.5465381088919552e+17,
+   "trial_name": null,
+   "trial_params": null
+ }
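
trainer_state.json records one entry per logging step plus one evaluation entry per epoch; best_model_checkpoint points at checkpoint-3683 (end of epoch 1), where eval_loss equals best_metric. A small sketch for summarising the evaluation rows, assuming the file has been downloaded locally:

```python
import json

# Load the trainer state shown above and print the per-epoch evaluation metrics.
with open("trainer_state.json", encoding="utf-8") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:.0f}: "
              f"eval_loss={entry['eval_loss']:.4f} "
              f"rouge1={entry['eval_rouge1']:.2f} rougeL={entry['eval_rougeL']:.2f}")

print("best checkpoint:", state["best_model_checkpoint"])  # flan-t5-large-samsum/checkpoint-3683
```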
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f063f7a15c9d303c2f8ea8ff616907447cd6da894916a80ebc834aa3adc33e62
+ size 3643
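
training_args.bin is the pickled TrainingArguments object saved by the Trainer (presumably Seq2SeqTrainingArguments here, given the generation-length metrics in trainer_state.json). A rough inspection sketch; unpickling requires a compatible transformers install, and recent torch versions need weights_only=False because this is an arbitrary pickled object rather than a tensor archive:

```python
import torch

# Load the pickled training arguments (not model weights) for inspection.
args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.num_train_epochs, args.learning_rate, args.per_device_train_batch_size)
```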