codefactory4791 committed
Commit d670345
1 Parent(s): 341ceb1

End of training

last-checkpoint/generation_config.json → generation_config.json RENAMED
File without changes
last-checkpoint/config.json DELETED
@@ -1,42 +0,0 @@
-{
-  "_name_or_path": "openai/whisper-small",
-  "activation_dropout": 0.0,
-  "activation_function": "gelu",
-  "architectures": [
-    "WhisperForConditionalGeneration"
-  ],
-  "attention_dropout": 0.0,
-  "begin_suppress_tokens": [
-    220,
-    50257
-  ],
-  "bos_token_id": 50257,
-  "d_model": 768,
-  "decoder_attention_heads": 12,
-  "decoder_ffn_dim": 3072,
-  "decoder_layerdrop": 0.0,
-  "decoder_layers": 12,
-  "decoder_start_token_id": 50258,
-  "dropout": 0.0,
-  "encoder_attention_heads": 12,
-  "encoder_ffn_dim": 3072,
-  "encoder_layerdrop": 0.0,
-  "encoder_layers": 12,
-  "eos_token_id": 50257,
-  "forced_decoder_ids": null,
-  "init_std": 0.02,
-  "is_encoder_decoder": true,
-  "max_length": 448,
-  "max_source_positions": 1500,
-  "max_target_positions": 448,
-  "model_type": "whisper",
-  "num_hidden_layers": 12,
-  "num_mel_bins": 80,
-  "pad_token_id": 50257,
-  "scale_embedding": false,
-  "suppress_tokens": [],
-  "torch_dtype": "float32",
-  "transformers_version": "4.26.1",
-  "use_cache": true,
-  "vocab_size": 51865
-}
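The deleted checkpoint config above mirrors the stock openai/whisper-small architecture (12 encoder/decoder layers, d_model 768, 80 mel bins). As a minimal sketch, not part of this commit, the fine-tuned model can still be loaded from the repo root with the standard transformers API; the repo id is inferred from trainer_state.json later in this diff, and the processor/tokenizer files are assumed to be present at the root:

    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    repo_id = "codefactory4791/whisper-small-medicalv3"  # inferred from best_model_checkpoint below
    model = WhisperForConditionalGeneration.from_pretrained(repo_id)
    processor = WhisperProcessor.from_pretrained(repo_id)  # assumes processor files exist at the repo root

    # Matches the deleted config: hidden size 768, 12 encoder layers, 80 mel bins
    print(model.config.d_model, model.config.encoder_layers, model.config.num_mel_bins)
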
last-checkpoint/optimizer.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:2341c3e08addcee1f80703cf5ae5b9bb54f31defa980ba4e189bbc5fc5570cd6
-size 734250209
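This and the other deleted binaries are tracked with Git LFS, so the diff only removes a three-line pointer file (spec version, sha256 oid, byte size) rather than the ~734 MB optimizer state itself. A small sketch of parsing such a pointer and checking a downloaded blob against it; the helper names are illustrative, not part of any library:

    import hashlib
    from pathlib import Path

    def parse_lfs_pointer(text: str) -> dict:
        # Pointer lines look like: "version <url>", "oid sha256:<hex>", "size <bytes>"
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        algo, digest = fields["oid"].split(":", 1)
        return {"version": fields["version"], "algo": algo, "oid": digest, "size": int(fields["size"])}

    def verify_blob(pointer: dict, blob_path: Path) -> bool:
        # Reads the whole file into memory; fine for a sketch, not ideal for very large blobs
        data = blob_path.read_bytes()
        return len(data) == pointer["size"] and hashlib.sha256(data).hexdigest() == pointer["oid"]
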
last-checkpoint/preprocessor_config.json DELETED
The diff for this file is too large to render.
 
last-checkpoint/pytorch_model.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b71d0c8a36c0a62ee68efda2abb716f422fa634d3114074d6fab2fb32d141546
-size 967102601

last-checkpoint/rng_state.pth DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:eb642eeaf18fedb79ccd61a8c464d040ca5f6dc4a25be7adb1fa64c2fabdad73
-size 14575

last-checkpoint/scaler.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ee16356ea91477394f418f2b87beadf6f49ec7eb5382c9d0070098ebb7d494dc
-size 557

last-checkpoint/scheduler.pt DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:aa5643ea4489d64078264ab922c2c4f3bde3f465fe865754732347a5a8c21ba8
-size 627

last-checkpoint/trainer_state.json DELETED
@@ -1,352 +0,0 @@
-{
-  "best_metric": 26.028020838601996,
-  "best_model_checkpoint": "codefactory4791/whisper-small-medicalv3/checkpoint-8000",
-  "epoch": 20.408163265306122,
-  "global_step": 8000,
-  "is_hyper_param_search": false,
-  "is_local_process_zero": true,
-  "is_world_process_zero": true,
-  "log_history": [
-    {
-      "epoch": 0.64,
-      "learning_rate": 4.92e-06,
-      "loss": 1.5334,
-      "step": 250
-    },
-    {
-      "epoch": 1.28,
-      "learning_rate": 9.920000000000002e-06,
-      "loss": 0.5352,
-      "step": 500
-    },
-    {
-      "epoch": 1.28,
-      "eval_loss": 0.4770798087120056,
-      "eval_runtime": 342.0388,
-      "eval_samples_per_second": 4.032,
-      "eval_steps_per_second": 0.129,
-      "eval_wer": 51.11526983966416,
-      "step": 500
-    },
-    {
-      "epoch": 1.91,
-      "learning_rate": 9.672e-06,
-      "loss": 0.4987,
-      "step": 750
-    },
-    {
-      "epoch": 2.55,
-      "learning_rate": 9.338666666666667e-06,
-      "loss": 0.418,
-      "step": 1000
-    },
-    {
-      "epoch": 2.55,
-      "eval_loss": 0.5147032141685486,
-      "eval_runtime": 309.2513,
-      "eval_samples_per_second": 4.459,
-      "eval_steps_per_second": 0.142,
-      "eval_wer": 30.79257498356178,
-      "step": 1000
-    },
-    {
-      "epoch": 3.19,
-      "learning_rate": 9.008e-06,
-      "loss": 0.3823,
-      "step": 1250
-    },
-    {
-      "epoch": 3.83,
-      "learning_rate": 8.674666666666668e-06,
-      "loss": 0.334,
-      "step": 1500
-    },
-    {
-      "epoch": 3.83,
-      "eval_loss": 0.5190795063972473,
-      "eval_runtime": 302.0972,
-      "eval_samples_per_second": 4.565,
-      "eval_steps_per_second": 0.146,
-      "eval_wer": 26.928329371301402,
-      "step": 1500
-    },
-    {
-      "epoch": 4.46,
-      "learning_rate": 8.341333333333334e-06,
-      "loss": 0.258,
-      "step": 1750
-    },
-    {
-      "epoch": 5.1,
-      "learning_rate": 8.008e-06,
-      "loss": 0.2341,
-      "step": 2000
-    },
-    {
-      "epoch": 5.1,
-      "eval_loss": 0.5760442018508911,
-      "eval_runtime": 307.2181,
-      "eval_samples_per_second": 4.489,
-      "eval_steps_per_second": 0.143,
-      "eval_wer": 27.874159121946285,
-      "step": 2000
-    },
-    {
-      "epoch": 5.74,
-      "learning_rate": 7.674666666666666e-06,
-      "loss": 0.1805,
-      "step": 2250
-    },
-    {
-      "epoch": 6.38,
-      "learning_rate": 7.341333333333334e-06,
-      "loss": 0.142,
-      "step": 2500
-    },
-    {
-      "epoch": 6.38,
-      "eval_loss": 0.6293530464172363,
-      "eval_runtime": 317.5809,
-      "eval_samples_per_second": 4.342,
-      "eval_steps_per_second": 0.139,
-      "eval_wer": 30.038945930908906,
-      "step": 2500
-    },
-    {
-      "epoch": 7.02,
-      "learning_rate": 7.0080000000000005e-06,
-      "loss": 0.1268,
-      "step": 2750
-    },
-    {
-      "epoch": 7.65,
-      "learning_rate": 6.674666666666667e-06,
-      "loss": 0.085,
-      "step": 3000
-    },
-    {
-      "epoch": 7.65,
-      "eval_loss": 0.6510938405990601,
-      "eval_runtime": 311.9788,
-      "eval_samples_per_second": 4.42,
-      "eval_steps_per_second": 0.141,
-      "eval_wer": 30.013656365383646,
-      "step": 3000
-    },
-    {
-      "epoch": 8.29,
-      "learning_rate": 6.341333333333334e-06,
-      "loss": 0.0763,
-      "step": 3250
-    },
-    {
-      "epoch": 8.93,
-      "learning_rate": 6.008000000000001e-06,
-      "loss": 0.068,
-      "step": 3500
-    },
-    {
-      "epoch": 8.93,
-      "eval_loss": 0.6710622906684875,
-      "eval_runtime": 305.9908,
-      "eval_samples_per_second": 4.507,
-      "eval_steps_per_second": 0.144,
-      "eval_wer": 29.062768701633708,
-      "step": 3500
-    },
-    {
-      "epoch": 9.57,
-      "learning_rate": 5.6746666666666675e-06,
-      "loss": 0.0495,
-      "step": 3750
-    },
-    {
-      "epoch": 10.2,
-      "learning_rate": 5.341333333333334e-06,
-      "loss": 0.0439,
-      "step": 4000
-    },
-    {
-      "epoch": 10.2,
-      "eval_loss": 0.7179045677185059,
-      "eval_runtime": 301.3864,
-      "eval_samples_per_second": 4.576,
-      "eval_steps_per_second": 0.146,
-      "eval_wer": 28.395124171766728,
-      "step": 4000
-    },
-    {
-      "epoch": 10.84,
-      "learning_rate": 5.008000000000001e-06,
-      "loss": 0.0382,
-      "step": 4250
-    },
-    {
-      "epoch": 11.48,
-      "learning_rate": 4.674666666666667e-06,
-      "loss": 0.0305,
-      "step": 4500
-    },
-    {
-      "epoch": 11.48,
-      "eval_loss": 0.7301719188690186,
-      "eval_runtime": 305.5767,
-      "eval_samples_per_second": 4.513,
-      "eval_steps_per_second": 0.144,
-      "eval_wer": 28.632846087704213,
-      "step": 4500
-    },
-    {
-      "epoch": 12.12,
-      "learning_rate": 4.341333333333334e-06,
-      "loss": 0.0276,
-      "step": 4750
-    },
-    {
-      "epoch": 12.76,
-      "learning_rate": 4.008e-06,
-      "loss": 0.0227,
-      "step": 5000
-    },
-    {
-      "epoch": 12.76,
-      "eval_loss": 0.7424771189689636,
-      "eval_runtime": 303.4778,
-      "eval_samples_per_second": 4.544,
-      "eval_steps_per_second": 0.145,
-      "eval_wer": 26.50852258358201,
-      "step": 5000
-    },
-    {
-      "epoch": 13.39,
-      "learning_rate": 3.6746666666666667e-06,
-      "loss": 0.0198,
-      "step": 5250
-    },
-    {
-      "epoch": 14.03,
-      "learning_rate": 3.3413333333333335e-06,
-      "loss": 0.0178,
-      "step": 5500
-    },
-    {
-      "epoch": 14.03,
-      "eval_loss": 0.7501226663589478,
-      "eval_runtime": 303.3747,
-      "eval_samples_per_second": 4.546,
-      "eval_steps_per_second": 0.145,
-      "eval_wer": 28.55697739112842,
-      "step": 5500
-    },
-    {
-      "epoch": 14.67,
-      "learning_rate": 3.0080000000000003e-06,
-      "loss": 0.0146,
-      "step": 5750
-    },
-    {
-      "epoch": 15.31,
-      "learning_rate": 2.674666666666667e-06,
-      "loss": 0.0128,
-      "step": 6000
-    },
-    {
-      "epoch": 15.31,
-      "eval_loss": 0.7591447234153748,
-      "eval_runtime": 300.021,
-      "eval_samples_per_second": 4.596,
-      "eval_steps_per_second": 0.147,
-      "eval_wer": 26.76647615193971,
-      "step": 6000
-    },
-    {
-      "epoch": 15.94,
-      "learning_rate": 2.3413333333333333e-06,
-      "loss": 0.011,
-      "step": 6250
-    },
-    {
-      "epoch": 16.58,
-      "learning_rate": 2.008e-06,
-      "loss": 0.0088,
-      "step": 6500
-    },
-    {
-      "epoch": 16.58,
-      "eval_loss": 0.7742175459861755,
-      "eval_runtime": 304.3909,
-      "eval_samples_per_second": 4.53,
-      "eval_steps_per_second": 0.145,
-      "eval_wer": 27.064893025137827,
-      "step": 6500
-    },
-    {
-      "epoch": 17.22,
-      "learning_rate": 1.6746666666666668e-06,
-      "loss": 0.0076,
-      "step": 6750
-    },
-    {
-      "epoch": 17.86,
-      "learning_rate": 1.3413333333333334e-06,
-      "loss": 0.0062,
-      "step": 7000
-    },
-    {
-      "epoch": 17.86,
-      "eval_loss": 0.8007826805114746,
-      "eval_runtime": 300.4307,
-      "eval_samples_per_second": 4.59,
-      "eval_steps_per_second": 0.146,
-      "eval_wer": 26.199989884173792,
-      "step": 7000
-    },
-    {
-      "epoch": 18.49,
-      "learning_rate": 1.0080000000000001e-06,
-      "loss": 0.0048,
-      "step": 7250
-    },
-    {
-      "epoch": 19.13,
-      "learning_rate": 6.746666666666667e-07,
-      "loss": 0.0034,
-      "step": 7500
-    },
-    {
-      "epoch": 19.13,
-      "eval_loss": 0.8073320388793945,
-      "eval_runtime": 306.8927,
-      "eval_samples_per_second": 4.493,
-      "eval_steps_per_second": 0.143,
-      "eval_wer": 26.776591978149817,
-      "step": 7500
-    },
-    {
-      "epoch": 19.77,
-      "learning_rate": 3.4133333333333337e-07,
-      "loss": 0.0029,
-      "step": 7750
-    },
-    {
-      "epoch": 20.41,
-      "learning_rate": 8e-09,
-      "loss": 0.0015,
-      "step": 8000
-    },
-    {
-      "epoch": 20.41,
-      "eval_loss": 0.8103999495506287,
-      "eval_runtime": 303.3592,
-      "eval_samples_per_second": 4.546,
-      "eval_steps_per_second": 0.145,
-      "eval_wer": 26.028020838601996,
-      "step": 8000
-    }
-  ],
-  "max_steps": 8000,
-  "num_train_epochs": 21,
-  "total_flos": 1.47495998103552e+20,
-  "trial_name": null,
-  "trial_params": null
-}
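The trainer state above records a warmup to roughly 1e-5 over the first ~500 steps followed by linear decay to near zero at step 8,000, with evaluation every 500 steps; the lowest WER (26.03) lands on the final evaluation, which is why checkpoint-8000 is recorded as best_model_checkpoint. A minimal sketch of pulling that evaluation history back out of such a file (the local path is illustrative):

    import json

    with open("trainer_state.json") as f:  # illustrative path to a saved trainer state
        state = json.load(f)

    # Collect (step, WER) pairs from the eval entries in log_history
    evals = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]
    best_step, best_wer = min(evals, key=lambda pair: pair[1])
    print(f"best WER {best_wer:.2f} at step {best_step}")  # expected: 26.03 at step 8000
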
last-checkpoint/training_args.bin DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5b9df3254c5b6427a467145f75c1dd9f58d63e9b7c39c8b3fb93d298e6c2d612
-size 3643