marinone94 committed
Commit 073a108
Parent(s): 3583a24

End of training

all_results.json CHANGED
@@ -1,17 +1,32 @@
 {
     "epoch": 1.0,
     "eval_loss": 1.6191972494125366,
+    "eval_pretrained_loss": 1.6191972494125366,
+    "eval_pretrained_runtime": 59.7459,
+    "eval_pretrained_samples_per_second": 0.067,
+    "eval_pretrained_steps_per_second": 0.033,
+    "eval_pretrained_wer": 153.2258064516129,
     "eval_runtime": 56.3363,
     "eval_samples_per_second": 0.071,
     "eval_steps_per_second": 0.036,
     "eval_wer": 153.2258064516129,
+    "test_finetuned_loss": 1.7568330764770508,
+    "test_finetuned_runtime": 39.6579,
+    "test_finetuned_samples_per_second": 0.101,
+    "test_finetuned_steps_per_second": 0.05,
+    "test_finetuned_wer": 138.5964912280702,
     "test_loss": 1.7568330764770508,
+    "test_pretrained_loss": 1.7568330764770508,
+    "test_pretrained_runtime": 42.5376,
+    "test_pretrained_samples_per_second": 0.094,
+    "test_pretrained_steps_per_second": 0.047,
+    "test_pretrained_wer": 138.5964912280702,
     "test_runtime": 37.8582,
     "test_samples_per_second": 0.106,
     "test_steps_per_second": 0.053,
     "test_wer": 138.5964912280702,
     "train_loss": 1.4339025020599365,
-    "train_runtime": 108.1566,
-    "train_samples_per_second": 0.074,
-    "train_steps_per_second": 0.018
+    "train_runtime": 102.2429,
+    "train_samples_per_second": 0.078,
+    "train_steps_per_second": 0.02
 }
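Why the pre- and post-training numbers can coexist in one file: `Trainer.save_metrics` writes a `{split}_results.json` per call and, with the default `combined=True`, also merges the keys into `all_results.json`. A rough, self-contained sketch of that merge behavior (not the library's exact code; the output directory and metric values are illustrative):

import json
from pathlib import Path

def save_metrics(output_dir, split, metrics, combined=True):
    # Rough re-implementation of what transformers' Trainer.save_metrics does:
    # write {split}_results.json and, if combined, merge into all_results.json.
    out = Path(output_dir)
    (out / f"{split}_results.json").write_text(json.dumps(metrics, indent=4, sort_keys=True))
    if combined:
        path = out / "all_results.json"
        all_metrics = json.loads(path.read_text()) if path.exists() else {}
        all_metrics.update(metrics)  # new prefixed keys are added, existing ones kept
        path.write_text(json.dumps(all_metrics, indent=4, sort_keys=True))

save_metrics(".", "eval_pretrained", {"eval_pretrained_wer": 153.2258064516129})

Each evaluate/save pair in the script therefore adds its own prefixed keys instead of replacing the earlier ones.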
eval_pretrained_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "eval_pretrained_loss": 1.6191972494125366,
+    "eval_pretrained_runtime": 59.7459,
+    "eval_pretrained_samples_per_second": 0.067,
+    "eval_pretrained_steps_per_second": 0.033,
+    "eval_pretrained_wer": 153.2258064516129
+}
huggingface_training.py CHANGED
@@ -322,55 +322,55 @@ I hope you haven't left yet. If you have, bad for you, as we are ready for train
 As Whisper is a pretrained model ready to be used off-the-shelf, it is advisable to evaluate it before training on both the validation and test sets. Let's make sure we do it no harm.
 """

-# eval_metrics = trainer.evaluate(
-#     eval_dataset=preprocessed_dataset["validation"],
-#     metric_key_prefix="eval",
-#     max_length=448,
-#     num_beams=1,
-#     # gen_kwargs={"key": value} to provide additional generation-specific arguments by keyword
-# )
+eval_metrics = trainer.evaluate(
+    eval_dataset=preprocessed_dataset["validation"],
+    metric_key_prefix="eval_pretrained",
+    max_length=448,
+    num_beams=1,
+    # gen_kwargs={"key": value} to provide additional generation-specific arguments by keyword
+)

-# trainer.log_metrics("eval", eval_metrics)
-# trainer.save_metrics("eval", eval_metrics)
-# print(eval_metrics)
+trainer.log_metrics("eval_pretrained", eval_metrics)
+trainer.save_metrics("eval_pretrained", eval_metrics)
+print(eval_metrics)

-# test_metrics = trainer.evaluate(
-#     eval_dataset=preprocessed_dataset["test"],
-#     metric_key_prefix="test",
-#     max_length=448,
-#     num_beams=1,
-#     # gen_kwargs={"key": value} to provide additional generation-specific arguments by keyword
-# )
+test_metrics = trainer.evaluate(
+    eval_dataset=preprocessed_dataset["test"],
+    metric_key_prefix="test_pretrained",
+    max_length=448,
+    num_beams=1,
+    # gen_kwargs={"key": value} to provide additional generation-specific arguments by keyword
+)

-# trainer.log_metrics("test", test_metrics)
-# trainer.save_metrics("test", test_metrics)
-# print(test_metrics)
+trainer.log_metrics("test_pretrained", test_metrics)
+trainer.save_metrics("test_pretrained", test_metrics)
+print(test_metrics)

-# train_result = trainer.train()
-# trainer.save_model()
+train_result = trainer.train()
+trainer.save_model()

-# metrics = train_result.metrics
-# trainer.log_metrics("train", metrics)
-# trainer.save_metrics("train", metrics)
-# trainer.save_state()
-# print(metrics)
+metrics = train_result.metrics
+trainer.log_metrics("train", metrics)
+trainer.save_metrics("train", metrics)
+trainer.save_state()
+print(metrics)

 # """ADD SOMETHING ABOUT THE TRAINING.

 # Now let's evaluate the
 # """

-# final_metrics = trainer.evaluate(
-#     eval_dataset=preprocessed_dataset["test"],
-#     metric_key_prefix="test",
-#     max_length=448,
-#     num_beams=1,
-#     # gen_kwargs={"key": value} to provide additional generation-specific arguments by keyword
-# )
+final_metrics = trainer.evaluate(
+    eval_dataset=preprocessed_dataset["test"],
+    metric_key_prefix="test_finetuned",
+    max_length=448,
+    num_beams=1,
+    # gen_kwargs={"key": value} to provide additional generation-specific arguments by keyword
+)

-# trainer.log_metrics("test", final_metrics)
-# trainer.save_metrics("test", final_metrics)
-# print(final_metrics)
+trainer.log_metrics("test_finetuned", final_metrics)
+trainer.save_metrics("test_finetuned", final_metrics)
+print(final_metrics)

 # Pushing to hub during training slows down training
 # so we push it only in the end.
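This prefix change is what drives every new file in the commit: `Trainer.evaluate` prepends `metric_key_prefix` to each metric it returns (loss, runtime, samples/steps per second, and anything from `compute_metrics`, such as WER here), so switching from "eval"/"test" to "eval_pretrained", "test_pretrained", and "test_finetuned" lets the baseline and fine-tuned numbers land under distinct keys. A toy stand-in for that prefixing step (not the library's code; the raw values are illustrative):

def prefix_metrics(metrics: dict, metric_key_prefix: str) -> dict:
    # Mimics how evaluate() turns "loss" into "test_pretrained_loss", etc.
    return {f"{metric_key_prefix}_{k}": v for k, v in metrics.items()}

raw = {"loss": 1.7568330764770508, "wer": 138.5964912280702}
print(prefix_metrics(raw, "test_pretrained"))  # test_pretrained_loss, test_pretrained_wer
print(prefix_metrics(raw, "test_finetuned"))   # test_finetuned_loss, test_finetuned_wer

With the old identical "test" prefix, the post-training save would have clobbered the pre-training baseline in all_results.json.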
test_finetuned_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 1.0,
+    "test_finetuned_loss": 1.7568330764770508,
+    "test_finetuned_runtime": 39.6579,
+    "test_finetuned_samples_per_second": 0.101,
+    "test_finetuned_steps_per_second": 0.05,
+    "test_finetuned_wer": 138.5964912280702
+}
test_pretrained_results.json ADDED
@@ -0,0 +1,7 @@
+{
+    "test_pretrained_loss": 1.7568330764770508,
+    "test_pretrained_runtime": 42.5376,
+    "test_pretrained_samples_per_second": 0.094,
+    "test_pretrained_steps_per_second": 0.047,
+    "test_pretrained_wer": 138.5964912280702
+}
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
     "epoch": 1.0,
     "train_loss": 1.4339025020599365,
-    "train_runtime": 108.1566,
-    "train_samples_per_second": 0.074,
-    "train_steps_per_second": 0.018
+    "train_runtime": 102.2429,
+    "train_samples_per_second": 0.078,
+    "train_steps_per_second": 0.02
 }
trainer_state.json CHANGED
@@ -16,9 +16,9 @@
     {
       "epoch": 0.5,
      "eval_loss": 1.6191972494125366,
-      "eval_runtime": 41.7522,
-      "eval_samples_per_second": 0.096,
-      "eval_steps_per_second": 0.048,
+      "eval_runtime": 38.6993,
+      "eval_samples_per_second": 0.103,
+      "eval_steps_per_second": 0.052,
       "eval_wer": 153.2258064516129,
       "step": 1
     },
@@ -31,9 +31,9 @@
     {
       "epoch": 1.0,
       "eval_loss": 1.6191972494125366,
-      "eval_runtime": 37.2469,
-      "eval_samples_per_second": 0.107,
-      "eval_steps_per_second": 0.054,
+      "eval_runtime": 39.2638,
+      "eval_samples_per_second": 0.102,
+      "eval_steps_per_second": 0.051,
       "eval_wer": 153.2258064516129,
       "step": 2
     },
@@ -42,9 +42,9 @@
       "step": 2,
       "total_flos": 196951080960000.0,
       "train_loss": 1.4339025020599365,
-      "train_runtime": 108.1566,
-      "train_samples_per_second": 0.074,
-      "train_steps_per_second": 0.018
+      "train_runtime": 102.2429,
+      "train_samples_per_second": 0.078,
+      "train_steps_per_second": 0.02
     }
   ],
   "max_steps": 2,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:081bb11364a81f07b31bca0107ccb5d62d955d95e115f206089558cf85595e34
+oid sha256:c9cf1b9241b15d95f3310024d7bec9fb6d139a94c9d760c0be51787d41a93fc3
 size 3579
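Only the Git LFS pointer changes here: training_args.bin is the Trainer's pickled `TrainingArguments`, so re-running training rewrites the file and the sha256 moves even though the size stays at 3579 bytes. A sketch for inspecting it locally (assumes you trust the file; on recent PyTorch, `weights_only=False` is needed to unpickle arbitrary objects):

import torch

# training_args.bin is a pickled TrainingArguments object saved by the Trainer.
# Only pass weights_only=False for files you trust.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.per_device_train_batch_size)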