Silemo committed on
Commit a4db18d · 1 Parent(s): 8048f72

Training in progress, step 300, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fe3597c122a2e5567b331b531c1b0cf98c2606dfe5fc98bc664181d12b2b83cd
+oid sha256:6f51bb8284f60e083a30ceea4f42d89b604626ee09af43b6a30bc15d987e79a3
 size 966995080
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5feb1207165a2472ed0fc4a2352d29febeb73effc6fad6dbd900bc416ac92d15
+oid sha256:3cd8885865d23f805fde3474a7d056599f20ef2eec20b7c8aa1b5c7c56e6ba64
 size 1925064044
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4fd91acef56149d62ef60f134f6bef8f3143b1426e8731dcb1c5449312d3ea8c
+oid sha256:49f603c8d0789e2771e72e17a35d09664975138faec34bc1a21798b82a6ebaa3
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4f9b9e6716d4845c0461a67674925be51cb5bd879f6bddb03b17fa941754a7de
+oid sha256:e1f80a908bd128972cf50fa650e97f4d24517acd1cb1cf259dd62d8599405ec1
 size 1064
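
The four files above are stored through Git LFS, so each diff only touches the pointer file: the `oid sha256:` line is the hash of the new checkpoint blob and `size` is its byte count. A minimal sketch of checking a downloaded object against its pointer follows; the local path is an assumption for illustration, not part of this commit.

import hashlib
from pathlib import Path

def lfs_sha256(path, chunk_size=1 << 20):
    """Hash a downloaded LFS object the same way its pointer records it."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of the checkpoint weights from this commit.
weights = Path("last-checkpoint/model.safetensors")
print(lfs_sha256(weights))      # expected to match the "+ oid sha256:..." line
print(weights.stat().st_size)   # expected to match the "size" line (966995080)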
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 176.42124237382143,
-  "best_model_checkpoint": "./whisper-it/checkpoint-100",
-  "epoch": 0.19083969465648856,
+  "best_metric": 70.1955074875208,
+  "best_model_checkpoint": "./whisper-it/checkpoint-300",
+  "epoch": 0.5725190839694656,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 300,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -40,6 +40,72 @@
       "eval_steps_per_second": 0.095,
       "eval_wer": 176.42124237382143,
       "step": 100
+    },
+    {
+      "epoch": 0.24,
+      "learning_rate": 2.4000000000000003e-06,
+      "loss": 0.9794,
+      "step": 125
+    },
+    {
+      "epoch": 0.29,
+      "learning_rate": 2.9e-06,
+      "loss": 0.8642,
+      "step": 150
+    },
+    {
+      "epoch": 0.33,
+      "learning_rate": 3.4000000000000005e-06,
+      "loss": 0.8044,
+      "step": 175
+    },
+    {
+      "epoch": 0.38,
+      "learning_rate": 3.88e-06,
+      "loss": 0.7389,
+      "step": 200
+    },
+    {
+      "epoch": 0.38,
+      "eval_loss": 0.8330782055854797,
+      "eval_runtime": 1755.11,
+      "eval_samples_per_second": 0.855,
+      "eval_steps_per_second": 0.107,
+      "eval_wer": 80.49084858569051,
+      "step": 200
+    },
+    {
+      "epoch": 0.43,
+      "learning_rate": 4.38e-06,
+      "loss": 0.6293,
+      "step": 225
+    },
+    {
+      "epoch": 0.48,
+      "learning_rate": 4.880000000000001e-06,
+      "loss": 0.5066,
+      "step": 250
+    },
+    {
+      "epoch": 0.52,
+      "learning_rate": 5.380000000000001e-06,
+      "loss": 0.3526,
+      "step": 275
+    },
+    {
+      "epoch": 0.57,
+      "learning_rate": 5.8800000000000005e-06,
+      "loss": 0.2951,
+      "step": 300
+    },
+    {
+      "epoch": 0.57,
+      "eval_loss": 0.4260523319244385,
+      "eval_runtime": 1708.1953,
+      "eval_samples_per_second": 0.878,
+      "eval_steps_per_second": 0.11,
+      "eval_wer": 70.1955074875208,
+      "step": 300
     }
   ],
   "logging_steps": 25,
@@ -47,7 +113,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 8,
   "save_steps": 100,
-  "total_flos": 9.23473281024e+17,
+  "total_flos": 2.770419843072e+18,
   "trial_name": null,
   "trial_params": null
 }
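
The `log_history` entries added in this diff record the training loss every 25 steps and an evaluation every 100 steps, with the best WER dropping from 176.42 at step 100 to 70.20 at step 300. A short sketch of reading those logs back out of the updated file, assuming a local copy of the checkpoint directory:

import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["best_metric"], state["best_model_checkpoint"])

# Training-loss logs carry "loss"; evaluation logs carry "eval_wer".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_wer" in e]

for e in eval_logs:
    print(f"step {e['step']}: eval_loss={e['eval_loss']:.3f}, WER={e['eval_wer']:.2f}")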
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0deaaf9b4dc785b95bcfecba732d9f20738cd6d99cb5f2d674c46008c563ff98
-size 4856
+oid sha256:a6afd188b1bb5040b1e1647512623a4ea330124b3938572fb5350dd1ea4ab41d
+size 4792
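
Together, the files in this commit form a complete `last-checkpoint` directory that the Hugging Face `Trainer` can resume from. A minimal sketch is below; the model, datasets, and collator are assumptions that would have to be rebuilt exactly as in the original whisper-it training script, which is not part of this commit.

from transformers import Seq2SeqTrainer

# Sketch only: `model`, `args`, `train_dataset`, `eval_dataset`, and
# `data_collator` stand in for the original whisper-it training setup.
trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    data_collator=data_collator,
)

# resume_from_checkpoint restores model.safetensors, optimizer.pt,
# scheduler.pt, rng_state.pth, and trainer_state.json from this commit,
# so training continues at global_step 300 instead of restarting.
trainer.train(resume_from_checkpoint="last-checkpoint")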