mprzibilla committed
Commit 027263a
1 Parent(s): 79fb508

Training in progress, epoch 1

last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4eb118859ccd2b17b7c4ddc77d35f3fe0ef85393a1eeeb72c87b218498a749f7
+oid sha256:15e0b09d6517be8dfbfa192a629bf8c8ebbd7a36b42f6874dc9469ccb08abf3d
 size 721655813
last-checkpoint/pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:abc34843e0965196a4479f17cf08bf8a88225eec6c573845d80ec4df06d5bcf4
+oid sha256:433004263a1040799e6bfe1d5d5cb128be82e2f0212a989bd17392c683139c99
 size 377643361
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:769917d52eee0e3ec39baab8d23748c7392a020826ef8ddf1250670f1fa7a2d1
+oid sha256:706cb5515dfb83acf46311e8872450c873a190e15c985bac9f03ac1601afb4ca
 size 14575
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9d06a48b6590730359e992b5a9274a6f0aa5a3415a51fce3457d99a6fd60c656
+oid sha256:d130c113354872d1ca46ad7aa6c522fdfca3010ff0ae55e7777fc12049fdca1d
 size 557
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2bb234e8aff204ba837868e8af06d316f481bb10dedb985a07200d97b524462f
+oid sha256:00e56ff1f9aeef202866d18c52f49cf630af4e847aa9aca70191fe768f73377a
 size 627
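
The checkpoint binaries above (optimizer.pt, pytorch_model.bin, rng_state.pth, scaler.pt, scheduler.pt) are stored through Git LFS, so the diff only shows their pointer files: the sha256 oid and byte size of each blob. As a minimal sketch, assuming the checkpoint has been downloaded next to its pointer files, the oid can be checked against the actual file contents (the path below is an illustrative example, not something referenced by this commit):

# Minimal sketch: verify a Git LFS pointer against its downloaded blob.
# The oid in an LFS pointer is the SHA-256 of the file contents.
# The path below is an illustrative example.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            digest.update(block)
    return digest.hexdigest()

print(sha256_of("last-checkpoint/optimizer.pt"))
# should print 15e0b09d6517... for the version committed here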
last-checkpoint/trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 8.0,
-  "global_step": 2576,
+  "epoch": 1.0,
+  "global_step": 322,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -10,143 +10,24 @@
     {
       "epoch": 1.0,
       "learning_rate": 9.83874482458052e-05,
-      "loss": 22.6211,
+      "loss": 22.5987,
       "step": 322
     },
     {
       "epoch": 1.0,
       "eval_cer": 1.0,
-      "eval_loss": 3.3536856174468994,
+      "eval_loss": 3.259235143661499,
       "eval_new_wer": 1.0,
       "eval_old_wer": 1.0,
-      "eval_runtime": 7.7387,
-      "eval_samples_per_second": 27.136,
-      "eval_steps_per_second": 3.489,
+      "eval_runtime": 8.6332,
+      "eval_samples_per_second": 24.325,
+      "eval_steps_per_second": 3.127,
       "step": 322
-    },
-    {
-      "epoch": 2.0,
-      "learning_rate": 9.13706689910656e-05,
-      "loss": 3.4051,
-      "step": 644
-    },
-    {
-      "epoch": 2.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.384406328201294,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.5183,
-      "eval_samples_per_second": 27.932,
-      "eval_steps_per_second": 3.591,
-      "step": 644
-    },
-    {
-      "epoch": 3.0,
-      "learning_rate": 8.4353889736326e-05,
-      "loss": 3.4379,
-      "step": 966
-    },
-    {
-      "epoch": 3.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.2850329875946045,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.8755,
-      "eval_samples_per_second": 26.665,
-      "eval_steps_per_second": 3.428,
-      "step": 966
-    },
-    {
-      "epoch": 4.0,
-      "learning_rate": 7.73371104815864e-05,
-      "loss": 3.4255,
-      "step": 1288
-    },
-    {
-      "epoch": 4.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.3971991539001465,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.5517,
-      "eval_samples_per_second": 27.808,
-      "eval_steps_per_second": 3.575,
-      "step": 1288
-    },
-    {
-      "epoch": 5.0,
-      "learning_rate": 7.032033122684682e-05,
-      "loss": 3.4102,
-      "step": 1610
-    },
-    {
-      "epoch": 5.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.3659400939941406,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.5128,
-      "eval_samples_per_second": 27.952,
-      "eval_steps_per_second": 3.594,
-      "step": 1610
-    },
-    {
-      "epoch": 6.0,
-      "learning_rate": 6.330355197210721e-05,
-      "loss": 3.4039,
-      "step": 1932
-    },
-    {
-      "epoch": 6.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.3250296115875244,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.4768,
-      "eval_samples_per_second": 28.087,
-      "eval_steps_per_second": 3.611,
-      "step": 1932
-    },
-    {
-      "epoch": 7.0,
-      "learning_rate": 5.628677271736762e-05,
-      "loss": 3.4033,
-      "step": 2254
-    },
-    {
-      "epoch": 7.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.3770763874053955,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.4897,
-      "eval_samples_per_second": 28.038,
-      "eval_steps_per_second": 3.605,
-      "step": 2254
-    },
-    {
-      "epoch": 8.0,
-      "learning_rate": 4.9269993462628025e-05,
-      "loss": 3.3976,
-      "step": 2576
-    },
-    {
-      "epoch": 8.0,
-      "eval_cer": 1.0,
-      "eval_loss": 3.305764675140381,
-      "eval_new_wer": 1.0,
-      "eval_old_wer": 1.0,
-      "eval_runtime": 7.4908,
-      "eval_samples_per_second": 28.034,
-      "eval_steps_per_second": 3.604,
-      "step": 2576
     }
   ],
   "max_steps": 4830,
   "num_train_epochs": 15,
-  "total_flos": 1.912160410310016e+18,
+  "total_flos": 2.38632755188128e+17,
   "trial_name": null,
   "trial_params": null
 }
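
trainer_state.json carries the Trainer's log_history: after this commit it holds a single epoch-1 entry (loss 22.5987, eval_loss 3.2592, eval_cer 1.0, eval_new_wer 1.0, eval_old_wer 1.0 at step 322 of 4830). A minimal sketch for inspecting it locally, assuming the checkpoint directory has been downloaded (the path is an assumption):

# Minimal sketch: read the log history recorded in trainer_state.json.
# The local path is an assumption; point it at the downloaded checkpoint.
import json

with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(f"epoch {state['epoch']}, step {state['global_step']} of {state['max_steps']}")
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(entry["step"], entry["eval_loss"], entry["eval_cer"], entry["eval_old_wer"])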
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:728f47a3aa9aa6097ab096de7ed2d1e438a0a05309a80b28f01c0f3f6e7eaa44
+oid sha256:950b7a3f32c6bae99f90ac51b590f1a0cedbba08cabd0e9d7ec6feb261e68f5e
 size 3387
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:abc34843e0965196a4479f17cf08bf8a88225eec6c573845d80ec4df06d5bcf4
+oid sha256:433004263a1040799e6bfe1d5d5cb128be82e2f0212a989bd17392c683139c99
 size 377643361
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:728f47a3aa9aa6097ab096de7ed2d1e438a0a05309a80b28f01c0f3f6e7eaa44
+oid sha256:950b7a3f32c6bae99f90ac51b590f1a0cedbba08cabd0e9d7ec6feb261e68f5e
 size 3387
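
To continue or reproduce this run, the checkpoint can be pulled from the Hub; a minimal sketch with huggingface_hub follows. The repo_id is a placeholder (the repository name is not shown in this diff), and the revision is the short commit hash above; substitute the full hash if the short form is not resolved.

# Minimal sketch: download this commit's checkpoint files with huggingface_hub.
# repo_id is a placeholder; the actual repository name is not visible in this diff.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="mprzibilla/your-model",      # placeholder repo_id
    revision="027263a",                   # short hash of the commit above
    allow_patterns=["last-checkpoint/*"], # only the checkpoint directory
)
print(local_dir)

From there, transformers' Trainer can resume via trainer.train(resume_from_checkpoint=...), which restores the optimizer, scheduler, gradient-scaler, and RNG state saved under last-checkpoint.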