csikasote committed (verified)
Commit cb72cb1 · 1 Parent(s): 101da2b

End of training

Files changed (5):
  1. README.md +3 -1
  2. all_results.json +15 -0
  3. eval_results.json +9 -0
  4. train_results.json +9 -0
  5. trainer_state.json +243 -0
README.md CHANGED
@@ -3,6 +3,8 @@ library_name: transformers
 license: apache-2.0
 base_model: facebook/wav2vec2-xls-r-1b
 tags:
+- automatic-speech-recognition
+- bemgen
 - generated_from_trainer
 metrics:
 - wer
@@ -16,7 +18,7 @@ should probably proofread and complete it, then remove this comment. -->
 
 # xls-r-1b-bemgen-combined-model
 
-This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on an unknown dataset.
+This model is a fine-tuned version of [facebook/wav2vec2-xls-r-1b](https://huggingface.co/facebook/wav2vec2-xls-r-1b) on the BEMGEN - NA dataset.
 It achieves the following results on the evaluation set:
 - Loss: 0.2509
 - Wer: 0.3923
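For readers of the updated model card, a minimal inference sketch follows. It assumes the checkpoint is published on the Hub under the id used below (inferred from the repository name, not stated in this commit) and that `audio.wav` is a 16 kHz mono recording you supply yourself.

```python
from transformers import pipeline

# Hypothetical Hub id, inferred from the repo/model name in this commit;
# adjust to wherever the checkpoint is actually published.
MODEL_ID = "csikasote/xls-r-1b-bemgen-combined-model"

# Build an ASR pipeline around the fine-tuned CTC checkpoint.
asr = pipeline("automatic-speech-recognition", model=MODEL_ID)

# "audio.wav" is a placeholder path; the pipeline returns a dict with "text".
print(asr("audio.wav")["text"])
```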
all_results.json ADDED
@@ -0,0 +1,15 @@
+{
+    "epoch": 3.3871543264942017,
+    "eval_loss": 0.25092917680740356,
+    "eval_runtime": 39.3291,
+    "eval_samples": 541,
+    "eval_samples_per_second": 13.756,
+    "eval_steps_per_second": 3.458,
+    "eval_wer": 0.39230210602759624,
+    "total_flos": 1.1803978917941375e+19,
+    "train_loss": 1.775066086618524,
+    "train_runtime": 3182.4103,
+    "train_samples": 4482,
+    "train_samples_per_second": 42.251,
+    "train_steps_per_second": 5.279
+}
eval_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 3.3871543264942017,
+    "eval_loss": 0.25092917680740356,
+    "eval_runtime": 39.3291,
+    "eval_samples": 541,
+    "eval_samples_per_second": 13.756,
+    "eval_steps_per_second": 3.458,
+    "eval_wer": 0.39230210602759624
+}
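The `eval_wer` field above is a word error rate. A hedged sketch of how such a value can be computed with the `evaluate` library is shown below; the transcript strings are placeholders, not BEMGEN data.

```python
import evaluate

# Load the word-error-rate metric; eval_wer values like the one above are
# typically produced by comparing decoded predictions to reference transcripts.
wer_metric = evaluate.load("wer")

# Placeholder transcripts for illustration only.
references = ["this is a placeholder reference transcript"]
predictions = ["this is a placeholder predicted transcript"]

print(wer_metric.compute(predictions=predictions, references=references))
```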
train_results.json ADDED
@@ -0,0 +1,9 @@
+{
+    "epoch": 3.3871543264942017,
+    "total_flos": 1.1803978917941375e+19,
+    "train_loss": 1.775066086618524,
+    "train_runtime": 3182.4103,
+    "train_samples": 4482,
+    "train_samples_per_second": 42.251,
+    "train_steps_per_second": 5.279
+}
trainer_state.json ADDED
@@ -0,0 +1,243 @@
+{
+  "best_metric": 0.24462078511714935,
+  "best_model_checkpoint": "/scratch/skscla001/speech/results/xls-r-1b-bemgen-combined-model/checkpoint-1500",
+  "epoch": 3.3871543264942017,
+  "eval_steps": 100,
+  "global_step": 1900,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.1784121320249777,
+      "eval_loss": 3.441302537918091,
+      "eval_runtime": 39.9204,
+      "eval_samples_per_second": 13.552,
+      "eval_steps_per_second": 3.407,
+      "eval_wer": 1.0002904865649964,
+      "step": 100
+    },
+    {
+      "epoch": 0.3568242640499554,
+      "eval_loss": 2.914942979812622,
+      "eval_runtime": 39.4152,
+      "eval_samples_per_second": 13.726,
+      "eval_steps_per_second": 3.45,
+      "eval_wer": 1.0,
+      "step": 200
+    },
+    {
+      "epoch": 0.5352363960749331,
+      "eval_loss": 0.7767665386199951,
+      "eval_runtime": 39.9772,
+      "eval_samples_per_second": 13.533,
+      "eval_steps_per_second": 3.402,
+      "eval_wer": 0.9234567901234568,
+      "step": 300
+    },
+    {
+      "epoch": 0.7136485280999108,
+      "eval_loss": 0.605692446231842,
+      "eval_runtime": 39.422,
+      "eval_samples_per_second": 13.723,
+      "eval_steps_per_second": 3.45,
+      "eval_wer": 0.904720406681191,
+      "step": 400
+    },
+    {
+      "epoch": 0.8920606601248885,
+      "grad_norm": 3.2083628177642822,
+      "learning_rate": 2.976e-05,
+      "loss": 5.3372,
+      "step": 500
+    },
+    {
+      "epoch": 0.8920606601248885,
+      "eval_loss": 0.43167629837989807,
+      "eval_runtime": 39.6193,
+      "eval_samples_per_second": 13.655,
+      "eval_steps_per_second": 3.433,
+      "eval_wer": 0.6720406681190995,
+      "step": 500
+    },
+    {
+      "epoch": 1.0695807314897412,
+      "eval_loss": 0.3997327983379364,
+      "eval_runtime": 39.7537,
+      "eval_samples_per_second": 13.609,
+      "eval_steps_per_second": 3.421,
+      "eval_wer": 0.6704429920116195,
+      "step": 600
+    },
+    {
+      "epoch": 1.247992863514719,
+      "eval_loss": 0.3610936999320984,
+      "eval_runtime": 39.4379,
+      "eval_samples_per_second": 13.718,
+      "eval_steps_per_second": 3.448,
+      "eval_wer": 0.6405228758169934,
+      "step": 700
+    },
+    {
+      "epoch": 1.4264049955396967,
+      "eval_loss": 0.34406763315200806,
+      "eval_runtime": 39.4181,
+      "eval_samples_per_second": 13.725,
+      "eval_steps_per_second": 3.45,
+      "eval_wer": 0.5603485838779957,
+      "step": 800
+    },
+    {
+      "epoch": 1.6048171275646745,
+      "eval_loss": 0.2945486903190613,
+      "eval_runtime": 39.8239,
+      "eval_samples_per_second": 13.585,
+      "eval_steps_per_second": 3.415,
+      "eval_wer": 0.49135802469135803,
+      "step": 900
+    },
+    {
+      "epoch": 1.783229259589652,
+      "grad_norm": 3.3745229244232178,
+      "learning_rate": 2.908711656441718e-05,
+      "loss": 0.6459,
+      "step": 1000
+    },
+    {
+      "epoch": 1.783229259589652,
+      "eval_loss": 0.3041314482688904,
+      "eval_runtime": 39.339,
+      "eval_samples_per_second": 13.752,
+      "eval_steps_per_second": 3.457,
+      "eval_wer": 0.4923747276688453,
+      "step": 1000
+    },
+    {
+      "epoch": 1.9616413916146298,
+      "eval_loss": 0.2805267870426178,
+      "eval_runtime": 39.2725,
+      "eval_samples_per_second": 13.776,
+      "eval_steps_per_second": 3.463,
+      "eval_wer": 0.46811909949164854,
+      "step": 1100
+    },
+    {
+      "epoch": 2.1391614629794824,
+      "eval_loss": 0.2774401605129242,
+      "eval_runtime": 39.4398,
+      "eval_samples_per_second": 13.717,
+      "eval_steps_per_second": 3.448,
+      "eval_wer": 0.5108206245461148,
+      "step": 1200
+    },
+    {
+      "epoch": 2.3175735950044603,
+      "eval_loss": 0.26826903223991394,
+      "eval_runtime": 39.6616,
+      "eval_samples_per_second": 13.64,
+      "eval_steps_per_second": 3.429,
+      "eval_wer": 0.42541757443718226,
+      "step": 1300
+    },
+    {
+      "epoch": 2.495985727029438,
+      "eval_loss": 0.2643994987010956,
+      "eval_runtime": 39.3737,
+      "eval_samples_per_second": 13.74,
+      "eval_steps_per_second": 3.454,
+      "eval_wer": 0.4381989832970225,
+      "step": 1400
+    },
+    {
+      "epoch": 2.674397859054416,
+      "grad_norm": 2.350830554962158,
+      "learning_rate": 2.816687116564417e-05,
+      "loss": 0.4599,
+      "step": 1500
+    },
+    {
+      "epoch": 2.674397859054416,
+      "eval_loss": 0.24462078511714935,
+      "eval_runtime": 39.5477,
+      "eval_samples_per_second": 13.68,
+      "eval_steps_per_second": 3.439,
+      "eval_wer": 0.4142338416848221,
+      "step": 1500
+    },
+    {
+      "epoch": 2.8528099910793934,
+      "eval_loss": 0.24734348058700562,
+      "eval_runtime": 39.4462,
+      "eval_samples_per_second": 13.715,
+      "eval_steps_per_second": 3.448,
+      "eval_wer": 0.4117647058823529,
+      "step": 1600
+    },
+    {
+      "epoch": 3.0303300624442464,
+      "eval_loss": 0.24915921688079834,
+      "eval_runtime": 39.9106,
+      "eval_samples_per_second": 13.555,
+      "eval_steps_per_second": 3.408,
+      "eval_wer": 0.396078431372549,
+      "step": 1700
+    },
+    {
+      "epoch": 3.208742194469224,
+      "eval_loss": 0.24668627977371216,
+      "eval_runtime": 39.4647,
+      "eval_samples_per_second": 13.708,
+      "eval_steps_per_second": 3.446,
+      "eval_wer": 0.40697167755991287,
+      "step": 1800
+    },
+    {
+      "epoch": 3.3871543264942017,
+      "eval_loss": 0.25092557072639465,
+      "eval_runtime": 39.3684,
+      "eval_samples_per_second": 13.742,
+      "eval_steps_per_second": 3.455,
+      "eval_wer": 0.39230210602759624,
+      "step": 1900
+    },
+    {
+      "epoch": 3.3871543264942017,
+      "step": 1900,
+      "total_flos": 1.1803978917941375e+19,
+      "train_loss": 1.775066086618524,
+      "train_runtime": 3182.4103,
+      "train_samples_per_second": 42.251,
+      "train_steps_per_second": 5.279
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 16800,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 30,
+  "save_steps": 400,
+  "stateful_callbacks": {
+    "EarlyStoppingCallback": {
+      "args": {
+        "early_stopping_patience": 4,
+        "early_stopping_threshold": 0.0
+      },
+      "attributes": {
+        "early_stopping_patience_counter": 1
+      }
+    },
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.1803978917941375e+19,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
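The state above implies a fairly standard Trainer configuration: evaluation every 100 steps, logging every 500, checkpoints every 400, early stopping with patience 4, per-device batch size 4, and a 30-epoch budget that early stopping cut short at step 1900. A hedged sketch consistent with those values, not the author's actual training script, follows; dataset loading, the processor, and the CTC data collator are omitted.

```python
from transformers import (
    EarlyStoppingCallback,
    Trainer,
    TrainingArguments,
    Wav2Vec2ForCTC,
)

# In practice the CTC head size comes from the dataset's tokenizer; that setup
# is omitted here, so the head is left randomly initialized.
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-xls-r-1b")

training_args = TrainingArguments(
    output_dir="xls-r-1b-bemgen-combined-model",
    per_device_train_batch_size=4,   # matches "train_batch_size": 4
    num_train_epochs=30,             # matches "num_train_epochs": 30
    eval_strategy="steps",           # older transformers: evaluation_strategy
    eval_steps=100,                  # matches "eval_steps": 100
    logging_steps=500,               # matches "logging_steps": 500
    save_steps=400,                  # matches "save_steps": 400
    load_best_model_at_end=True,     # required by EarlyStoppingCallback
    metric_for_best_model="loss",    # best_metric above is an eval loss
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=None,              # placeholder: BEMGEN train split goes here
    eval_dataset=None,               # placeholder: BEMGEN eval split goes here
    callbacks=[EarlyStoppingCallback(early_stopping_patience=4)],
)
```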