davidilag committed
Commit 7c82405
1 Parent(s): 93f207b

End of training

Files changed (2)
  1. README.md +82 -0
  2. generation_config.json +239 -0
README.md ADDED
@@ -0,0 +1,82 @@
---
license: apache-2.0
base_model: NbAiLab/nb-whisper-tiny
tags:
- generated_from_trainer
datasets:
- samromur_asr
metrics:
- wer
model-index:
- name: whisper-tiny-no-is-5k-steps
  results:
  - task:
      name: Automatic Speech Recognition
      type: automatic-speech-recognition
    dataset:
      name: samromur_asr
      type: samromur_asr
      config: samromur_asr
      split: test
      args: samromur_asr
    metrics:
    - name: Wer
      type: wer
      value: 51.60346372051359
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/setur/huggingface/runs/blydklsj)
# whisper-tiny-no-is-5k-steps

This model is a fine-tuned version of [NbAiLab/nb-whisper-tiny](https://huggingface.co/NbAiLab/nb-whisper-tiny) on the samromur_asr dataset; a minimal usage sketch follows the results below.
It achieves the following results on the evaluation set:
- Loss: 0.7830
- Wer: 51.6035

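For quick orientation, transcription can be run through the `automatic-speech-recognition` pipeline. This is a hedged sketch, not part of the generated card: the repo id `davidilag/whisper-tiny-no-is-5k-steps` is assumed from the commit author and model name, and `sample.wav` is a placeholder audio file.

```python
# Minimal usage sketch. The repo id is assumed from the commit author and
# model name; "sample.wav" is a placeholder for any audio file.
from transformers import pipeline

asr = pipeline(
    "automatic-speech-recognition",
    model="davidilag/whisper-tiny-no-is-5k-steps",
)

result = asr("sample.wav")
print(result["text"])
```
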
## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (see the sketch after this list for how they map onto `Seq2SeqTrainingArguments`):
- learning_rate: 1e-05
- train_batch_size: 16
- eval_batch_size: 8
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 500
- training_steps: 5000
- mixed_precision_training: Native AMP

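These values correspond to standard `transformers` training arguments. The following is a non-authoritative sketch of that mapping, assuming the usual `Seq2SeqTrainingArguments`-based fine-tuning setup; `output_dir` and anything not listed above are placeholders rather than values from the actual run.

```python
# Hedged sketch: how the listed hyperparameters would map onto
# Seq2SeqTrainingArguments. output_dir is a placeholder, not from the run.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="whisper-tiny-no-is-5k-steps",  # placeholder
    learning_rate=1e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    warmup_steps=500,
    max_steps=5000,
    fp16=True,  # "Native AMP" mixed precision
)
```
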
### Training results

| Training Loss | Epoch  | Step | Validation Loss | Wer (%) |
|:-------------:|:------:|:----:|:---------------:|:-------:|
| 1.2696        | 0.1778 | 1000 | 1.3551          | 69.4512 |
| 0.9101        | 0.3556 | 2000 | 0.9833          | 60.0096 |
| 0.7626        | 0.5333 | 3000 | 0.8596          | 54.7590 |
| 0.7316        | 0.7111 | 4000 | 0.8014          | 52.3165 |
| 0.7269        | 0.8889 | 5000 | 0.7830          | 51.6035 |

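The word error rates above are percentages. As a rough illustration (not part of the generated card), they can be computed with the `evaluate` library; the sentences below are invented examples, not samromur_asr data.

```python
# Illustrative WER computation with the `evaluate` library.
# The strings are invented examples, not samromur_asr data.
import evaluate

wer_metric = evaluate.load("wer")
predictions = ["hún fór í skólann í morgun"]
references = ["hún fór í skólann snemma í morgun"]

# evaluate returns a fraction; the card reports it multiplied by 100.
print(100 * wer_metric.compute(predictions=predictions, references=references))
```
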
### Framework versions

- Transformers 4.42.3
- PyTorch 2.3.0+cu121
- Datasets 2.20.0
- Tokenizers 0.19.1
generation_config.json ADDED
@@ -0,0 +1,239 @@
{
  "alignment_heads": [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]],
  "begin_suppress_tokens": [220, 50257],
  "bos_token_id": 50257,
  "decoder_start_token_id": 50258,
  "eos_token_id": 50257,
  "is_multilingual": true,
  "lang_to_id": {
    "<|af|>": 50327, "<|am|>": 50334, "<|ar|>": 50272, "<|as|>": 50350,
    "<|az|>": 50304, "<|ba|>": 50355, "<|be|>": 50330, "<|bg|>": 50292,
    "<|bn|>": 50302, "<|bo|>": 50347, "<|br|>": 50309, "<|bs|>": 50315,
    "<|ca|>": 50270, "<|cs|>": 50283, "<|cy|>": 50297, "<|da|>": 50285,
    "<|de|>": 50261, "<|el|>": 50281, "<|en|>": 50259, "<|es|>": 50262,
    "<|et|>": 50307, "<|eu|>": 50310, "<|fa|>": 50300, "<|fi|>": 50277,
    "<|fo|>": 50338, "<|fr|>": 50265, "<|gl|>": 50319, "<|gu|>": 50333,
    "<|haw|>": 50352, "<|ha|>": 50354, "<|he|>": 50279, "<|hi|>": 50276,
    "<|hr|>": 50291, "<|ht|>": 50339, "<|hu|>": 50286, "<|hy|>": 50312,
    "<|id|>": 50275, "<|is|>": 50311, "<|it|>": 50274, "<|ja|>": 50266,
    "<|jw|>": 50356, "<|ka|>": 50329, "<|kk|>": 50316, "<|km|>": 50323,
    "<|kn|>": 50306, "<|ko|>": 50264, "<|la|>": 50294, "<|lb|>": 50345,
    "<|ln|>": 50353, "<|lo|>": 50336, "<|lt|>": 50293, "<|lv|>": 50301,
    "<|mg|>": 50349, "<|mi|>": 50295, "<|mk|>": 50308, "<|ml|>": 50296,
    "<|mn|>": 50314, "<|mr|>": 50320, "<|ms|>": 50282, "<|mt|>": 50343,
    "<|my|>": 50346, "<|ne|>": 50313, "<|nl|>": 50271, "<|nn|>": 50342,
    "<|no|>": 50288, "<|oc|>": 50328, "<|pa|>": 50321, "<|pl|>": 50269,
    "<|ps|>": 50340, "<|pt|>": 50267, "<|ro|>": 50284, "<|ru|>": 50263,
    "<|sa|>": 50344, "<|sd|>": 50332, "<|si|>": 50322, "<|sk|>": 50298,
    "<|sl|>": 50305, "<|sn|>": 50324, "<|so|>": 50326, "<|sq|>": 50317,
    "<|sr|>": 50303, "<|su|>": 50357, "<|sv|>": 50273, "<|sw|>": 50318,
    "<|ta|>": 50287, "<|te|>": 50299, "<|tg|>": 50331, "<|th|>": 50289,
    "<|tk|>": 50341, "<|tl|>": 50348, "<|tr|>": 50268, "<|tt|>": 50351,
    "<|uk|>": 50280, "<|ur|>": 50290, "<|uz|>": 50337, "<|vi|>": 50278,
    "<|yi|>": 50335, "<|yo|>": 50325, "<|zh|>": 50260
  },
  "language": "<|is|>",
  "max_initial_timestamp_index": 1,
  "max_length": 448,
  "no_timestamps_token_id": 50363,
  "pad_token_id": 50257,
  "return_timestamps": false,
  "suppress_tokens": [
    1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90,
    91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853,
    1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585,
    6647, 7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
    14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
    22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
    42863, 47425, 49870, 50254, 50258, 50358, 50359, 50360, 50361, 50362
  ],
  "task": "transcribe",
  "task_to_id": {
    "transcribe": 50359,
    "translate": 50358
  },
  "transformers_version": "4.42.3"
}
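
These generation settings (the forced Icelandic language token `<|is|>`, the `transcribe` task, the suppressed-token lists, and `max_length` 448) are applied automatically when the model generates. As a small, non-authoritative sketch of inspecting them, with the repo id again assumed from the commit author and model name:

```python
# Sketch: load and inspect the committed generation settings.
# The repo id is an assumption based on the commit author and model name.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("davidilag/whisper-tiny-no-is-5k-steps")

print(gen_config.language)    # "<|is|>"  -> Icelandic is used by default
print(gen_config.task)        # "transcribe"
print(gen_config.max_length)  # 448
```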