5roop committed
Commit 905e02a
1 Parent(s): 77b03e4

Added model 38

Files changed (9):
  1. config.json +32 -3
  2. optimizer.pt +2 -2
  3. pytorch_model.bin +2 -2
  4. rng_state.pth +1 -1
  5. scaler.pt +1 -1
  6. scheduler.pt +1 -1
  7. trainer_state.json +255 -94
  8. training_args.bin +2 -2
  9. vocab.json +3 -1
config.json CHANGED
@@ -1,6 +1,9 @@
 {
   "_name_or_path": "facebook/wav2vec2-xls-r-300m",
   "activation_dropout": 0.0,
+  "adapter_kernel_size": 3,
+  "adapter_stride": 2,
+  "add_adapter": false,
   "apply_spec_augment": true,
   "architectures": [
     "Wav2Vec2ForCTC"
@@ -58,10 +61,13 @@
   "layer_norm_eps": 1e-05,
   "layerdrop": 0.0,
   "mask_feature_length": 10,
+  "mask_feature_min_masks": 0,
   "mask_feature_prob": 0.0,
   "mask_time_length": 10,
+  "mask_time_min_masks": 2,
   "mask_time_prob": 0.05,
   "model_type": "wav2vec2",
+  "num_adapter_layers": 3,
   "num_attention_heads": 16,
   "num_codevector_groups": 2,
   "num_codevectors_per_group": 320,
@@ -70,10 +76,33 @@
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
   "num_negatives": 100,
-  "pad_token_id": 36,
+  "output_hidden_size": 1024,
+  "pad_token_id": 1,
   "proj_codevector_dim": 768,
+  "tdnn_dilation": [
+    1,
+    2,
+    3,
+    1,
+    1
+  ],
+  "tdnn_dim": [
+    512,
+    512,
+    512,
+    512,
+    1500
+  ],
+  "tdnn_kernel": [
+    5,
+    3,
+    3,
+    1,
+    1
+  ],
   "torch_dtype": "float32",
-  "transformers_version": "4.11.3",
+  "transformers_version": "4.17.0",
   "use_weighted_layer_sum": false,
-  "vocab_size": 39
+  "vocab_size": 50,
+  "xvector_output_dim": 512
 }
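Note: the updated config bumps transformers_version to 4.17.0, changes pad_token_id from 36 to 1 and vocab_size from 39 to 50 to match the new vocab.json, and adds the adapter/TDNN/x-vector fields that newer transformers releases serialize by default. A minimal sketch of loading the checkpoint described by this config, assuming a placeholder repository id or local path (the actual repo name is not part of this commit view):

# Minimal sketch; "path/to/checkpoint" stands for this repository or a local clone of it.
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_pretrained("path/to/checkpoint")
print(config.vocab_size)      # 50 after this commit (was 39)
print(config.pad_token_id)    # 1 after this commit (was 36)

model = Wav2Vec2ForCTC.from_pretrained("path/to/checkpoint")  # loads pytorch_model.bin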
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:94403fe8c994757aeaec7d1aa29f6eb4939b69e447b973bdfa66315ca28289fc
-size 2490378769
+oid sha256:dbbd74245a32dfee65153384591d3e157fbaadbfa9ed6ba3e84bde67ecc0d26b
+size 2490469009
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d2adb27a665c0f58e24abcea96f3e4924947216cad0eaf5d8890f264304aa5dd
-size 1262083569
+oid sha256:a8832802a6f0497cac9b5409e1fbc323395d51b9df11522d9b57921ec8537ef6
+size 1262131313
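Note: the binary files in this commit are tracked through Git LFS, so the repository only stores pointer files carrying a SHA-256 oid and a byte size. A minimal sketch of checking a downloaded pytorch_model.bin against the pointer above (the local path is an assumption):

import hashlib, os

path = "pytorch_model.bin"  # assumed local copy of the LFS object
expected_oid = "a8832802a6f0497cac9b5409e1fbc323395d51b9df11522d9b57921ec8537ef6"
expected_size = 1262131313

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")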
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05b4485ec356bcab49053a2a07a44306616f88219c11bd6cd8c9e1148af4e7d5
+oid sha256:e3887bd9846cc5801fe950d093508213f86dcae29c59bc632aecab4edfdc9648
 size 14503
scaler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83a01e5ec7c6e9e42baf4502b3592fe5b3a70801cc1b41dc6175e27562b388c4
+oid sha256:8a6f2f23c0d6be14904a75536e0cd71a3982589a865f06899958ca265e06c661
 size 559
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:acc172b4559bbfe5e4502bb2f0d0f3157e9c54827074c2fd6be9036224745685
+oid sha256:1b50675021671aa65e03d08322cba2953ab219a5178e71b41f5c655d59cdae73
 size 623
trainer_state.json CHANGED
@@ -1,151 +1,312 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 7.72454448017149,
-  "global_step": 3600,
+  "epoch": 7.909466234149218,
+  "global_step": 6700,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.86,
-      "learning_rate": 0.0002394,
-      "loss": 3.2993,
-      "step": 400
+      "epoch": 0.4,
+      "eval_cer": 0.2550782058484374,
+      "eval_loss": 0.9434235095977783,
+      "eval_runtime": 65.8936,
+      "eval_samples_per_second": 7.588,
+      "eval_steps_per_second": 0.956,
+      "eval_wer": 0.8986276613768566,
+      "step": 335
     },
     {
-      "epoch": 0.86,
-      "eval_loss": Infinity,
-      "eval_runtime": 828.7984,
-      "eval_samples_per_second": 9.006,
-      "eval_steps_per_second": 1.126,
-      "eval_wer": 0.36457931084030065,
+      "epoch": 0.47,
+      "learning_rate": 0.00023999999999999998,
+      "loss": 3.5921,
       "step": 400
     },
     {
-      "epoch": 1.72,
-      "learning_rate": 0.0002723048327137546,
-      "loss": 0.3837,
-      "step": 800
+      "epoch": 0.79,
+      "eval_cer": 0.05617811418940895,
+      "eval_loss": 0.2126482129096985,
+      "eval_runtime": 65.2511,
+      "eval_samples_per_second": 7.663,
+      "eval_steps_per_second": 0.966,
+      "eval_wer": 0.20076805783023663,
+      "step": 670
     },
     {
-      "epoch": 1.72,
-      "eval_loss": Infinity,
-      "eval_runtime": 820.745,
-      "eval_samples_per_second": 9.094,
-      "eval_steps_per_second": 1.137,
-      "eval_wer": 0.25638285452569753,
+      "epoch": 0.94,
+      "learning_rate": 0.00028565965583173995,
+      "loss": 0.3347,
       "step": 800
     },
     {
-      "epoch": 2.57,
-      "learning_rate": 0.00023522304832713752,
-      "loss": 0.2576,
-      "step": 1200
+      "epoch": 1.19,
+      "eval_cer": 0.052689157623961445,
+      "eval_loss": 0.19026830792427063,
+      "eval_runtime": 66.2853,
+      "eval_samples_per_second": 7.543,
+      "eval_steps_per_second": 0.95,
+      "eval_wer": 0.18907776585531147,
+      "step": 1005
     },
     {
-      "epoch": 2.57,
-      "eval_loss": Infinity,
-      "eval_runtime": 816.7249,
-      "eval_samples_per_second": 9.139,
-      "eval_steps_per_second": 1.142,
-      "eval_wer": 0.17959103032697818,
+      "epoch": 1.42,
+      "learning_rate": 0.0002665391969407266,
+      "loss": 0.1948,
       "step": 1200
     },
     {
-      "epoch": 3.43,
-      "learning_rate": 0.0001981412639405204,
-      "loss": 0.1963,
-      "step": 1600
+      "epoch": 1.58,
+      "eval_cer": 0.04130569764347595,
+      "eval_loss": 0.1462544947862625,
+      "eval_runtime": 65.903,
+      "eval_samples_per_second": 7.587,
+      "eval_steps_per_second": 0.956,
+      "eval_wer": 0.13915400688993054,
+      "step": 1340
     },
     {
-      "epoch": 3.43,
-      "eval_loss": Infinity,
-      "eval_runtime": 819.1322,
-      "eval_samples_per_second": 9.112,
-      "eval_steps_per_second": 1.139,
-      "eval_wer": 0.1695059854696326,
+      "epoch": 1.89,
+      "learning_rate": 0.00024741873804971315,
+      "loss": 0.1737,
       "step": 1600
     },
     {
-      "epoch": 4.29,
-      "learning_rate": 0.00016096654275092934,
-      "loss": 0.158,
-      "step": 2000
+      "epoch": 1.98,
+      "eval_cer": 0.040093433074125544,
+      "eval_loss": 0.1409013271331787,
+      "eval_runtime": 66.3053,
+      "eval_samples_per_second": 7.541,
+      "eval_steps_per_second": 0.95,
+      "eval_wer": 0.13599141582425028,
+      "step": 1675
     },
     {
-      "epoch": 4.29,
-      "eval_loss": Infinity,
-      "eval_runtime": 815.3058,
-      "eval_samples_per_second": 9.155,
-      "eval_steps_per_second": 1.144,
-      "eval_wer": 0.16685675284904763,
+      "epoch": 2.36,
+      "learning_rate": 0.00022829827915869978,
+      "loss": 0.1466,
       "step": 2000
     },
     {
-      "epoch": 5.15,
-      "learning_rate": 0.00012388475836431226,
-      "loss": 0.125,
-      "step": 2400
+      "epoch": 2.37,
+      "eval_cer": 0.03887131269526822,
+      "eval_loss": 0.1429334431886673,
+      "eval_runtime": 65.9993,
+      "eval_samples_per_second": 7.576,
+      "eval_steps_per_second": 0.955,
+      "eval_wer": 0.13401479640820016,
+      "step": 2010
     },
     {
-      "epoch": 5.15,
-      "eval_loss": Infinity,
-      "eval_runtime": 815.712,
-      "eval_samples_per_second": 9.15,
-      "eval_steps_per_second": 1.144,
-      "eval_wer": 0.15157202769570643,
+      "epoch": 2.77,
+      "eval_cer": 0.03858549421956772,
+      "eval_loss": 0.1421019285917282,
+      "eval_runtime": 66.1016,
+      "eval_samples_per_second": 7.564,
+      "eval_steps_per_second": 0.953,
+      "eval_wer": 0.12678601682950247,
+      "step": 2345
+    },
+    {
+      "epoch": 2.83,
+      "learning_rate": 0.0002091778202676864,
+      "loss": 0.1378,
       "step": 2400
     },
     {
-      "epoch": 6.01,
-      "learning_rate": 8.680297397769517e-05,
-      "loss": 0.101,
-      "step": 2800
+      "epoch": 3.16,
+      "eval_cer": 0.03703813212698225,
+      "eval_loss": 0.1354704052209854,
+      "eval_runtime": 65.9839,
+      "eval_samples_per_second": 7.578,
+      "eval_steps_per_second": 0.955,
+      "eval_wer": 0.12305867735923647,
+      "step": 2680
     },
     {
-      "epoch": 6.01,
-      "eval_loss": Infinity,
-      "eval_runtime": 815.5191,
-      "eval_samples_per_second": 9.152,
-      "eval_steps_per_second": 1.144,
-      "eval_wer": 0.1440015087155263,
+      "epoch": 3.31,
+      "learning_rate": 0.00019005736137667304,
+      "loss": 0.1217,
       "step": 2800
     },
     {
-      "epoch": 6.87,
-      "learning_rate": 4.972118959107806e-05,
-      "loss": 0.0793,
-      "step": 3200
+      "epoch": 3.56,
+      "eval_cer": 0.034948700511516516,
+      "eval_loss": 0.13004331290721893,
+      "eval_runtime": 66.7534,
+      "eval_samples_per_second": 7.49,
+      "eval_steps_per_second": 0.944,
+      "eval_wer": 0.11176370926752131,
+      "step": 3015
     },
     {
-      "epoch": 6.87,
-      "eval_loss": Infinity,
-      "eval_runtime": 816.2609,
-      "eval_samples_per_second": 9.144,
-      "eval_steps_per_second": 1.143,
-      "eval_wer": 0.139223909548912,
+      "epoch": 3.78,
+      "learning_rate": 0.00017093690248565967,
+      "loss": 0.1121,
       "step": 3200
     },
     {
-      "epoch": 7.72,
-      "learning_rate": 1.2639405204460965e-05,
-      "loss": 0.0642,
-      "step": 3600
+      "epoch": 3.95,
+      "eval_cer": 0.03565831879601431,
+      "eval_loss": 0.1295933872461319,
+      "eval_runtime": 67.056,
+      "eval_samples_per_second": 7.456,
+      "eval_steps_per_second": 0.94,
+      "eval_wer": 0.11729824363246173,
+      "step": 3350
     },
     {
-      "epoch": 7.72,
-      "eval_loss": Infinity,
-      "eval_runtime": 815.052,
-      "eval_samples_per_second": 9.158,
-      "eval_steps_per_second": 1.145,
-      "eval_wer": 0.1354611011827252,
+      "epoch": 4.25,
+      "learning_rate": 0.00015181644359464624,
+      "loss": 0.1038,
       "step": 3600
+    },
+    {
+      "epoch": 4.35,
+      "eval_cer": 0.034948700511516516,
+      "eval_loss": 0.13280533254146576,
+      "eval_runtime": 66.5598,
+      "eval_samples_per_second": 7.512,
+      "eval_steps_per_second": 0.947,
+      "eval_wer": 0.11080363697972553,
+      "step": 3685
+    },
+    {
+      "epoch": 4.72,
+      "learning_rate": 0.00013269598470363287,
+      "loss": 0.0941,
+      "step": 4000
+    },
+    {
+      "epoch": 4.75,
+      "eval_cer": 0.035214807368203184,
+      "eval_loss": 0.12650151550769806,
+      "eval_runtime": 66.4767,
+      "eval_samples_per_second": 7.521,
+      "eval_steps_per_second": 0.948,
+      "eval_wer": 0.1124978821934828,
+      "step": 4020
+    },
+    {
+      "epoch": 5.14,
+      "eval_cer": 0.034012398608359695,
+      "eval_loss": 0.1327013224363327,
+      "eval_runtime": 66.3907,
+      "eval_samples_per_second": 7.531,
+      "eval_steps_per_second": 0.949,
+      "eval_wer": 0.10718924719037669,
+      "step": 4355
+    },
+    {
+      "epoch": 5.19,
+      "learning_rate": 0.0001135755258126195,
+      "loss": 0.0862,
+      "step": 4400
+    },
+    {
+      "epoch": 5.54,
+      "eval_cer": 0.0352246631777101,
+      "eval_loss": 0.1414576768875122,
+      "eval_runtime": 66.6897,
+      "eval_samples_per_second": 7.497,
+      "eval_steps_per_second": 0.945,
+      "eval_wer": 0.11294968091715141,
+      "step": 4690
+    },
+    {
+      "epoch": 5.67,
+      "learning_rate": 9.44550669216061e-05,
+      "loss": 0.0748,
+      "step": 4800
+    },
+    {
+      "epoch": 5.93,
+      "eval_cer": 0.03466288203581601,
+      "eval_loss": 0.13079801201820374,
+      "eval_runtime": 66.6475,
+      "eval_samples_per_second": 7.502,
+      "eval_steps_per_second": 0.945,
+      "eval_wer": 0.1101824137346812,
+      "step": 5025
+    },
+    {
+      "epoch": 6.14,
+      "learning_rate": 7.533460803059272e-05,
+      "loss": 0.0727,
+      "step": 5200
+    },
+    {
+      "epoch": 6.33,
+      "eval_cer": 0.03377585918019377,
+      "eval_loss": 0.1386057287454605,
+      "eval_runtime": 66.5005,
+      "eval_samples_per_second": 7.519,
+      "eval_steps_per_second": 0.947,
+      "eval_wer": 0.10470435421019936,
+      "step": 5360
+    },
+    {
+      "epoch": 6.61,
+      "learning_rate": 5.621414913957934e-05,
+      "loss": 0.0622,
+      "step": 5600
+    },
+    {
+      "epoch": 6.72,
+      "eval_cer": 0.0333717709904103,
+      "eval_loss": 0.14488892257213593,
+      "eval_runtime": 66.3875,
+      "eval_samples_per_second": 7.532,
+      "eval_steps_per_second": 0.949,
+      "eval_wer": 0.10509967809340938,
+      "step": 5695
+    },
+    {
+      "epoch": 7.08,
+      "learning_rate": 3.7093690248565965e-05,
+      "loss": 0.0582,
+      "step": 6000
+    },
+    {
+      "epoch": 7.12,
+      "eval_cer": 0.03320422222879276,
+      "eval_loss": 0.14533209800720215,
+      "eval_runtime": 66.3474,
+      "eval_samples_per_second": 7.536,
+      "eval_steps_per_second": 0.95,
+      "eval_wer": 0.10493025357203366,
+      "step": 6030
+    },
+    {
+      "epoch": 7.51,
+      "eval_cer": 0.03320422222879276,
+      "eval_loss": 0.14391696453094482,
+      "eval_runtime": 66.4699,
+      "eval_samples_per_second": 7.522,
+      "eval_steps_per_second": 0.948,
+      "eval_wer": 0.10515615293386796,
+      "step": 6365
+    },
+    {
+      "epoch": 7.56,
+      "learning_rate": 1.8021032504780113e-05,
+      "loss": 0.0493,
+      "step": 6400
+    },
+    {
+      "epoch": 7.91,
+      "eval_cer": 0.03264244108689867,
+      "eval_loss": 0.14469173550605774,
+      "eval_runtime": 65.8617,
+      "eval_samples_per_second": 7.592,
+      "eval_steps_per_second": 0.957,
+      "eval_wer": 0.10267125995369063,
+      "step": 6700
     }
   ],
-  "max_steps": 3728,
+  "max_steps": 6776,
   "num_train_epochs": 8,
-  "total_flos": 4.913815144448316e+19,
+  "total_flos": 2.1224029808181802e+20,
   "trial_name": null,
   "trial_params": null
 }
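Note: the new trainer state records a longer run (max_steps 6776 instead of 3728) and switches the evaluation logging from WER only, with a divergent eval_loss of Infinity, to finite eval_loss plus both WER and CER. A minimal sketch for pulling the evaluation curve out of a local trainer_state.json with this structure (the local path is an assumption):

import json

with open("trainer_state.json") as f:   # assumed local copy
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_wer" in entry:             # evaluation entries carry eval_* keys
        cer = entry.get("eval_cer")
        print(f"step {entry['step']:>5}  WER {entry['eval_wer']:.4f}"
              + (f"  CER {cer:.4f}" if cer is not None else ""))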
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e83dad711cbc55efc0a8db3d5cb03c651f7fc58cffbacaf147fa17c9a500477c
-size 2799
+oid sha256:903a958bf893998150c6891a9f369853f085dd31bfb492b34bf63bb942ca1cc7
+size 2991
vocab.json CHANGED
@@ -1 +1,3 @@
-{"?": 1, "a": 2, "b": 3, "c": 4, "d": 5, "e": 6, "f": 7, "g": 8, "h": 9, "i": 10, "j": 11, "k": 12, "l": 13, "m": 14, "n": 15, "o": 16, "p": 17, "q": 18, "r": 19, "s": 20, "t": 21, "u": 22, "v": 23, "w": 24, "x": 25, "y": 26, "z": 27, "\u00e4": 28, "\u00fc": 29, "\u0107": 30, "\u010d": 31, "\u0111": 32, "\u0161": 33, "\u017e": 34, "|": 0, "[UNK]": 35, "[PAD]": 36}
+{" ": 0, "[PAD]": 1, "[UNK]": 2, "a": 3, "b": 4, "c": 5, "d": 6, "e": 7, "f": 8, "g": 9, "h": 10, "i": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "o": 17, "p": 18, "q": 19, "r": 20, "s": 21, "t": 22, "u": 23, "v": 24, "w": 25, "x": 26, "y": 27, "z": 28, "\u00e4": 29, "\u00fc": 30, "\u0107": 31, "\u010d": 32, "\u0111": 33, "\u0161": 34, "\u017e": 35, "\u04e7": 36,
+"1":37, "2": 38,"3": 39,"4": 40,"5": 41,"6": 42,"7": 43,"8": 44,"9": 45,"0": 46,
+".": 47 }