samitizerxu committed
Commit eee07e6
1 Parent(s): d9fe652

Added model files

README.md ADDED
@@ -0,0 +1,70 @@
+ ---
+ language:
+ - es
+ license: apache-2.0
+ tags:
+ - automatic-speech-recognition
+ - common_voice
+ - generated_from_trainer
+ datasets:
+ - common_voice
+ model-index:
+ - name: wav2vec2-cls-r-300m-es
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # wav2vec2-cls-r-300m-es
+
+ This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the COMMON_VOICE - ES dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.5160
+ - Wer: 0.4016
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0003
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 8.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Wer |
+ |:-------------:|:-----:|:----:|:---------------:|:------:|
+ | 3.1277 | 1.14 | 500 | 2.0259 | 0.9999 |
+ | 1.4111 | 2.28 | 1000 | 1.1251 | 0.8894 |
+ | 0.8461 | 3.42 | 1500 | 0.8205 | 0.7244 |
+ | 0.5042 | 4.57 | 2000 | 0.6116 | 0.5463 |
+ | 0.3072 | 5.71 | 2500 | 0.5507 | 0.4506 |
+ | 0.2181 | 6.85 | 3000 | 0.5213 | 0.4177 |
+ | 0.1608 | 7.99 | 3500 | 0.5161 | 0.4019 |
+
+
+ ### Framework versions
+
+ - Transformers 4.17.0.dev0
+ - Pytorch 1.10.2+cu102
+ - Datasets 1.18.2.dev0
+ - Tokenizers 0.11.0
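
The README above reports results only; as a usage sketch (not part of this commit), the added model files can be loaded for Spanish ASR roughly as follows. The repo ID and audio path are placeholders, and `transformers`, `torch`, and `librosa` are assumed to be installed:

```python
# Minimal inference sketch; the repo ID and audio path are placeholders.
import torch
import librosa
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "samitizerxu/wav2vec2-cls-r-300m-es"  # hypothetical Hub ID for this repo
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)

# The feature extractor expects 16 kHz mono audio (see preprocessor_config.json below).
speech, _ = librosa.load("example.wav", sr=16_000)

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Greedy CTC decoding: argmax per frame, then collapse repeats and blanks.
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```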
added_tokens.json ADDED
@@ -0,0 +1 @@
+ {"<s>": 68, "</s>": 69}
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 8.0,
+ "eval_loss": 0.5159734487533569,
+ "eval_runtime": 175.5351,
+ "eval_samples": 3500,
+ "eval_samples_per_second": 19.939,
+ "eval_steps_per_second": 2.495,
+ "eval_wer": 0.4015755133948706,
+ "train_loss": 0.9386324367770865,
+ "train_runtime": 4216.4813,
+ "train_samples": 7000,
+ "train_samples_per_second": 13.281,
+ "train_steps_per_second": 0.831
+ }
checkpoint-3450/config.json ADDED
@@ -0,0 +1,107 @@
+ {
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
+ "activation_dropout": 0.0,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 768,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.0,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.0,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.0,
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_prob": 0.05,
+ "model_type": "wav2vec2",
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 67,
+ "proj_codevector_dim": 768,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.17.0.dev0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 70,
+ "xvector_output_dim": 512
+ }
checkpoint-3450/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43266183f86e759c109db8d25ef69db7700d7c0c9a89a1d78ff11dda197374f5
+ size 2490632977
checkpoint-3450/preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
checkpoint-3450/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fea34cae97ec757af794f7419a4e2f7bbd645ed8cc144687e02e3afc30c5b36a
+ size 1262210673
checkpoint-3450/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2245792f4de9a99b157a2197b3c8813d18ae12216a3507290241bc22f3e987df
+ size 14567
checkpoint-3450/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c7e0b4359f8962098d8fd507a364d1ce043548ef8362daf6cdbbc79fb55c984
+ size 559
checkpoint-3450/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06bb2293e52a1e21a65c4c31cf2cd9297e007731fa2cc5decd28c3df11b31ec2
+ size 623
checkpoint-3450/trainer_state.json ADDED
@@ -0,0 +1,106 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 7.876712328767123,
+ "global_step": 3450,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.14,
+ "learning_rate": 0.0002574486301369863,
+ "loss": 3.1277,
+ "step": 500
+ },
+ {
+ "epoch": 1.14,
+ "eval_loss": 2.02591609954834,
+ "eval_runtime": 179.7935,
+ "eval_samples_per_second": 19.467,
+ "eval_steps_per_second": 2.436,
+ "eval_wer": 0.9999398659009592,
+ "step": 500
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 0.0002146404109589041,
+ "loss": 1.4111,
+ "step": 1000
+ },
+ {
+ "epoch": 2.28,
+ "eval_loss": 1.125086784362793,
+ "eval_runtime": 179.8421,
+ "eval_samples_per_second": 19.462,
+ "eval_steps_per_second": 2.435,
+ "eval_wer": 0.889383324814336,
+ "step": 1000
+ },
+ {
+ "epoch": 3.42,
+ "learning_rate": 0.00017183219178082193,
+ "loss": 0.8461,
+ "step": 1500
+ },
+ {
+ "epoch": 3.42,
+ "eval_loss": 0.8204647302627563,
+ "eval_runtime": 183.3614,
+ "eval_samples_per_second": 19.088,
+ "eval_steps_per_second": 2.389,
+ "eval_wer": 0.724375357046213,
+ "step": 1500
+ },
+ {
+ "epoch": 4.57,
+ "learning_rate": 0.00012902397260273972,
+ "loss": 0.5042,
+ "step": 2000
+ },
+ {
+ "epoch": 4.57,
+ "eval_loss": 0.6116402745246887,
+ "eval_runtime": 180.1904,
+ "eval_samples_per_second": 19.424,
+ "eval_steps_per_second": 2.431,
+ "eval_wer": 0.5462882227367029,
+ "step": 2000
+ },
+ {
+ "epoch": 5.71,
+ "learning_rate": 8.621575342465752e-05,
+ "loss": 0.3072,
+ "step": 2500
+ },
+ {
+ "epoch": 5.71,
+ "eval_loss": 0.5506712198257446,
+ "eval_runtime": 180.8043,
+ "eval_samples_per_second": 19.358,
+ "eval_steps_per_second": 2.423,
+ "eval_wer": 0.45055473706365196,
+ "step": 2500
+ },
+ {
+ "epoch": 6.85,
+ "learning_rate": 4.340753424657534e-05,
+ "loss": 0.2181,
+ "step": 3000
+ },
+ {
+ "epoch": 6.85,
+ "eval_loss": 0.5213309526443481,
+ "eval_runtime": 179.0608,
+ "eval_samples_per_second": 19.546,
+ "eval_steps_per_second": 2.446,
+ "eval_wer": 0.4177215189873418,
+ "step": 3000
+ }
+ ],
+ "max_steps": 3504,
+ "num_train_epochs": 8,
+ "total_flos": 7.313890427943137e+18,
+ "trial_name": null,
+ "trial_params": null
+ }
checkpoint-3450/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a24f7a88f43f1dc912b86bff41a4f4a9653d396ed16824971198ad28a36c4c8
+ size 3055
config.json CHANGED
@@ -1,12 +1,12 @@
  {
- "_name_or_path": "./wav2vec2-cls-r-300m-es",
+ "_name_or_path": "facebook/wav2vec2-xls-r-300m",
  "activation_dropout": 0.0,
  "adapter_kernel_size": 3,
  "adapter_stride": 2,
  "add_adapter": false,
  "apply_spec_augment": true,
  "architectures": [
- "Wav2Vec2Model"
+ "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.0,
  "bos_token_id": 1,
eval_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 8.0,
+ "eval_loss": 0.5159734487533569,
+ "eval_runtime": 175.5351,
+ "eval_samples": 3500,
+ "eval_samples_per_second": 19.939,
+ "eval_steps_per_second": 2.495,
+ "eval_wer": 0.4015755133948706
+ }
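
The `eval_wer` of ≈0.402 is a word error rate: edit-distance errors (substitutions, insertions, deletions) divided by the number of reference words. A toy sketch of the computation with the `wer` metric from `datasets` (the example strings are made up):

```python
# WER = (substitutions + insertions + deletions) / number of reference words.
from datasets import load_metric  # datasets 1.18.x, as pinned in the README

wer_metric = load_metric("wer")
predictions = ["hola mundo", "buenos dias"]        # made-up hypotheses
references = ["hola mundo", "buenos días amigo"]   # made-up references

# 2 errors (1 substitution, 1 deletion) over 5 reference words -> 0.4
print(wer_metric.compute(predictions=predictions, references=references))
```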
preprocessor_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "do_normalize": true,
+ "feature_extractor_type": "Wav2Vec2FeatureExtractor",
+ "feature_size": 1,
+ "padding_side": "right",
+ "padding_value": 0,
+ "return_attention_mask": true,
+ "sampling_rate": 16000
+ }
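
The feature extractor expects mono 16 kHz input with normalization and returns an attention mask. A resampling sketch, assuming `torchaudio` is available and using placeholder file paths:

```python
# Resampling sketch: bring a clip to the mono 16 kHz layout the extractor expects.
import torchaudio
import torchaudio.functional as F
from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained("./wav2vec2-cls-r-300m-es")  # placeholder path

waveform, sample_rate = torchaudio.load("clip.mp3")   # placeholder clip
waveform = waveform.mean(dim=0)                       # downmix to mono
if sample_rate != 16_000:
    waveform = F.resample(waveform, sample_rate, 16_000)

features = extractor(waveform.numpy(), sampling_rate=16_000, return_tensors="pt")
print({name: tensor.shape for name, tensor in features.items()})
```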
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7f568ca58788b1feb21fa78f4d182d7becf4ff4b3b4bfaf6eaf9878b6730aef1
- size 1261910005
+ oid sha256:8aec1df821abafa72292bb7a6417d56e0e31d55cf5b4e974671f377d5d8241d1
+ size 1262210673
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "./wav2vec2-cls-r-300m-es", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 8.0,
+ "train_loss": 0.9386324367770865,
+ "train_runtime": 4216.4813,
+ "train_samples": 7000,
+ "train_samples_per_second": 13.281,
+ "train_steps_per_second": 0.831
+ }
trainer_state.json ADDED
@@ -0,0 +1,130 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 8.0,
+ "global_step": 3504,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.14,
+ "learning_rate": 0.0002574486301369863,
+ "loss": 3.1277,
+ "step": 500
+ },
+ {
+ "epoch": 1.14,
+ "eval_loss": 2.02591609954834,
+ "eval_runtime": 179.7935,
+ "eval_samples_per_second": 19.467,
+ "eval_steps_per_second": 2.436,
+ "eval_wer": 0.9999398659009592,
+ "step": 500
+ },
+ {
+ "epoch": 2.28,
+ "learning_rate": 0.0002146404109589041,
+ "loss": 1.4111,
+ "step": 1000
+ },
+ {
+ "epoch": 2.28,
+ "eval_loss": 1.125086784362793,
+ "eval_runtime": 179.8421,
+ "eval_samples_per_second": 19.462,
+ "eval_steps_per_second": 2.435,
+ "eval_wer": 0.889383324814336,
+ "step": 1000
+ },
+ {
+ "epoch": 3.42,
+ "learning_rate": 0.00017183219178082193,
+ "loss": 0.8461,
+ "step": 1500
+ },
+ {
+ "epoch": 3.42,
+ "eval_loss": 0.8204647302627563,
+ "eval_runtime": 183.3614,
+ "eval_samples_per_second": 19.088,
+ "eval_steps_per_second": 2.389,
+ "eval_wer": 0.724375357046213,
+ "step": 1500
+ },
+ {
+ "epoch": 4.57,
+ "learning_rate": 0.00012902397260273972,
+ "loss": 0.5042,
+ "step": 2000
+ },
+ {
+ "epoch": 4.57,
+ "eval_loss": 0.6116402745246887,
+ "eval_runtime": 180.1904,
+ "eval_samples_per_second": 19.424,
+ "eval_steps_per_second": 2.431,
+ "eval_wer": 0.5462882227367029,
+ "step": 2000
+ },
+ {
+ "epoch": 5.71,
+ "learning_rate": 8.621575342465752e-05,
+ "loss": 0.3072,
+ "step": 2500
+ },
+ {
+ "epoch": 5.71,
+ "eval_loss": 0.5506712198257446,
+ "eval_runtime": 180.8043,
+ "eval_samples_per_second": 19.358,
+ "eval_steps_per_second": 2.423,
+ "eval_wer": 0.45055473706365196,
+ "step": 2500
+ },
+ {
+ "epoch": 6.85,
+ "learning_rate": 4.340753424657534e-05,
+ "loss": 0.2181,
+ "step": 3000
+ },
+ {
+ "epoch": 6.85,
+ "eval_loss": 0.5213309526443481,
+ "eval_runtime": 179.0608,
+ "eval_samples_per_second": 19.546,
+ "eval_steps_per_second": 2.446,
+ "eval_wer": 0.4177215189873418,
+ "step": 3000
+ },
+ {
+ "epoch": 7.99,
+ "learning_rate": 5.993150684931506e-07,
+ "loss": 0.1608,
+ "step": 3500
+ },
+ {
+ "epoch": 7.99,
+ "eval_loss": 0.5160985589027405,
+ "eval_runtime": 181.598,
+ "eval_samples_per_second": 19.273,
+ "eval_steps_per_second": 2.412,
+ "eval_wer": 0.40187618389007485,
+ "step": 3500
+ },
+ {
+ "epoch": 8.0,
+ "step": 3504,
+ "total_flos": 7.417092095436165e+18,
+ "train_loss": 0.9386324367770865,
+ "train_runtime": 4216.4813,
+ "train_samples_per_second": 13.281,
+ "train_steps_per_second": 0.831
+ }
+ ],
+ "max_steps": 3504,
+ "num_train_epochs": 8,
+ "total_flos": 7.417092095436165e+18,
+ "trial_name": null,
+ "trial_params": null
+ }
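
`trainer_state.json` retains the full `log_history`, so the evaluation WER curve can be recovered offline; a small sketch (reading the file by its plain name is an assumption about the working directory):

```python
# Sketch: extract the eval WER curve from trainer_state.json's log_history.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

wer_by_step = [(e["step"], e["eval_wer"]) for e in state["log_history"] if "eval_wer" in e]
for step, wer in wer_by_step:
    print(f"step {step}: WER {wer:.4f}")
```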
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9a24f7a88f43f1dc912b86bff41a4f4a9653d396ed16824971198ad28a36c4c8
+ size 3055
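
`training_args.bin` is a serialized `TrainingArguments` object. A sketch of arguments consistent with the hyperparameters listed in the README (the output path is a placeholder and unlisted options are left at their defaults):

```python
# Sketch of TrainingArguments matching the hyperparameters reported in the README;
# assumes a CUDA device for fp16, matching the cu102 build noted in the card.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./wav2vec2-cls-r-300m-es",  # placeholder output path
    learning_rate=3e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    num_train_epochs=8.0,
    lr_scheduler_type="linear",
    seed=42,
    fp16=True,  # "Native AMP" mixed precision
)
```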
vocab.json ADDED
@@ -0,0 +1 @@
+ {"!": 1, "\"": 2, "'": 3, ",": 4, "-": 5, ".": 6, ":": 7, ";": 8, "?": 9, "_": 10, "`": 11, "a": 12, "b": 13, "c": 14, "d": 15, "e": 16, "f": 17, "g": 18, "h": 19, "i": 20, "j": 21, "k": 22, "l": 23, "m": 24, "n": 25, "o": 26, "p": 27, "q": 28, "r": 29, "s": 30, "t": 31, "u": 32, "v": 33, "w": 34, "x": 35, "y": 36, "z": 37, "¡": 38, "¿": 39, "ß": 40, "à": 41, "á": 42, "â": 43, "ã": 44, "ä": 45, "é": 46, "ê": 47, "í": 48, "ñ": 49, "ó": 50, "ö": 51, "ú": 52, "ü": 53, "ā": 54, "ł": 55, "ō": 56, "ź": 57, "‘": 58, "’": 59, "“": 60, "…": 61, "ひ": 62, "ヒ": 63, "比": 64, "": 65, "|": 0, "[UNK]": 66, "[PAD]": 67}