ilyes rebai committed
Commit: f154900
Parent(s): 472d9af

add new models with high performance
Browse files
- README.md +17 -0
- config.json → v0.1/config.json +0 -0
- preprocessor_config.json → v0.1/preprocessor_config.json +0 -0
- pytorch_model.bin → v0.1/pytorch_model.bin +0 -0
- special_tokens_map.json → v0.1/special_tokens_map.json +0 -0
- tokenizer_config.json → v0.1/tokenizer_config.json +0 -0
- vocab.json → v0.1/vocab.json +0 -0
- v1.0/config.json +78 -0
- v1.0/preprocessor_config.json +8 -0
- v1.0/pytorch_model.bin +3 -0
- v1.0/results.txt +2 -0
- v1.0/special_tokens_map.json +1 -0
- v1.0/text.txt +0 -0
- v1.0/tokenizer_config.json +1 -0
- v1.0/trans.txt +0 -0
- v1.0/vocab.json +1 -0
- v2.0/config.json +78 -0
- v2.0/preprocessor_config.json +8 -0
- v2.0/pytorch_model.bin +3 -0
- v2.0/results.txt +2 -0
- v2.0/special_tokens_map.json +1 -0
- v2.0/text.txt +0 -0
- v2.0/tokenizer_config.json +1 -0
- v2.0/trans.txt +0 -0
- v2.0/vocab.json +1 -0
README.md
CHANGED
@@ -23,6 +23,9 @@ model-index:
     value: 20.89%
 ---
 ## Evaluation on Common Voice FR Test
+The script used for training and evaluation can be found here: https://github.com/irebai/wav2vec2
+
+
 ```python
 import torch
 import torchaudio
@@ -73,6 +76,20 @@ print(wer.compute(predictions=result["predicted"], references=result["target"]))
 
 ## Results
 
+# v0.1
+
 WER=18.29%
 
 SER=71.44%
+
+# v1.0
+
+WER=15.97%
+
+CER=5.51%
+
+# v2.0
+
+WER=14.71%
+
+CER=5.06%
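Note: after this commit the repository root no longer carries config.json or pytorch_model.bin; each model version lives in its own subfolder (v0.1/, v1.0/, v2.0/) and has to be loaded from there. A minimal sketch (not from this commit) of pulling one version, assuming a current transformers and huggingface_hub install; the repo id below is a placeholder:

```python
# Minimal sketch: load one versioned checkpoint from this repository.
# REPO_ID is a hypothetical placeholder for the Hub id of this model repo.
from huggingface_hub import snapshot_download
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

REPO_ID = "<this-repo-id>"               # placeholder
local_dir = snapshot_download(REPO_ID)   # fetches v0.1/, v1.0/ and v2.0/

# Each version is self-contained: config, preprocessor config, tokenizer files, weights.
model = Wav2Vec2ForCTC.from_pretrained(f"{local_dir}/v2.0").eval()
processor = Wav2Vec2Processor.from_pretrained(f"{local_dir}/v2.0")
```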
config.json → v0.1/config.json
RENAMED
File without changes

preprocessor_config.json → v0.1/preprocessor_config.json
RENAMED
File without changes

pytorch_model.bin → v0.1/pytorch_model.bin
RENAMED
File without changes

special_tokens_map.json → v0.1/special_tokens_map.json
RENAMED
File without changes

tokenizer_config.json → v0.1/tokenizer_config.json
RENAMED
File without changes

vocab.json → v0.1/vocab.json
RENAMED
File without changes
v1.0/config.json
ADDED
@@ -0,0 +1,78 @@
{
  "_name_or_path": "/workspace/output_models/wav2vec2-large-xlsr-53/checkpoint-71900",
  "activation_dropout": 0.055,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.094,
  "bos_token_id": 1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.04,
  "final_dropout": 0.0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_dropout": 0.047,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.041,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.4,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "pad_token_id": 46,
  "pooling_type": "max",
  "time_pooling_size": 1,
  "transformers_version": "4.5.0.dev0",
  "vocab_size": 47
}
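The config describes a wav2vec2 large architecture (24 hidden layers, hidden size 1024) with a CTC head over a 47-token character vocabulary, continued from a local wav2vec2-large-xlsr-53 checkpoint. A minimal sketch (not from this commit), assuming the file has been downloaded locally, of instantiating the architecture from it without the weights:

```python
# Minimal sketch: build the architecture described by v1.0/config.json
# (randomly initialised; the trained weights live in v1.0/pytorch_model.bin).
from transformers import Wav2Vec2Config, Wav2Vec2ForCTC

config = Wav2Vec2Config.from_json_file("v1.0/config.json")
model = Wav2Vec2ForCTC(config)
print(config.num_hidden_layers, config.hidden_size, config.vocab_size)  # 24 1024 47
print(config.pad_token_id)  # 46 -> index of "<pad>" in v1.0/vocab.json
```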
v1.0/preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "do_normalize": true,
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": true,
  "sampling_rate": 16000
}
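The preprocessor expects normalised 16 kHz mono input with an attention mask. A small sketch (not from this commit, with a hypothetical input file) of bringing arbitrary audio to that format before feature extraction:

```python
# Minimal sketch: resample audio to the 16 kHz expected by this preprocessor config.
import torchaudio
from transformers import Wav2Vec2FeatureExtractor

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("v1.0")  # reads preprocessor_config.json
speech, sr = torchaudio.load("example.wav")                           # hypothetical input file
if sr != feature_extractor.sampling_rate:
    speech = torchaudio.transforms.Resample(sr, feature_extractor.sampling_rate)(speech)
inputs = feature_extractor(speech.squeeze().numpy(), sampling_rate=16000, return_tensors="pt")
print(inputs.input_values.shape)
```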
v1.0/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0922012f44439677c60c929ef48938ec2c3ae6ec717523cae13988a2d22bfd99
size 1262144708
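The .bin entries in this commit are Git LFS pointers (version, oid, size) rather than the binary weights; the roughly 1.26 GB blob is resolved by git-lfs or the Hub on download. A small sketch (not from this commit) that reads such a pointer:

```python
# Minimal sketch: parse a Git LFS pointer file and report the blob it points to.
def parse_lfs_pointer(path):
    fields = {}
    with open(path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = parse_lfs_pointer("v1.0/pytorch_model.bin")
print(ptr["oid"])                    # sha256:0922012f4443...
print(int(ptr["size"]) / 1e9, "GB")  # ~1.26 GB
```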
v1.0/results.txt
ADDED
@@ -0,0 +1,2 @@
WER=15.97816949508748
CER=5.518223907734944
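results.txt stores the unrounded scores summarised in the README. A sketch (not from this commit) of recomputing them from the two text files added next to it, assuming jiwer is installed and that text.txt holds the references and trans.txt the model transcriptions (the commit does not label them):

```python
# Minimal sketch: recompute WER/CER from the reference and transcription files.
# Which file is which is an assumption; swap them if needed.
import jiwer

with open("v1.0/text.txt") as f:
    references = [line.strip() for line in f if line.strip()]
with open("v1.0/trans.txt") as f:
    hypotheses = [line.strip() for line in f if line.strip()]

print("WER =", 100 * jiwer.wer(references, hypotheses))
print("CER =", 100 * jiwer.cer(references, hypotheses))
```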
v1.0/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "<unk>", "pad_token": "<pad>"}
v1.0/text.txt
ADDED
The diff for this file is too large to render. See raw diff.
v1.0/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "<unk>", "bos_token": null, "eos_token": null, "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
v1.0/trans.txt
ADDED
The diff for this file is too large to render. See raw diff.
v1.0/vocab.json
ADDED
@@ -0,0 +1 @@
{"a": 0, "e": 1, "i": 2, "o": 3, "u": 4, "y": 5, "b": 6, "c": 7, "d": 8, "f": 9, "g": 10, "h": 11, "j": 12, "k": 13, "l": 14, "m": 15, "n": 16, "p": 17, "q": 18, "r": 19, "s": 20, "t": 21, "v": 22, "w": 23, "x": 24, "z": 25, "à": 26, "â": 27, "æ": 28, "ç": 29, "è": 30, "é": 31, "ê": 32, "ë": 33, "î": 34, "ï": 35, "ô": 36, "œ": 37, "ù": 38, "û": 39, "ü": 40, "ÿ": 41, "'": 42, "-": 43, "|": 44, "<unk>": 45, "<pad>": 46}
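The v1.0 vocabulary keeps the letters first and places "<unk>" and "<pad>" at the end (indices 45 and 46, matching pad_token_id in v1.0/config.json). A sketch (not from this commit) of building the character-level CTC tokenizer directly from it, mirroring v1.0/tokenizer_config.json:

```python
# Minimal sketch: build the CTC tokenizer from the added vocabulary.
from transformers import Wav2Vec2CTCTokenizer

tokenizer = Wav2Vec2CTCTokenizer(
    "v1.0/vocab.json",
    bos_token=None,
    eos_token=None,
    unk_token="<unk>",
    pad_token="<pad>",
    word_delimiter_token="|",
    do_lower_case=False,
)
print(tokenizer.pad_token_id)                             # 46
print(tokenizer.decode(tokenizer("bonjour").input_ids))   # "bonjour"
```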
v2.0/config.json
ADDED
@@ -0,0 +1,78 @@
{
  "_name_or_path": "facebook/wav2vec2-large-xlsr-53",
  "activation_dropout": 0.055,
  "apply_spec_augment": true,
  "architectures": [
    "Wav2Vec2ForCTC"
  ],
  "attention_dropout": 0.094,
  "bos_token_id": 1,
  "conv_bias": true,
  "conv_dim": [
    512,
    512,
    512,
    512,
    512,
    512,
    512
  ],
  "conv_kernel": [
    10,
    3,
    3,
    3,
    3,
    2,
    2
  ],
  "conv_stride": [
    5,
    2,
    2,
    2,
    2,
    2,
    2
  ],
  "ctc_loss_reduction": "mean",
  "ctc_zero_infinity": false,
  "do_stable_layer_norm": true,
  "eos_token_id": 2,
  "feat_extract_activation": "gelu",
  "feat_extract_dropout": 0.0,
  "feat_extract_norm": "layer",
  "feat_proj_dropout": 0.04,
  "final_dropout": 0.0,
  "gradient_checkpointing": true,
  "hidden_act": "gelu",
  "hidden_dropout": 0.047,
  "hidden_size": 1024,
  "initializer_range": 0.02,
  "intermediate_size": 4096,
  "layer_norm_eps": 1e-05,
  "layerdrop": 0.041,
  "mask_channel_length": 10,
  "mask_channel_min_space": 1,
  "mask_channel_other": 0.0,
  "mask_channel_prob": 0.0,
  "mask_channel_selection": "static",
  "mask_feature_length": 10,
  "mask_feature_prob": 0.0,
  "mask_time_length": 10,
  "mask_time_min_space": 1,
  "mask_time_other": 0.0,
  "mask_time_prob": 0.2,
  "mask_time_selection": "static",
  "model_type": "wav2vec2",
  "num_attention_heads": 16,
  "num_conv_pos_embedding_groups": 16,
  "num_conv_pos_embeddings": 128,
  "num_feat_extract_layers": 7,
  "num_hidden_layers": 24,
  "pad_token_id": 1,
  "pooling_type": "max",
  "time_pooling_size": 1,
  "transformers_version": "4.5.0.dev0",
  "vocab_size": 47
}
v2.0/preprocessor_config.json
ADDED
@@ -0,0 +1,8 @@
{
  "do_normalize": true,
  "feature_size": 1,
  "padding_side": "right",
  "padding_value": 0.0,
  "return_attention_mask": true,
  "sampling_rate": 16000
}
v2.0/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:536491b9fe83421402af342b175daba8ea082099c742492c4e5000403833b7bf
size 1262144708
v2.0/results.txt
ADDED
@@ -0,0 +1,2 @@
WER=14.711290915777859
CER=5.068944791354929
v2.0/special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "<bos>", "eos_token": "<eos>", "unk_token": "<unk>", "pad_token": "<pad>"}
v2.0/text.txt
ADDED
The diff for this file is too large to render. See raw diff.
v2.0/tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"unk_token": "<unk>", "bos_token": "<bos>", "eos_token": "<eos>", "pad_token": "<pad>", "do_lower_case": false, "word_delimiter_token": "|"}
v2.0/trans.txt
ADDED
The diff for this file is too large to render. See raw diff.
v2.0/vocab.json
ADDED
@@ -0,0 +1 @@
{"<unk>": 0, "<pad>": 1, "a": 2, "e": 3, "i": 4, "o": 5, "u": 6, "y": 7, "b": 8, "c": 9, "d": 10, "f": 11, "g": 12, "h": 13, "j": 14, "k": 15, "l": 16, "m": 17, "n": 18, "p": 19, "q": 20, "r": 21, "s": 22, "t": 23, "v": 24, "w": 25, "x": 26, "z": 27, "à": 28, "â": 29, "æ": 30, "ç": 31, "è": 32, "é": 33, "ê": 34, "ë": 35, "î": 36, "ï": 37, "ô": 38, "œ": 39, "ù": 40, "û": 41, "ü": 42, "ÿ": 43, "'": 44, "-": 45, "|": 46}
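Compared with v1.0, the v2.0 files differ in three visible ways: fine-tuning restarts from facebook/wav2vec2-large-xlsr-53 instead of a local checkpoint, mask_time_prob drops from 0.4 to 0.2, and the vocabulary is reordered so that "<unk>" and "<pad>" sit at indices 0 and 1 (pad_token_id 1 instead of 46), with explicit "<bos>"/"<eos>" entries in the special tokens map. A small consistency check across both versions (not from this commit):

```python
# Minimal sketch: verify that each version's pad_token_id matches the index of
# "<pad>" in its own vocabulary.
import json

for version in ("v1.0", "v2.0"):
    with open(f"{version}/vocab.json") as f:
        vocab = json.load(f)
    with open(f"{version}/config.json") as f:
        config = json.load(f)
    assert vocab["<pad>"] == config["pad_token_id"]
    print(version, "pad index:", vocab["<pad>"])  # v1.0 -> 46, v2.0 -> 1
```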