jonatasgrosman committed
Commit bbd3439
1 Parent(s): 43759aa

update model

Files changed (5)
  1. README.md +19 -11
  2. config.json +19 -4
  3. preprocessor_config.json +1 -0
  4. pytorch_model.bin +2 -2
  5. vocab.json +1 -1
README.md CHANGED
@@ -24,10 +24,10 @@ model-index:
     metrics:
     - name: Test WER
       type: wer
-      value: 11.85
+      value: 10.55
     - name: Test CER
       type: cer
-      value: 3.17
+      value: 2.81
 ---
 
 # Wav2Vec2-Large-XLSR-53-German
@@ -49,7 +49,7 @@ from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
 
 LANG_ID = "de"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-german"
-SAMPLES = 5
+SAMPLES = 10
 
 test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]")
 
@@ -82,10 +82,15 @@ for i, predicted_sentence in enumerate(predicted_sentences):
 | Reference | Prediction |
 | ------------- | ------------- |
 | ZIEHT EUCH BITTE DRAUSSEN DIE SCHUHE AUS. | ZIEHT EUCH BITTE DRAUSSEN DIE SCHUHE AUS |
-| ES KOMMT ZUM SHOWDOWN IN GSTAAD. | ES GRONTEHILSCHONDEBAR ENBESTACDEN |
-| IHRE FOTOSTRECKEN ERSCHIENEN IN MODEMAGAZINEN WIE DER VOGUE, HARPER’S BAZAAR UND MARIE CLAIRE. | IHRE FROTESTRECKEN ERSCHIENEN IN MODEMAGAZINEN WIE DER VOLKE-APERS BASAR VAREQER |
-| FELIPE HAT EINE AUCH FÜR MONARCHEN UNGEWÖHNLICH LANGE TITELLISTE. | FIELIPPE HATE EINE AUCH FÜR MONACHEN UNGEWÖHNLICH LANGE TITELLISTE |
-| ER WURDE ZU EHREN DES REICHSKANZLERS OTTO VON BISMARCK ERRICHTET. | ER WURDE ZU EHREN DES REICHSKANZLERS OTTO VON BISMARK ERRICHTET |
+| ES KOMMT ZUM SHOWDOWN IN GSTAAD. | ES KOMMT ZUG STUNDEDAUTENESTERKT |
+| IHRE FOTOSTRECKEN ERSCHIENEN IN MODEMAGAZINEN WIE DER VOGUE, HARPER’S BAZAAR UND MARIE CLAIRE. | IHRE FOTELSTRECKEN ERSCHIENEN MIT MODEMAGAZINEN WIE DER VALG AT DAS BASIN MA RIQUAIR |
+| FELIPE HAT EINE AUCH FÜR MONARCHEN UNGEWÖHNLICH LANGE TITELLISTE. | FELIPPE HAT EINE AUCH FÜR MONACHEN UNGEWÖHNLICH LANGE TITELLISTE |
+| ER WURDE ZU EHREN DES REICHSKANZLERS OTTO VON BISMARCK ERRICHTET. | ER WURDE ZU EHREN DES REICHSKANZLERS OTTO VON BISMARCK ERRICHTET M |
+| WAS SOLLS, ICH BIN BEREIT. | WAS SOLL'S ICH BIN BEREIT |
+| DAS INTERNET BESTEHT AUS VIELEN COMPUTERN, DIE MITEINANDER VERBUNDEN SIND. | DAS INTERNET BESTEHT AUS VIELEN COMPUTERN DIE MITEINANDER VERBUNDEN SIND |
+| DER URANUS IST DER SIEBENTE PLANET IN UNSEREM SONNENSYSTEM. | DER URANUS IST DER SIEBENTE PLANET IN UNSEREM SONNENSYSTEM |
+| DIE WAGEN ERHIELTEN EIN EINHEITLICHES ERSCHEINUNGSBILD IN WEISS MIT ROTEM FENSTERBAND. | DIE WAGEN ERHIELTEN EIN EINHEITLICHES ERSCHEINUNGSBILD IN WEISS MIT ROTEM FENSTERBAND |
+| SIE WAR DIE COUSINE VON CARL MARIA VON WEBER. | SIE WAR DIE COUSINE VON KARL-MARIA VON WEBER |
 
 ## Evaluation
 
@@ -102,9 +107,11 @@ LANG_ID = "de"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-german"
 DEVICE = "cuda"
 
-CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
+CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", "；", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                    "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
-                   "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。"]
+                   "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
+                   "、", "﹂", "﹁", "‧", "～", "﹏", "，", "｛", "｝", "（", "）", "［", "］", "【", "】", "‥", "〽",
+                   "『", "』", "〝", "〟", "⟨", "⟩", "〜", "：", "！", "？", "♪", "؛", "/", "\\", "º", "−", "^", "ʻ", "ˆ"]
 
 test_dataset = load_dataset("common_voice", LANG_ID, split="test")
 
@@ -152,11 +159,12 @@ print(f"CER: {cer.compute(predictions=predictions, references=references, chunk_
 
 **Test Result**:
 
-In the table below I report the Word Error Rate (WER) and the Character Error Rate (CER) of the model. I ran the evaluation script described above on other models as well (on 2021-04-22). Note that the table below may show different results from those already reported; this may be due to specificities of the other evaluation scripts used.
+In the table below I report the Word Error Rate (WER) and the Character Error Rate (CER) of the model. I ran the evaluation script described above on other models as well (on 2021-06-17). Note that the table below may show different results from those already reported; this may be due to specificities of the other evaluation scripts used.
 
 | Model | WER | CER |
 | ------------- | ------------- | ------------- |
-| jonatasgrosman/wav2vec2-large-xlsr-53-german | **11.85%** | **3.17%** |
+| jonatasgrosman/wav2vec2-large-xlsr-53-german | **10.55%** | **2.81%** |
+| Noricum/wav2vec2-large-xlsr-53-german | 11.06% | 2.99% |
 | maxidl/wav2vec2-large-xlsr-german | 13.10% | 3.64% |
 | marcel/wav2vec2-large-xlsr-53-german | 15.97% | 4.37% |
 | flozi00/wav2vec-xlsr-german | 16.13% | 4.33% |
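For context, the usage snippet these README hunks edit reads roughly as the sketch below once the commit is applied. Only the constants, the `transformers` imports, and the final print loop appear verbatim in the diff; the librosa-based audio loading and the dataset `map()` step are assumptions based on the usual Common Voice recipe, not something shown in this commit.

```python
# Minimal sketch of the README usage example after this commit (SAMPLES raised to 10).
# The librosa loading and map() step are assumed; only the constants and imports are in the diff.
import librosa
import torch
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

LANG_ID = "de"
MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-german"
SAMPLES = 10

test_dataset = load_dataset("common_voice", LANG_ID, split=f"test[:{SAMPLES}]")
processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)

def speech_file_to_array_fn(batch):
    # Common Voice ships 48 kHz MP3 clips; the model expects 16 kHz mono audio
    speech_array, _ = librosa.load(batch["path"], sr=16_000)
    batch["speech"] = speech_array
    return batch

test_dataset = test_dataset.map(speech_file_to_array_fn)
inputs = processor(test_dataset["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

predicted_ids = torch.argmax(logits, dim=-1)
predicted_sentences = processor.batch_decode(predicted_ids)

for i, predicted_sentence in enumerate(predicted_sentences):
    print("Reference :", test_dataset[i]["sentence"].upper())
    print("Prediction:", predicted_sentence)
```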
config.json CHANGED
@@ -7,6 +7,8 @@
   ],
   "attention_dropout": 0.1,
   "bos_token_id": 1,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
   "conv_bias": true,
   "conv_dim": [
     512,
@@ -37,33 +39,46 @@
   ],
   "ctc_loss_reduction": "mean",
   "ctc_zero_infinity": true,
+  "diversity_loss_weight": 0.1,
   "do_stable_layer_norm": true,
   "eos_token_id": 2,
   "feat_extract_activation": "gelu",
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "layer",
   "feat_proj_dropout": 0.05,
-  "final_dropout": 0.1,
+  "feat_quantizer_dropout": 0.0,
+  "final_dropout": 0.0,
   "gradient_checkpointing": true,
   "hidden_act": "gelu",
   "hidden_dropout": 0.05,
-  "hidden_dropout_prob": 0.1,
   "hidden_size": 1024,
   "initializer_range": 0.02,
   "intermediate_size": 4096,
   "layer_norm_eps": 1e-05,
   "layerdrop": 0.05,
+  "mask_channel_length": 10,
+  "mask_channel_min_space": 1,
+  "mask_channel_other": 0.0,
+  "mask_channel_prob": 0.0,
+  "mask_channel_selection": "static",
   "mask_feature_length": 10,
   "mask_feature_prob": 0.0,
   "mask_time_length": 10,
+  "mask_time_min_space": 1,
+  "mask_time_other": 0.0,
   "mask_time_prob": 0.05,
+  "mask_time_selection": "static",
   "model_type": "wav2vec2",
   "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
   "num_conv_pos_embedding_groups": 16,
   "num_conv_pos_embeddings": 128,
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
+  "num_negatives": 100,
   "pad_token_id": 0,
-  "transformers_version": "4.5.0.dev0",
-  "vocab_size": 36
+  "proj_codevector_dim": 768,
+  "transformers_version": "4.7.0.dev0",
+  "vocab_size": 38
 }
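To sanity-check the config changes above, loading the updated config and reading back the touched fields is enough. A minimal sketch, assuming the model is fetched from the Hub under the ID used in the README; `Wav2Vec2Config` is the standard transformers class for this file.

```python
# Read back fields that this commit changes in config.json.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-german")

print(config.vocab_size)           # 38 after this commit (was 36); must match vocab.json below
print(config.final_dropout)        # 0.0 after this commit (was 0.1)
print(config.proj_codevector_dim)  # 768, one of the newly serialized pretraining fields
```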
preprocessor_config.json CHANGED
@@ -1,5 +1,6 @@
 {
   "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0.0,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:230f7682c6576a1c855a884b6faf1d52e21ca70f86e426a7c2c1744cd0100b08
-size 1262081431
+oid sha256:bff6d75ab89d8ca9cd103df9beb9c10f547501cf5a34aeabea1c8d736c1b81cb
+size 1262089623
vocab.json CHANGED
@@ -1 +1 @@
-{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "E": 5, "N": 6, "I": 7, "S": 8, "R": 9, "T": 10, "A": 11, "H": 12, "D": 13, "U": 14, "L": 15, "C": 16, "G": 17, "M": 18, "O": 19, "B": 20, "W": 21, "F": 22, "K": 23, "Z": 24, "V": 25, "Ü": 26, "P": 27, "Ä": 28, "Ö": 29, "J": 30, "Y": 31, "'": 32, "X": 33, "Q": 34, "-": 35}
+{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "'": 5, "-": 6, "A": 7, "B": 8, "C": 9, "D": 10, "E": 11, "F": 12, "G": 13, "H": 14, "I": 15, "J": 16, "K": 17, "L": 18, "M": 19, "N": 20, "O": 21, "P": 22, "Q": 23, "R": 24, "S": 25, "T": 26, "U": 27, "V": 28, "W": 29, "X": 30, "Y": 31, "Z": 32, "Ä": 33, "Í": 34, "Ó": 35, "Ö": 36, "Ü": 37}