jonatasgrosman committed
Commit
2c17858
1 Parent(s): 4790032

update model

Files changed (5)
  1. README.md +17 -10
  2. config.json +10 -2
  3. preprocessor_config.json +1 -0
  4. pytorch_model.bin +2 -2
  5. vocab.json +1 -1
README.md CHANGED
@@ -24,10 +24,10 @@ model-index:
   metrics:
   - name: Test WER
     type: wer
-    value: 11.17
+    value: 14.33
   - name: Test CER
     type: cer
-    value: 2.87
+    value: 3.36
 ---
 
 # Wav2Vec2-Large-XLSR-53-Polish
@@ -81,11 +81,16 @@ for i, predicted_sentence in enumerate(predicted_sentences):
 
 | Reference | Prediction |
 | ------------- | ------------- |
-| """CZY DRZWI BYŁY ZAMKNIĘTE?""" | CZY DRZWI BYŁY ZAMKNIĘTE |
-| GDZIEŻ TU POWÓD DO WYRZUTÓW? | GDZIEŻ TO POWÓD DO WYŻYTÓW |
-| """O TEM JEDNAK NIE BYŁO MOWY.""" | O TEM JEDNAK NIE BYŁO MOWY N |
-| LUBIĘ GO. | LUBIĘ GO |
-| — TO MI NIE POMAGA. | TO MI NIE POMAGA |
+| """CZY DRZWI BYŁY ZAMKNIĘTE?""" | PRZY DRZWI BYŁY ZAMKNIĘTE |
+| GDZIEŻ TU POWÓD DO WYRZUTÓW? | WGDZIEŻ TO POM DO WYRYDÓ |
+| """O TEM JEDNAK NIE BYŁO MOWY.""" | O TEM JEDNAK NIE BYŁO MOWY |
+| LUBIĘ GO. | LUBIĄ GO |
+| — TO MI NIE POMAGA. | TO MNIE NIE POMAGA |
+| WCIĄŻ LUDZIE WYSIADAJĄ PRZED ZAMKIEM, Z MIASTA, Z PRAGI. | WCIĄŻ LUDZIE WYSIADAJĄ PRZED ZAMKIEM Z MIASTA Z PRAGI |
+| ALE ON WCALE INACZEJ NIE MYŚLAŁ. | ONY MONITCENIE PONACZUŁA NA MASU |
+| A WY, CO TAK STOICIE? | A WY CO TAK STOICIE |
+| A TEN PRZYRZĄD DO CZEGO SŁUŻY? | A TEN PRZYRZĄD DO CZEGO SŁUŻY |
+| NA JUTRZEJSZYM KOLOKWIUM BĘDZIE PIĘĆ PYTAŃ OTWARTYCH I TEST WIELOKROTNEGO WYBORU. | NAJUTRZEJSZYM KOLOKWIUM BĘDZIE PIĘĆ PYTAŃ OTWARTYCH I TEST WIELOKROTNEGO WYBORU |
 
 ## Evaluation
 
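The Reference/Prediction rows above come from the model card's transcription snippet, which follows the standard transformers CTC decoding path. Below is a minimal sketch of that kind of loop, assuming torchaudio for resampling; the helper and variable names are illustrative, not copied verbatim from the README.

```python
# Sketch of the inference loop behind the Reference/Prediction table above.
# Assumes torchaudio is available; the exact preprocessing in the README may differ.
import torch
import torchaudio
from datasets import load_dataset
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-polish"

processor = Wav2Vec2Processor.from_pretrained(MODEL_ID)
model = Wav2Vec2ForCTC.from_pretrained(MODEL_ID)

test_dataset = load_dataset("common_voice", "pl", split="test[:10]")
resampler = torchaudio.transforms.Resample(48_000, 16_000)  # Common Voice audio is 48 kHz

def speech_file_to_array(batch):
    # Load the clip and resample it to the 16 kHz the model expects.
    speech, _ = torchaudio.load(batch["path"])
    batch["speech"] = resampler(speech).squeeze().numpy()
    return batch

test_dataset = test_dataset.map(speech_file_to_array)

inputs = processor(test_dataset["speech"], sampling_rate=16_000,
                   return_tensors="pt", padding=True)

with torch.no_grad():
    logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits

# Greedy CTC decoding: take the argmax per frame, then collapse repeats/blanks.
predicted_ids = torch.argmax(logits, dim=-1)
predicted_sentences = processor.batch_decode(predicted_ids)

for reference, predicted_sentence in zip(test_dataset["sentence"], predicted_sentences):
    print("Reference :", reference)
    print("Prediction:", predicted_sentence)
```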
 
@@ -102,9 +107,11 @@ LANG_ID = "pl"
 MODEL_ID = "jonatasgrosman/wav2vec2-large-xlsr-53-polish"
 DEVICE = "cuda"
 
-CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
+CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ";", ":", '""', "%", '"', "�", "ʿ", "·", "჻", "~", "՞",
                    "؟", "،", "।", "॥", "«", "»", "„", "“", "”", "「", "」", "‘", "’", "《", "》", "(", ")", "[", "]",
-                   "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。"]
+                   "{", "}", "=", "`", "_", "+", "<", ">", "…", "–", "°", "´", "ʾ", "‹", "›", "©", "®", "—", "→", "。",
+                   "、", "﹂", "﹁", "‧", "~", "﹏", ",", "{", "}", "(", ")", "[", "]", "【", "】", "‥", "〽",
+                   "『", "』", "〝", "〟", "⟨", "⟩", "〜", ":", "!", "?", "♪", "؛", "/", "\\", "º", "−", "^", "ʻ", "ˆ"]
 
 test_dataset = load_dataset("common_voice", LANG_ID, split="test")
 
@@ -156,7 +163,7 @@ In the table below I report the Word Error Rate (WER) and the Character Error Ra
 
 | Model | WER | CER |
 | ------------- | ------------- | ------------- |
-| jonatasgrosman/wav2vec2-large-xlsr-53-polish | **11.17%** | **2.87%** |
+| jonatasgrosman/wav2vec2-large-xlsr-53-polish | **14.33%** | **3.36%** |
 | facebook/wav2vec2-large-xlsr-53-polish | 20.17% | 5.38% |
 | alexcleu/wav2vec2-large-xlsr-polish | 21.72% | 5.17% |
 | mbien/wav2vec2-large-xlsr-polish | 22.93% | 5.13% |
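The longer CHARS_TO_IGNORE list and the updated WER/CER numbers only interact through the text normalization applied before scoring. Here is a minimal sketch of that step, assuming jiwer for the metrics; the normalize helper is illustrative, not the repository's exact evaluation code.

```python
# Sketch: apply a CHARS_TO_IGNORE list before computing WER/CER.
import re
import jiwer

# Abbreviated list for illustration; the full list is in the diff above.
CHARS_TO_IGNORE = [",", "?", "¿", ".", "!", "¡", ";", ":", "…", "—", "«", "»"]
chars_to_ignore_regex = f"[{re.escape(''.join(CHARS_TO_IGNORE))}]"

def normalize(text: str) -> str:
    # Strip ignored punctuation, collapse whitespace, uppercase to match the vocab.
    text = re.sub(chars_to_ignore_regex, "", text)
    return " ".join(text.split()).upper()

references = [normalize(s) for s in ["Czy drzwi były zamknięte?"]]
predictions = [normalize(s) for s in ["przy drzwi były zamknięte"]]

print("WER:", jiwer.wer(references, predictions))
print("CER:", jiwer.cer(references, predictions))
```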
config.json CHANGED
@@ -7,6 +7,8 @@
   ],
   "attention_dropout": 0.1,
   "bos_token_id": 1,
+  "codevector_dim": 768,
+  "contrastive_logits_temperature": 0.1,
   "conv_bias": true,
   "conv_dim": [
     512,
@@ -37,12 +39,14 @@
   ],
   "ctc_loss_reduction": "mean",
   "ctc_zero_infinity": true,
+  "diversity_loss_weight": 0.1,
   "do_stable_layer_norm": true,
   "eos_token_id": 2,
   "feat_extract_activation": "gelu",
   "feat_extract_dropout": 0.0,
   "feat_extract_norm": "layer",
   "feat_proj_dropout": 0.05,
+  "feat_quantizer_dropout": 0.0,
   "final_dropout": 0.0,
   "gradient_checkpointing": true,
   "hidden_act": "gelu",
@@ -66,11 +70,15 @@
   "mask_time_selection": "static",
   "model_type": "wav2vec2",
   "num_attention_heads": 16,
+  "num_codevector_groups": 2,
+  "num_codevectors_per_group": 320,
   "num_conv_pos_embedding_groups": 16,
   "num_conv_pos_embeddings": 128,
   "num_feat_extract_layers": 7,
   "num_hidden_layers": 24,
+  "num_negatives": 100,
   "pad_token_id": 0,
-  "transformers_version": "4.5.0.dev0",
-  "vocab_size": 42
+  "proj_codevector_dim": 768,
+  "transformers_version": "4.7.0.dev0",
+  "vocab_size": 40
 }
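The added keys (codevector_dim, contrastive_logits_temperature, diversity_loss_weight, num_negatives, and the other codevector fields) are the wav2vec 2.0 quantizer/pretraining parameters that newer transformers releases serialize, and vocab_size drops from 42 to 40 to match the rebuilt vocab.json. A quick sanity check against the published config, assuming Hub access:

```python
# Sketch: confirm the updated config matches the new vocabulary size.
from transformers import Wav2Vec2Config

config = Wav2Vec2Config.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-polish")

print(config.vocab_size)                  # expected 40 after this commit, matching vocab.json
print(config.codevector_dim)              # 768, one of the quantizer fields added here
print(config.num_codevectors_per_group)   # 320
print(config.num_negatives)               # 100
```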
preprocessor_config.json CHANGED
@@ -1,5 +1,6 @@
 {
   "do_normalize": true,
+  "feature_extractor_type": "Wav2Vec2FeatureExtractor",
   "feature_size": 1,
   "padding_side": "right",
   "padding_value": 0.0,
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:411ade8d442a8d084db3c0cc0f30ac40b38b16debf49ea835400a3a121fec6fb
-size 1262106007
+oid sha256:1bd2a29f3b6093cc7a0f9ad39d56b5b11ae00f588878035e117e379dd3c204a6
+size 1262097815
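The weights themselves live in Git LFS, so the diff only updates the pointer: a new sha256 and a slightly smaller byte size. A sketch for verifying a downloaded copy against that pointer, assuming huggingface_hub for the download:

```python
# Sketch: verify a downloaded checkpoint against the Git LFS pointer above.
import hashlib
import os

from huggingface_hub import hf_hub_download

EXPECTED_SHA256 = "1bd2a29f3b6093cc7a0f9ad39d56b5b11ae00f588878035e117e379dd3c204a6"
EXPECTED_SIZE = 1262097815  # bytes, from the LFS pointer

path = hf_hub_download("jonatasgrosman/wav2vec2-large-xlsr-53-polish", "pytorch_model.bin")

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE
assert sha256.hexdigest() == EXPECTED_SHA256
print("checkpoint matches the LFS pointer")
```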
vocab.json CHANGED
@@ -1 +1 @@
-{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "A": 5, "I": 6, "E": 7, "O": 8, "Z": 9, "N": 10, "S": 11, "W": 12, "R": 13, "C": 14, "Y": 15, "M": 16, "T": 17, "D": 18, "K": 19, "P": 20, "Ł": 21, "J": 22, "U": 23, "L": 24, "B": 25, "Ę": 26, "G": 27, "Ą": 28, "Ż": 29, "H": 30, "Ś": 31, "Ó": 32, "Ć": 33, "F": 34, "Ń": 35, "Ź": 36, "V": 37, "-": 38, "Q": 39, "X": 40, "'": 41}
+{"<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "|": 4, "-": 5, "A": 6, "B": 7, "C": 8, "D": 9, "E": 10, "F": 11, "G": 12, "H": 13, "I": 14, "J": 15, "K": 16, "L": 17, "M": 18, "N": 19, "O": 20, "P": 21, "R": 22, "S": 23, "T": 24, "U": 25, "V": 26, "W": 27, "X": 28, "Y": 29, "Z": 30, "Ó": 31, "Ą": 32, "Ć": 33, "Ę": 34, "Ł": 35, "Ń": 36, "Ś": 37, "Ź": 38, "Ż": 39}