eval results

Files changed:
- .ipynb_checkpoints/README-checkpoint.md +85 -0
- .ipynb_checkpoints/eval-checkpoint.py +29 -3
- .ipynb_checkpoints/run_eval-checkpoint.sh +8 -0
- .ipynb_checkpoints/run_speech_recognition_ctc-checkpoint.py +32 -6
- eval.py +29 -3
- log_mozilla-foundation_common_voice_7_0_hi_test_predictions.txt +0 -0
- log_mozilla-foundation_common_voice_7_0_hi_test_targets.txt +0 -0
- mozilla-foundation_common_voice_7_0_hi_test_eval_results.txt +2 -0
- run_eval.sh +8 -0
- run_speech_recognition_ctc.py +32 -6
.ipynb_checkpoints/README-checkpoint.md
ADDED
@@ -0,0 +1,85 @@
+---
+language:
+- hi
+license: apache-2.0
+tags:
+- automatic-speech-recognition
+- mozilla-foundation/common_voice_7_0
+- robust-speech-event
+- generated_from_trainer
+datasets:
+- common_voice
+model-index:
+- name: ''
+  results: []
+---
+
+<!-- This model card has been generated automatically according to the information the Trainer had access to. You
+should probably proofread and complete it, then remove this comment. -->
+
+#
+
+This model is a fine-tuned version of [facebook/wav2vec2-xls-r-300m](https://huggingface.co/facebook/wav2vec2-xls-r-300m) on the MOZILLA-FOUNDATION/COMMON_VOICE_7_0 - HI dataset.
+It achieves the following results on the evaluation set:
+- Loss: 0.7346
+- Wer: 1.0479
+
+## Model description
+
+More information needed
+
+## Intended uses & limitations
+
+More information needed
+
+## Training and evaluation data
+
+More information needed
+
+## Training procedure
+
+### Training hyperparameters
+
+The following hyperparameters were used during training:
+- learning_rate: 0.0003
+- train_batch_size: 16
+- eval_batch_size: 8
+- seed: 42
+- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- lr_scheduler_type: linear
+- lr_scheduler_warmup_steps: 500
+- training_steps: 8000
+- mixed_precision_training: Native AMP
+
+### Training results
+
+| Training Loss | Epoch | Step | Validation Loss | Wer    |
+|:-------------:|:-----:|:----:|:---------------:|:------:|
+| No log        | 1.36  | 400  | 1.4595          | 1.0039 |
+| 4.7778        | 2.71  | 800  | 0.8082          | 1.0115 |
+| 0.6408        | 4.07  | 1200 | 0.7032          | 1.0079 |
+| 0.3937        | 5.42  | 1600 | 0.6889          | 1.0433 |
+| 0.3           | 6.78  | 2000 | 0.6820          | 1.0069 |
+| 0.3           | 8.14  | 2400 | 0.6670          | 1.0196 |
+| 0.226         | 9.49  | 2800 | 0.7216          | 1.0422 |
+| 0.197         | 10.85 | 3200 | 0.7669          | 1.0534 |
+| 0.165         | 12.2  | 3600 | 0.7517          | 1.0200 |
+| 0.1486        | 13.56 | 4000 | 0.7125          | 1.0357 |
+| 0.1486        | 14.92 | 4400 | 0.7447          | 1.0347 |
+| 0.122         | 16.27 | 4800 | 0.6899          | 1.0440 |
+| 0.1069        | 17.63 | 5200 | 0.7212          | 1.0350 |
+| 0.0961        | 18.98 | 5600 | 0.7417          | 1.0408 |
+| 0.086         | 20.34 | 6000 | 0.7402          | 1.0356 |
+| 0.086         | 21.69 | 6400 | 0.7761          | 1.0420 |
+| 0.0756        | 23.05 | 6800 | 0.7346          | 1.0369 |
+| 0.0666        | 24.41 | 7200 | 0.7506          | 1.0449 |
+| 0.0595        | 25.76 | 7600 | 0.7319          | 1.0476 |
+| 0.054         | 27.12 | 8000 | 0.7346          | 1.0479 |
+
+
+### Framework versions
+
+- Transformers 4.16.0.dev0
+- Pytorch 1.10.1+cu102
+- Datasets 1.18.3
+- Tokenizers 0.11.0
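Since the model card above is auto-generated and sparse, a usage sketch may help. This is a minimal, hypothetical inference example: the directory name `checkpoint-8000` comes from run_eval.sh below, `sample.wav` is a placeholder, and it assumes the checkpoint directory contains both the model weights and the processor files.

```python
# Minimal inference sketch; paths are illustrative, not part of this commit.
import soundfile as sf
import torch
from transformers import AutoModelForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("checkpoint-8000")
model = AutoModelForCTC.from_pretrained("checkpoint-8000")

speech, sr = sf.read("sample.wav")  # expects 16 kHz mono audio
inputs = processor(speech, sampling_rate=sr, return_tensors="pt")

with torch.no_grad():
    logits = model(inputs.input_values).logits

pred_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(pred_ids)[0])
```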
.ipynb_checkpoints/eval-checkpoint.py
CHANGED
(Jupyter autosave copy; the diff is identical to the eval.py diff below.)
.ipynb_checkpoints/run_eval-checkpoint.sh
ADDED
(Jupyter autosave copy; the contents are identical to run_eval.sh below.)
.ipynb_checkpoints/run_speech_recognition_ctc-checkpoint.py
CHANGED
(Jupyter autosave copy; the diff is identical to the run_speech_recognition_ctc.py diff below.)
eval.py
CHANGED
@@ -47,11 +47,32 @@ def log_results(result: Dataset, args: Dict[str, str]):
     result.map(write_to_file, with_indices=True)
 
 
+def replace_text(text):
+    text = text.replace('„', r'"')
+    text = text.replace('“', r'"')
+    text = text.replace('”', r'"')
+    text = text.replace('–', r'-')
+    text = text.replace('—', r' - ')
+    text = text.replace('´', r"'")
+    text = text.replace('‘', r"'")
+    text = text.replace('‚', r"'")
+    text = text.replace('’', r"'")
+    text = text.replace("''", r'"')
+    text = text.replace('´´', r'"')
+
+    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
+    for t in token_sequences_to_ignore:
+        text = " ".join(text.split(t))
+    return text
+
 def normalize_text(text: str) -> str:
     """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text."""
-
-    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
-
+    # chars_to_ignore_regex = (
+    #     f'[{"".join(args.chars_to_ignore)}]' if args.chars_to_ignore is not None else None
+    # )
+    text = replace_text(text)
+    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–"\'-]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
+    # print(chars_to_ignore_regex)
     text = re.sub(chars_to_ignore_regex, "", text.lower())
 
     # In addition, we can normalize the target text, e.g. removing new lines characters etc...
@@ -132,6 +153,11 @@ if __name__ == "__main__":
         default=None,
         help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
     )
+    parser.add_argument(
+        "--chars_to_ignore",
+        default=None,
+        help="characters to ignore in text",
+    )
     args = parser.parse_args()
 
     main(args)
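To sanity-check the normalization above, it can be exercised on a punctuation-heavy string. A minimal self-contained sketch, with the two functions condensed from the diff (the sample sentence is illustrative):

```python
import re

def replace_text(text):
    # Same substitutions as in eval.py, written as a table.
    for src, dst in [('„', '"'), ('“', '"'), ('”', '"'), ('–', '-'), ('—', ' - '),
                     ('´', "'"), ('‘', "'"), ('‚', "'"), ('’', "'"),
                     ("''", '"'), ('´´', '"')]:
        text = text.replace(src, dst)
    # Collapse newlines and runs of spaces, as in the original.
    for t in ["\n\n", "\n", "   ", "  "]:
        text = " ".join(text.split(t))
    return text

def normalize_text(text):
    text = replace_text(text)
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–"\'-]'
    return re.sub(chars_to_ignore_regex, "", text.lower())

print(repr(normalize_text('“नमस्ते” — दुनिया!')))
# quote marks, dashes, and '!' are stripped, and ASCII text is lowercased
```

Note that `replace_text` maps typographic characters to ASCII before the regex runs, and the character class then lists both the typographic and the ASCII forms, so the two stages overlap deliberately.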
log_mozilla-foundation_common_voice_7_0_hi_test_predictions.txt
ADDED
(The diff for this file is too large to render.)

log_mozilla-foundation_common_voice_7_0_hi_test_targets.txt
ADDED
(The diff for this file is too large to render.)
mozilla-foundation_common_voice_7_0_hi_test_eval_results.txt
ADDED
@@ -0,0 +1,2 @@
+WER: 0.38507940416102426
+CER: 0.13082663533294167
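These figures are produced by eval.py, which scores the normalized predictions against the normalized targets with the `wer` and `cer` metrics from `datasets`. A minimal sketch of the same computation on illustrative strings (the real script scores the full Common Voice 7.0 Hindi test split and writes the log files above):

```python
from datasets import load_metric  # datasets 1.18.x, per the framework versions above

wer_metric = load_metric("wer")
cer_metric = load_metric("cer")  # both metrics require the jiwer package

predictions = ["नमस्ते दुनिया"]        # illustrative model output
references = ["नमस्ते पूरी दुनिया"]    # illustrative ground truth

print("WER:", wer_metric.compute(predictions=predictions, references=references))
print("CER:", cer_metric.compute(predictions=predictions, references=references))
```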
run_eval.sh
ADDED
@@ -0,0 +1,8 @@
+python eval.py \
+    --model_id "checkpoint-8000" \
+    --dataset "mozilla-foundation/common_voice_7_0" \
+    --config "hi" \
+    --split "test" \
+    --chars_to_ignore , ? . ! - \; \: \" “ % ‘ ” � \
+    --log_outputs
+
run_speech_recognition_ctc.py
CHANGED
@@ -435,16 +435,42 @@ def main():
     # that make training complicated and do not help in transcribing the speech
     # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic
     # that could be easily picked up by the model
-    chars_to_ignore_regex = (
-        f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
-    )
+    # chars_to_ignore_regex = (
+    #     f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None
+    # )
+    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'
     text_column_name = data_args.text_column_name
 
-    def remove_special_characters(batch):
+
+    def replace_text(text):
+        text = text.replace('„', r'"')
+        text = text.replace('“', r'"')
+        text = text.replace('”', r'"')
+        text = text.replace('–', r'-')
+        text = text.replace('—', r' - ')
+        text = text.replace('´', r"'")
+        text = text.replace('‘', r"'")
+        text = text.replace('‚', r"'")
+        text = text.replace('’', r"'")
+        text = text.replace("''", r'"')
+        text = text.replace('´´', r'"')
+
+        token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
+        for t in token_sequences_to_ignore:
+            text = " ".join(text.split(t))
+        return text
+
+    def remove_special_characters(batch):
+        text = batch[text_column_name]
+        text = replace_text(text)
+
         if chars_to_ignore_regex is not None:
-            batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[text_column_name]).lower() + " "
+            target_text = re.sub(chars_to_ignore_regex, "", text).lower() + " "
         else:
-            batch["target_text"] = batch[text_column_name].lower() + " "
+            target_text = text.lower() + " "
+
+        batch["target_text"] = target_text
         return batch
 
     with training_args.main_process_first(desc="dataset map special characters removal"):
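For context, `remove_special_characters` above is applied to the dataset with `datasets.map` in the surrounding script. A minimal standalone sketch of that pattern on a toy in-memory dataset (the column name `sentence` is an assumption matching Common Voice; only the regex path is shown):

```python
import re
from datasets import Dataset

chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'
text_column_name = "sentence"  # assumption: Common Voice stores transcripts here

def remove_special_characters(batch):
    # Strip the ignored punctuation, lowercase, and append a trailing space,
    # mirroring the target_text construction in run_speech_recognition_ctc.py.
    text = batch[text_column_name]
    batch["target_text"] = re.sub(chars_to_ignore_regex, "", text).lower() + " "
    return batch

ds = Dataset.from_dict({"sentence": ["“नमस्ते”, दुनिया!"]})
ds = ds.map(remove_special_characters, remove_columns=[text_column_name])
print(repr(ds[0]["target_text"]))  # punctuation removed, trailing space kept
```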