infinitejoy committed on
Commit
fa8e059
1 Parent(s): 4da9fbb

add tokenizer

Browse files
Files changed (5) hide show
  1. added_tokens.json +1 -0
  2. eval.py +256 -0
  3. special_tokens_map.json +1 -0
  4. tokenizer_config.json +1 -0
  5. vocab.json +1 -0
added_tokens.json ADDED
@@ -0,0 +1 @@
 
1
+ {"<s>": 50, "</s>": 51}
eval.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ import argparse
3
+ import re
4
+ from typing import Dict
5
+
6
+ from datasets import Audio, Dataset, load_dataset, load_metric
7
+
8
+ from transformers import AutoFeatureExtractor, pipeline
9
+
10
+
11
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics.

    Computes WER/CER over `result["prediction"]` vs `result["target"]`,
    writes them to `<dataset_id>_eval_results.txt`, and optionally dumps
    per-example predictions/targets when --log_outputs was passed.
    """

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\n" f"CER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    # BUG FIX: `log_outputs` is produced by argparse `action="store_true"`, so it
    # is always a bool (True/False) and never None; the original
    # `if log_outputs is not None:` was always truthy and wrote the log files
    # even when --log_outputs was NOT given. Test truthiness instead.
    if log_outputs:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
47
+
48
+
49
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text.

    Lower-cases `text`, strips the punctuation characters that were ignored
    during training, then collapses newlines and double spaces to single spaces.
    """

    # FIX: raw string -- the original non-raw literal contained "\-", "\;", "\:"
    # which are invalid *string* escape sequences (W605, SyntaxWarning on newer
    # Pythons). IMPORTANT: this should correspond to the chars that were ignored
    # during training.
    chars_to_ignore_regex = r'[,?.!\-\;\:"“%‘”�—’…–]'

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # FIX: the third entry must be a DOUBLE space ("  ") so repeated spaces are
    # collapsed; a duplicated single space (as rendered in the paste) is a no-op.
    token_sequences_to_ignore = ["\n\n", "\n", "  ", " "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
64
+
65
+
66
def main(args):
    """Evaluate an ASR model: load data, resample, transcribe, log WER/CER."""
    # load dataset (use_auth_token needed for gated/private datasets)
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # the feature extractor tells us which sampling rate the model expects
    extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    expected_sr = extractor.sampling_rate

    # resample audio lazily to the model's expected sampling rate
    dataset = dataset.cast_column("audio", Audio(sampling_rate=expected_sr))

    # load eval pipeline
    asr = pipeline("automatic-speech-recognition", model=args.model_id)

    def map_to_pred(batch):
        # decode one utterance; chunking/striding is delegated to the pipeline
        output = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = output["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
99
+
100
+
101
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    # FIX: the help strings claimed defaults of 5 s / 1 s, but the actual
    # default is None (the ASR pipeline's own defaults apply) -- the help
    # text now matches the real behavior.
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None (pipeline default)."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks in seconds. Defaults to None (pipeline default)."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    args = parser.parse_args()

    main(args)
129
+ #!/usr/bin/env python3
130
+ import argparse
131
+ import re
132
+ from typing import Dict
133
+
134
+ from datasets import Audio, Dataset, load_dataset, load_metric
135
+
136
+ from transformers import AutoFeatureExtractor, pipeline
137
+
138
+
139
def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics.

    Computes WER/CER over `result["prediction"]` vs `result["target"]`,
    writes them to `<dataset_id>_eval_results.txt`, and optionally dumps
    per-example predictions/targets when --log_outputs was passed.
    """

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\n" f"CER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    # BUG FIX: `log_outputs` is produced by argparse `action="store_true"`, so it
    # is always a bool (True/False) and never None; the original
    # `if log_outputs is not None:` was always truthy and wrote the log files
    # even when --log_outputs was NOT given. Test truthiness instead.
    if log_outputs:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:

            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
175
+
176
+
177
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. this function normalizes the target text.

    Lower-cases `text`, strips the punctuation characters that were ignored
    during training, then collapses newlines and double spaces to single spaces.
    """

    # FIX: raw string -- the original non-raw literal contained "\-", "\;", "\:"
    # which are invalid *string* escape sequences (W605, SyntaxWarning on newer
    # Pythons). IMPORTANT: this should correspond to the chars that were ignored
    # during training.
    chars_to_ignore_regex = r'[,?.!\-\;\:"“%‘”�—’…–]'

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    # FIX: the third entry must be a DOUBLE space ("  ") so repeated spaces are
    # collapsed; a duplicated single space (as rendered in the paste) is a no-op.
    token_sequences_to_ignore = ["\n\n", "\n", "  ", " "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
192
+
193
+
194
def main(args):
    """Evaluate an ASR model: load data, resample, transcribe, log WER/CER."""
    # load dataset (use_auth_token needed for gated/private datasets)
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # the feature extractor tells us which sampling rate the model expects
    extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    expected_sr = extractor.sampling_rate

    # resample audio lazily to the model's expected sampling rate
    dataset = dataset.cast_column("audio", Audio(sampling_rate=expected_sr))

    # load eval pipeline
    asr = pipeline("automatic-speech-recognition", model=args.model_id)

    def map_to_pred(batch):
        # decode one utterance; chunking/striding is delegated to the pipeline
        output = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = output["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
227
+
228
+
229
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    # FIX: the help strings claimed defaults of 5 s / 1 s, but the actual
    # default is None (the ASR pipeline's own defaults apply) -- the help
    # text now matches the real behavior.
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to None (pipeline default)."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks in seconds. Defaults to None (pipeline default)."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    args = parser.parse_args()

    main(args)
special_tokens_map.json ADDED
@@ -0,0 +1 @@
 
1
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "[UNK]", "pad_token": "[PAD]", "additional_special_tokens": [{"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}]}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
1
+ {"unk_token": "[UNK]", "bos_token": "<s>", "eos_token": "</s>", "pad_token": "[PAD]", "do_lower_case": false, "word_delimiter_token": "|", "special_tokens_map_file": null, "tokenizer_file": null, "name_or_path": "wav2vec2-large-xls-r-300m-armenian/", "tokenizer_class": "Wav2Vec2CTCTokenizer"}
vocab.json ADDED
@@ -0,0 +1 @@
 
1
+ {"(": 1, ")": 2, "«": 3, "»": 4, "՛": 5, "՝": 6, "՞": 7, "ա": 8, "բ": 9, "գ": 10, "դ": 11, "ե": 12, "զ": 13, "է": 14, "ը": 15, "թ": 16, "ժ": 17, "ի": 18, "լ": 19, "խ": 20, "ծ": 21, "կ": 22, "հ": 23, "ձ": 24, "ղ": 25, "ճ": 26, "մ": 27, "յ": 28, "ն": 29, "շ": 30, "ո": 31, "չ": 32, "պ": 33, "ջ": 34, "ռ": 35, "ս": 36, "վ": 37, "տ": 38, "ր": 39, "ց": 40, "ւ": 41, "փ": 42, "ք": 43, "օ": 44, "ֆ": 45, "և": 46, "։": 47, "|": 0, "[UNK]": 48, "[PAD]": 49}