georgechang8 committed
Commit d0c725b
1 Parent(s): 0ab306f

Update README.md


Updated cleaning procedure

Files changed (1):
  1. README.md (+78 -59)
README.md CHANGED
@@ -54,7 +54,7 @@ configs:
  This dataset is derived from CAiRE/ASCEND. More information is available at https://huggingface.co/datasets/CAiRE/ASCEND.
 
  - Removed 嗯 呃 um uh
- - Resolved "[UNK]"'s using whisper-medium
+ - Resolved [UNK]'s using whisper-medium
 
  ## Dataset Details
 
@@ -62,7 +62,7 @@ This dataset is derived from CAiRE/ASCEND. More information is available at http
 
  <!-- Provide a longer summary of what this dataset is. -->
 
- - **Language(s) (NLP):** English, Simplified Chinese, Mixed
+ - **Language(s):** English, Simplified Chinese, Mixed
  - **License:** Creative Commons Attribution-ShareAlike 4.0 International (CC-BY-SA 4.0)
 
  ## Dataset Creation
@@ -77,15 +77,20 @@ https://huggingface.co/datasets/CAiRE/ASCEND
  <!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->
  1. Load from source
  ```python
- from datasets import load_dataset
+ from datasets import load_dataset, Audio as DSAudio
  data_raw = load_dataset("CAiRE/ASCEND")
+ data_raw = data_raw.cast_column("audio", DSAudio(sampling_rate=16000))
  ```
  2. Clean filler words
  ```python
  import re
 
  def clean_transcripts(x):
-     x = re.sub(r"[嗯呃]|\s+(um|uh)", " ", " " + x)
+     cjk = "[\u3400-\u4db5\u4e00-\u9fa5\u9fa6-\u9fbb\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\uff00-\uffef\u2e80-\u2eff\u3000-\u303f\u31c0-\u31ef\u2f00-\u2fdf\u2ff0-\u2fff\u3100-\u312f\u31a0-\u31bf\ufe10-\ufe1f\ufe30-\ufe4f\u2600-\u26ff\u2700-\u27bf\u3200-\u32ff\u3300-\u33ff]"
+     x = re.sub(r'\.\.\.|\s|^|$', ' ', x)  # expanding space allows matching " uh uh" case
+     x = re.sub(rf"({cjk}|\s)([Uu][mh]|U[MH])({cjk}|\s)", r"\1 \3", x)  # drop any um/uh surrounded by CJK or space
+     x = x.replace('嗯', ' ')
+     x = x.replace('呃', ' ')
      x = re.sub(r"\s+", " ", x)
      return x.strip()
 
@@ -110,88 +115,102 @@ model = load_faster_whisper(
  ```
  5. Resolve UNKs with whisper-medium
  ```python
- from stable_whisper.audio.utils import SAMPLE_RATE, resample
  from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh
+ from whisper_normalizer.basic import BasicTextNormalizer
+ import cn2an
  import json
+ import jiwer
  from tqdm.auto import tqdm
- from dataclasses import dataclass, asdict
 
  sacretok = TokenizerZh()
+ whisper_norm = BasicTextNormalizer()
+ def compute_mer(hyp, ref):
+     def norm(x):
+         return sacretok(cn2an.transform(whisper_norm(x), "an2cn"))
+     return jiwer.process_words(norm(hyp), norm(ref)).wer * 100
 
- @dataclass
- class Entry:
-     id: str
-     transcription: str
-     adjustment: str
-
- adjusted = {'validation': [], 'test': [], 'train': []}
-
- for split in unk:
+ adjusted = {split: dict() for split in data}
+ double_check = {split: dict() for split in data}
+
+ UNK = "[UNK]"
+
+ for split in data:
      trange = tqdm(unks[split], desc=split)
      for i, sample in enumerate(trange):
-         if sample['audio']['sampling_rate'] != SAMPLE_RATE:
-             sample['audio'].update(
-                 array=resample(sample['audio']['array'], sample['audio']['sampling_rate'], SAMPLE_RATE),
-                 sampling_rate=SAMPLE_RATE
-             )
-         texts = sample['transcription'].split("[UNK]")
-         words = sacretok(" ".join(texts[1:]))
-         keyword = "关键词:"
-         header = "文本:"
-         prompt = f"{keyword}\"{words}\"\n{header}"  # encourages these words
+         transcription = sample['transcription']
+         texts = transcription.split(UNK)
+         words = []
+         for sent in texts[1:]:
+             for w in sacretok(sent).split():
+                 if w not in words:
+                     words += [w]
+         keyword = "关键词"
+         header = "字幕"
+         prompt = f"{keyword} \"{'/'.join(words)}\" {header} "
          result = model.transcribe_stable(
              audio=sample['audio']['array'],
-             initial_prompt=prompt,
-             prefix=texts[0],  # forced decoding the same prefix
+             initial_prompt=prompt,  # encourage reuse of these words
+             prefix=texts[0],  # force the start to follow the real start
              language=sample['language'].replace('mixed', 'zh'),
              regroup=False,
              verbose=None,
             no_speech_threshold=1.0,
             suppress_silence=False,
-             word_timestamps=True
+             word_timestamps=True  # though unused, timestamps reduce hallucination
         ).merge_all_segments()
-         adjustment = re.sub(
-             r"\s",
-             " ",
-             result.text.replace(keyword, " ").replace(header, " ")
-         ).strip()
-         entry = Entry(
-             id=sample['id'],
-             transcription=sample['transcription'],
-             adjustment=adjustment,
+         adjustment = clean_transcripts(
+             result.text
+             .replace(keyword, " ")
+             .replace(header, " ")
          )
-         adjusted[split].append(entry)
-         if i % 5 == 0:
+         mer = compute_mer(transcription, adjustment)
+         adjusted[split][sample['id']] = adjustment
+         trange.set_postfix(mer=f"{mer:.2f}", dc=len(double_check[split]))
+         if mer > 30:
+             double_check[split][sample['id']] = mer
+             print(transcription, "||", adjustment)
+         if i % 5 == 0 or i == len(unks[split]) - 1:
              with open(f"checkpoint_{split}.json", "w") as f:
-                 json.dump(list(map(asdict, adjusted[split])), f)
+                 json.dump(adjusted[split], f)
  ```
  6. Replace UNK utterances with resolved ones
  ```python
- adjusted = {}
+ import json
+
+ adjusted_transcripts = {}
  for split in data_raw:
      with open(f"checkpoint_{split}.json", "r", encoding="utf8") as f:
-         adjusted[split] = json.load(f)
-
- def fix_unk(data):
-     for split in data:
-         n_adjusted = 0
-         for sample in adjusted[split]:
-             sid = sample['id']
-             adjust = sample['adjustment']
-             index = data[split]['id'].index(sid)
-             data[split][index]['transcription'] = adjust
-             n_adjusted += 1
-         print(split, "adjusted", n_adjusted, "samples.")
-
-     return data
+         adjusted_transcripts[split] = json.load(f)
+
+ UNK = "[UNK]"
+
+ def fix_unk(ds):
+     def bad(orig, new):
+         return sacretok(new) in sacretok(orig)
+
+     for split in ds:
+         adjusted = 0
+         copied = 0
+         for sid, adjustment in adjusted_transcripts[split].items():
+             index = ds[split]['id'].index(sid)
+             transcription = ds[split][index]['transcription'].replace(UNK, "")
+             if bad(transcription, adjustment):
+                 # adjustment is no better than just removing UNK
+                 # print("skipped:", transcription, "||", adjustment)
+                 adjustment = transcription
+                 copied += 1
+             ds[split][index]['transcription'] = adjustment
+             adjusted += 1
+         print(f"{split} adjusted {adjusted} samples, {copied} of which just remove UNKs.")
+
+     return ds
 
  data = fix_unk(data)
  data = data.filter(lambda x: x["transcription"] != "")
- data = data.sort("id")
- data.save_to_disk('./ASCEND_CLEAN')
+ data = data.sort(["session_id", "id"])
  ```
- > train adjusted 401 samples.
-
- > test adjusted 36 samples.
+ > train adjusted 402 samples, 75 of which just remove UNKs.
+
+ > test adjusted 36 samples, 9 of which just remove UNKs.
 
- > validation adjusted 61 samples.
+ > validation adjusted 63 samples, 7 of which just remove UNKs.
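
A quick check of step 2's new filler removal (the example strings and expected outputs here are mine, and assume `clean_transcripts` from step 2 is in scope): the space padding lets a filler at the very start of an utterance match, while the CJK-or-space guard leaves words that merely contain "uh", like "huh", untouched.

```python
# Assumes clean_transcripts from step 2 of the README is defined.
print(clean_transcripts("Uh I think so"))         # -> "I think so"
print(clean_transcripts("嗯 我觉得uh就是这样...")) # -> "我觉得 就是这样"
print(clean_transcripts("that uh is huh fine"))   # -> "that is huh fine"
```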
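Steps 3 and 4 fall outside this diff (only the hunk header `model = load_faster_whisper(` is visible), so the `unks` dict and `model` used in step 5 are defined elsewhere in the README. A minimal sketch of what they plausibly contain, assuming stable-ts's faster-whisper loader and the `data` splits from step 2; the names and the "medium" size are inferred from the surrounding code, not confirmed by this commit:

```python
import stable_whisper

# Hypothetical reconstruction of steps 3-4: gather per-split samples whose
# transcription still contains the [UNK] marker, then load whisper-medium
# via stable-ts's faster-whisper backend.
UNK = "[UNK]"
unks = {
    split: data[split].filter(lambda x: UNK in x["transcription"])
    for split in data
}
model = stable_whisper.load_faster_whisper("medium")
```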
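The `mer > 30` double-check gate in step 5 depends on `compute_mer` ignoring surface-form differences: `whisper_normalizer` lowercases and strips symbols, `cn2an` rewrites Arabic numerals as Chinese, and sacrebleu's `TokenizerZh` splits Chinese into characters before `jiwer` computes WER. A self-contained illustration (the sample strings are mine):

```python
import cn2an
import jiwer
from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh
from whisper_normalizer.basic import BasicTextNormalizer

sacretok = TokenizerZh()
whisper_norm = BasicTextNormalizer()

def compute_mer(hyp, ref):
    # Same helper as in step 5: normalize, convert numerals, tokenize, WER.
    def norm(x):
        return sacretok(cn2an.transform(whisper_norm(x), "an2cn"))
    return jiwer.process_words(norm(hyp), norm(ref)).wer * 100

print(compute_mer("我有2个苹果", "我有二个苹果"))  # -> 0.0, numeral form normalizes away
print(compute_mer("我有二个苹果", "我有三个苹果"))  # -> 16.67, one of six tokens differs
```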
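In step 6, `bad` is the guard that decides whether Whisper's adjustment is trusted: after Chinese tokenization, an adjustment that is a substring of the UNK-stripped original added no new words, so the original (minus `[UNK]`) is kept instead; those cases are the `copied` count reported in the logs above. An illustration with made-up strings:

```python
from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh

sacretok = TokenizerZh()

def bad(orig, new):
    # True when the adjustment adds nothing beyond dropping [UNK].
    return sacretok(new) in sacretok(orig)

unk_stripped = "我觉得很好"  # original transcription with [UNK] removed
print(bad(unk_stripped, "我觉得很好"))     # -> True, fall back to unk_stripped
print(bad(unk_stripped, "我真的觉得很好")) # -> False, keep Whisper's adjustment
```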