---
language:
- en
- zh
license: cc-by-sa-4.0
size_categories:
- 10K<n<100K
---

- **Language(s):** English, Simplified Chinese, Mixed
- **License:** Creative Commons Attribution Share-Alike 4.0 International (CC-BY-SA 4.0)

## Dataset Creation

### Source Data

https://huggingface.co/datasets/CAiRE/ASCEND

#### Data Collection and Processing

1. Load from source

```python
from datasets import load_dataset, Audio as DSAudio

data_raw = load_dataset("CAiRE/ASCEND")
data_raw = data_raw.cast_column("audio", DSAudio(sampling_rate=16000))
```

2. Clean stop words

```python
import re

def clean_transcripts(x):
    cjk = "[\u3400-\u4db5\u4e00-\u9fa5\u9fa6-\u9fbb\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\uff00-\uffef\u2e80-\u2eff\u3000-\u303f\u31c0-\u31ef\u2f00-\u2fdf\u2ff0-\u2fff\u3100-\u312f\u31a0-\u31bf\ufe10-\ufe1f\ufe30-\ufe4f\u2600-\u26ff\u2700-\u27bf\u3200-\u32ff\u3300-\u33ff]"
    x = re.sub(r'\.\.\.|\s|^|$', ' ', x)  # pad with spaces so cases like " uh uh" can match below
    x = re.sub(rf"({cjk}|\s)([Uu][mh]|U[MH])({cjk}|\s)", r"\1 \3", x)  # drop any "um"/"uh" surrounded by CJK or space
    x = x.replace('嗯', ' ')  # drop Chinese filler particles
    x = x.replace('呃', ' ')
    x = re.sub(r"\s+", " ", x)
    return x.strip()

data = data_raw.map(lambda x: {"transcription": clean_transcripts(x['transcription'])})
data = data.filter(lambda x: x["transcription"] != "")
```

3. Isolate samples with UNKs

```python
unks = data.filter(lambda x: "[UNK]" in x["transcription"])
unks.shape
```

> {'train': (402, 9), 'test': (36, 9), 'validation': (63, 9)}

4. Load the Whisper model. For Chinese, the medium size performs best.

```python
from stable_whisper import load_faster_whisper

model = load_faster_whisper(
    "medium",
    device="cuda",
    compute_type="float16",
)
```

5. Resolve UNKs with whisper-medium. Unique tokens from the text after each `[UNK]` are packed into the initial prompt as keywords, and the text before the first `[UNK]` is forced as the decoding prefix (see the short sketch after this step).

```python
from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh
from whisper_normalizer.basic import BasicTextNormalizer
import cn2an
import json
import jiwer
from tqdm.auto import tqdm

sacretok = TokenizerZh()
whisper_norm = BasicTextNormalizer()

def compute_mer(hyp, ref):
    def norm(x):
        return sacretok(cn2an.transform(whisper_norm(x), "an2cn"))
    return jiwer.process_words(norm(hyp), norm(ref)).wer * 100

adjusted = {split: dict() for split in data}
double_check = {split: dict() for split in data}
UNK = "[UNK]"

for split in data:
    trange = tqdm(unks[split], desc=split)
    for i, sample in enumerate(trange):
        transcription = sample['transcription']
        texts = transcription.split(UNK)
        words = []
        for sent in texts[1:]:
            for w in sacretok(sent).split():
                if w not in words:
                    words += [w]
        keyword = "关键词"  # "keywords"
        header = "字幕"     # "subtitles"
        prompt = f"{keyword} \"{'/'.join(words)}\" {header} "
        result = model.transcribe_stable(
            audio=sample['audio']['array'],
            initial_prompt=prompt,  # encourage reuse of words
            prefix=texts[0],        # forcing start to follow real start
            language=sample['language'].replace('mixed', 'zh'),
            regroup=False,
            verbose=None,
            no_speech_threshold=1.0,
            suppress_silence=False,
            word_timestamps=True    # though unused, timestamps reduce hallucination
        ).merge_all_segments()
        adjustment = clean_transcripts(
            result.text
            .replace(keyword, " ")
            .replace(header, " ")
        )
        mer = compute_mer(transcription, adjustment)
        adjusted[split][sample['id']] = adjustment
        trange.set_postfix(mer=f"{mer:.2f}", dc=len(double_check[split]))
        if mer > 30:
            double_check[split][sample['id']] = mer
            print(transcription, "||", adjustment)
        if i % 5 == 0 or i == len(unks[split]) - 1:
            with open(f"checkpoint_{split}.json", "w") as f:
                json.dump(adjusted[split], f)
```
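For intuition, here is a minimal sketch of how the keyword prompt and prefix in step 5 are built; the transcription below is a made-up example, not a real ASCEND sample:

```python
from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh

sacretok = TokenizerZh()
UNK = "[UNK]"

# hypothetical transcription with two unknown spans
transcription = "我们今天 [UNK] 讨论一下 budget [UNK] 的问题"

texts = transcription.split(UNK)
words = []
for sent in texts[1:]:  # collect unique tokens appearing after the first UNK
    for w in sacretok(sent).split():
        if w not in words:
            words += [w]

keyword, header = "关键词", "字幕"  # "keywords", "subtitles"
prompt = f"{keyword} \"{'/'.join(words)}\" {header} "
prefix = texts[0]  # decoding is forced to start with the text before the first UNK

print(prompt)  # e.g. 关键词 "讨/论/一/下/budget/的/问/题" 字幕
print(prefix)  # e.g. 我们今天
```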
6. Replace UNK utterances with resolved ones

```python
from datasets import DatasetDict
import json

adjusted_transcripts = {}
for split in data_raw:
    with open(f"checkpoint_{split}.json", "r", encoding="utf8") as f:
        adjusted_transcripts[split] = json.load(f)

UNK = "[UNK]"

def fix_unk(sample, adjusted_dict):
    def bad(orig, new):
        return sacretok(new) in sacretok(orig)
    transcription = clean_transcripts(sample['transcription'].replace(UNK, ""))
    sid = sample['id']
    adjustment = adjusted_dict.get(sid, transcription)
    if bad(transcription, adjustment):  # adjustment worse than just removing UNK
        # print("skipped:", transcription, "||", adjustment)
        adjustment = transcription
    return {"transcription": adjustment}

data = DatasetDict({
    split: data_raw[split].map(lambda x: fix_unk(x, adjusted_transcripts[split]), load_from_cache_file=False)
    for split in data_raw
})
data = data.sort(["session_id", "id"], load_from_cache_file=False)

for split in data:
    for line in data[split]['transcription']:
        assert UNK not in line
```

> train adjusted 402 samples, 75 of which just remove the UNKs.
> test adjusted 36 samples, 9 of which just remove the UNKs.
> validation adjusted 63 samples, 7 of which just remove the UNKs.
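As a final spot check, a few resolved transcriptions can be printed next to their original `[UNK]` versions. This is a small sketch reusing `unks`, `data_raw`, and `data` from the steps above; the choice of the first three ids is arbitrary:

```python
# ids of a few train samples that originally contained [UNK]
unk_ids = unks["train"]["id"][:3]

# map id -> transcription without decoding any audio
before = dict(zip(data_raw["train"]["id"], data_raw["train"]["transcription"]))
after = dict(zip(data["train"]["id"], data["train"]["transcription"]))

for sid in unk_ids:
    print(before[sid], "||", after[sid])
```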