    path: data/test-*
  - split: validation
    path: data/validation-*
license: cc-by-sa-4.0
language:
- en
- zh
size_categories:
- 10K<n<100K
---

# Dataset Card for ASCEND (Cleaned)

This dataset is derived from CAiRE/ASCEND. More information is available at https://huggingface.co/datasets/CAiRE/ASCEND.

- Removed filler words: 嗯, 呃, "um", "uh"
- Resolved "[UNK]" tokens using whisper-medium (see the walkthrough below)
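
To use the cleaned data directly, it can be loaded like any Hugging Face dataset. A minimal sketch; the repository id below is a placeholder, substitute this repo's actual id:

```python
from datasets import load_dataset

# NOTE: "georgechang8/ascend-clean" is a placeholder id for this repository
data = load_dataset("georgechang8/ascend-clean")
print(data["train"][0]["transcription"])
```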

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Language(s) (NLP):** English, Simplified Chinese, Mixed (code-switched)
- **License:** Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)

## Dataset Creation

### Source Data

<!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences, ...). -->

https://huggingface.co/datasets/CAiRE/ASCEND

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

1. Load from source:

```python
from datasets import load_dataset

data_raw = load_dataset("CAiRE/ASCEND")
```

2. Clean filler words:

```python
import re

def clean_transcripts(x):
    # Drop the Mandarin fillers 嗯/呃 anywhere, and whitespace-preceded "um"/"uh"
    x = re.sub(r"[嗯呃]|\s+(um|uh)", " ", " " + x)
    # Collapse the whitespace runs left behind by the substitutions
    x = re.sub(r"\s+", " ", x)
    return x.strip()

data = data_raw.map(lambda x: {"transcription": clean_transcripts(x["transcription"])})
data = data.filter(lambda x: x["transcription"] != "")
```
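
A quick check on a made-up utterance (not from the dataset) illustrates the intended effect:

```python
print(clean_transcripts("嗯 我觉得 um 这个 project 很有趣"))
# -> 我觉得 这个 project 很有趣
```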

3. Isolate samples with UNKs:

```python
unks = data.filter(lambda x: "[UNK]" in x["transcription"])
unks.shape
```

> {'train': (402, 9), 'test': (36, 9), 'validation': (63, 9)}
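
To see what the affected utterances look like before resolving them, one can print a few (outputs vary by split and are omitted here):

```python
# Peek at the first three UNK-containing training transcriptions
for sample in unks["train"].select(range(3)):
    print(sample["id"], sample["transcription"])
```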

4. Load the whisper model. For Chinese, medium performs best.

```python
from stable_whisper import load_faster_whisper

model = load_faster_whisper(
    "medium",
    device="cuda",
    compute_type="float16",
)
```
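
If no GPU is available, the same model can be loaded on CPU with int8 quantization (a sketch using standard faster-whisper options; expect it to run much slower):

```python
# CPU fallback: int8 keeps memory modest at some accuracy cost
model = load_faster_whisper("medium", device="cpu", compute_type="int8")
```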

5. Resolve UNKs with whisper-medium:

```python
import json
from dataclasses import dataclass, asdict

from sacrebleu.tokenizers.tokenizer_zh import TokenizerZh
from stable_whisper.audio.utils import SAMPLE_RATE, resample
from tqdm.auto import tqdm

sacretok = TokenizerZh()

@dataclass
class Entry:
    id: str
    transcription: str
    adjustment: str

adjusted = {'validation': [], 'test': [], 'train': []}

for split in unks:
    trange = tqdm(unks[split], desc=split)
    for i, sample in enumerate(trange):
        # Whisper expects 16 kHz input
        if sample['audio']['sampling_rate'] != SAMPLE_RATE:
            sample['audio'].update(
                array=resample(sample['audio']['array'], sample['audio']['sampling_rate'], SAMPLE_RATE),
                sampling_rate=SAMPLE_RATE,
            )
        texts = sample['transcription'].split("[UNK]")
        words = sacretok(" ".join(texts[1:]))
        keyword = "关键词:"  # "keywords:"
        header = "文本:"  # "text:"
        prompt = f"{keyword}\"{words}\"\n{header}"  # encourages these words
        result = model.transcribe_stable(
            audio=sample['audio']['array'],
            initial_prompt=prompt,
            prefix=texts[0],  # force-decode the text preceding the first [UNK]
            language=sample['language'].replace('mixed', 'zh'),
            regroup=False,
            verbose=None,
            no_speech_threshold=1.0,
            suppress_silence=False,
            word_timestamps=True,
        ).merge_all_segments()
        adjustment = re.sub(
            r"\s",
            " ",
            result.text.replace(keyword, " ").replace(header, " ")
        ).strip()
        entry = Entry(
            id=sample['id'],
            transcription=sample['transcription'],
            adjustment=adjustment,
        )
        adjusted[split].append(entry)
        # Checkpoint every 5 samples so progress survives interruptions
        if i % 5 == 0:
            with open(f"checkpoint_{split}.json", "w") as f:
                json.dump(list(map(asdict, adjusted[split])), f)
```
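
Before committing the resolutions, it can help to eyeball a few original/adjusted pairs (output varies and is omitted here):

```python
# Compare original transcriptions against their whisper-resolved versions
for entry in adjusted["train"][:5]:
    print(entry.transcription, "->", entry.adjustment)
```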

6. Replace UNK utterances with resolved ones:

```python
adjusted = {}
for split in data_raw:
    with open(f"checkpoint_{split}.json", "r", encoding="utf8") as f:
        adjusted[split] = json.load(f)

def fix_unk(data):
    for split in data:
        # Map each resolved sample id to its adjusted transcription
        id2adjust = {sample['id']: sample['adjustment'] for sample in adjusted[split]}
        # Dataset rows can't be assigned in place, so rewrite the column with map
        data[split] = data[split].map(
            lambda x: {"transcription": id2adjust.get(x["id"], x["transcription"])}
        )
        print(split, "adjusted", len(id2adjust), "samples.")
    return data

data = fix_unk(data)
data = data.filter(lambda x: x["transcription"] != "")
data = data.sort("id")
data.save_to_disk('./ASCEND_CLEAN')
```

> train adjusted 401 samples.

> test adjusted 36 samples.

> validation adjusted 61 samples.
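
As a final sanity check (a sketch, assuming the paths used above), the saved dataset can be reloaded to count transcriptions that still contain "[UNK]"; a handful may remain, since the periodic checkpoints can miss the last few samples of a split:

```python
from datasets import load_from_disk

clean = load_from_disk("./ASCEND_CLEAN")
leftover = clean.filter(lambda x: "[UNK]" in x["transcription"])
print({split: leftover[split].num_rows for split in leftover})
```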