georgechang8 commited on
Commit
6def75d
1 Parent(s): 2fc82b5

update README and cleanup dirt

Browse files
30s/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:73c769060a40709afc63d54b3d639284f2773a885118900c584a39c535d44e5d
3
- size 302350374
 
 
 
 
30s/train-00000-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:3821abc69c71547c6cba61a239bbbda173bacae6ae0f51ad97bcbf4a50913ea0
3
- size 680677330
 
 
 
 
30s/train-00001-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:67dcd4c702f081b89bd4e05d05db26aead4a86dcef8d7c2132f830b817abc3b8
3
- size 678462754
 
 
 
 
30s/train-00002-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:60dc110f92db0999e38cdf66ba120edc031ee43878c4341e97afb393e1383134
3
- size 689175827
 
 
 
 
30s/train-00003-of-00004.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:61031ef2db92a1b227a838fd869be871b05491599d57c0298a60df82eed3c82a
3
- size 680114504
 
 
 
 
30s/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:bb029661af400af3069877f986e11d0d7d9dfa633ca682d0a8ad20a0686a7dc2
3
- size 349016357
 
 
 
 
README.md CHANGED
@@ -1,31 +1,5 @@
1
  ---
2
  dataset_info:
3
- - config_name: 30s
4
- features:
5
- - name: audio
6
- dtype:
7
- audio:
8
- sampling_rate: 16000
9
- - name: text
10
- dtype: string
11
- - name: id
12
- dtype: string
13
- - name: condition_on_prev
14
- dtype: string
15
- - name: duration
16
- dtype: float64
17
- splits:
18
- - name: train
19
- num_bytes: 3194102757.33
20
- num_examples: 3659
21
- - name: validation
22
- num_bytes: 410704018.0
23
- num_examples: 462
24
- - name: test
25
- num_bytes: 355118867.0
26
- num_examples: 398
27
- download_size: 3379797146
28
- dataset_size: 3959925642.33
29
  - config_name: clean
30
  features:
31
  - name: audio
@@ -50,30 +24,6 @@ dataset_info:
50
  num_examples: 3813
51
  download_size: 3311132741
52
  dataset_size: 3316440190.0
53
- - config_name: clean_extra
54
- features:
55
- - name: audio
56
- dtype:
57
- audio:
58
- sampling_rate: 16000
59
- - name: text
60
- dtype: string
61
- - name: id
62
- dtype: string
63
- - name: session_id
64
- dtype: string
65
- splits:
66
- - name: train
67
- num_bytes: 343502934.86
68
- num_examples: 5990
69
- - name: validation
70
- num_bytes: 21223349.0
71
- num_examples: 397
72
- - name: test
73
- num_bytes: 17421320.0
74
- num_examples: 282
75
- download_size: 398165352
76
- dataset_size: 382147603.86
77
  - config_name: default
78
  features:
79
  - name: audio
@@ -93,14 +43,6 @@ dataset_info:
93
  download_size: 3346820592
94
  dataset_size: 3341105554.6299996
95
  configs:
96
- - config_name: 30s
97
- data_files:
98
- - split: train
99
- path: 30s/train-*
100
- - split: validation
101
- path: 30s/validation-*
102
- - split: test
103
- path: 30s/test-*
104
  - config_name: clean
105
  data_files:
106
  - split: train
@@ -109,14 +51,6 @@ configs:
109
  path: clean/validation-*
110
  - split: test
111
  path: clean/test-*
112
- - config_name: clean_extra
113
- data_files:
114
- - split: train
115
- path: clean_extra/train-*
116
- - split: validation
117
- path: clean_extra/validation-*
118
- - split: test
119
- path: clean_extra/test-*
120
  - config_name: default
121
  data_files:
122
  - split: train
@@ -325,6 +259,57 @@ audio_dataset.push_to_hub(
325
  embed_external_files=True
326
  )
327
  ```
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
328
  #### Data Cleaning
329
  1. The video `Pew9CK74axu` is manually cleaned
330
  ```python
@@ -350,7 +335,88 @@ def manual_edit(batch):
350
 
351
  audio_dataset_manual = audio_dataset.map(manual_edit, batched=True, num_proc=8)
352
  ```
353
- 2. General cleansing pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
354
  ```python
355
  import re
356
  import html
@@ -380,12 +446,13 @@ def clean_transcripts(x):
380
  cjk = "[\u3400-\u4db5\u4e00-\u9fa5\u9fa6-\u9fbb\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\uff00-\uffef\u2e80-\u2eff\u3000-\u303f\u31c0-\u31ef\u2f00-\u2fdf\u2ff0-\u2fff\u3100-\u312f\u31a0-\u31bf\ufe10-\ufe1f\ufe30-\ufe4f\u2600-\u26ff\u2700-\u27bf\u3200-\u32ff\u3300-\u33ff]"
381
  x = html.unescape(x)
382
  x = remove_emojies(x)
383
- dots = '\.{3,}'
384
- x = re.sub(rf'{dots}|…|\s|^|$', ' ', x) # expanding space allows matching " uh uh" case
 
385
  x = re.sub(rf"({cjk}|\s)([Uu][mh]|U[MH])({cjk}|\s)", r"\1 \3", x) # uh/um surrounded by cjk or space
386
  x = re.sub(r"([HhEe]mm+|[HE]MM+)", " ", x) # hmm emm
387
  x = re.sub(fr"\*+({cjk}+|[A-Za-z]+)\*+", " ", x) # *叹气*
388
- x = re.sub(r'[呃嗯]', ' ', x) # 呃嗯
389
  def replace_except(pattern, repl, z, excs):
390
  for e, t in excs:
391
  z = z.replace(e, t)
@@ -395,21 +462,28 @@ def clean_transcripts(x):
395
  return z
396
  # remove 恩 except for 恩桥 感恩 恩怨
397
  x = replace_except("恩", ' ', x, excs=[("感恩", "呃"),("恩桥", "嗯"),("恩怨", "emm")])
398
- # remove (...) except for 'Program Files (x86)'
399
- x = re.sub(r'[^()]*)', ' ', x)
400
  x = re.sub(r"\s+", " ", x)
 
401
  x = replace_except(r'\([^()]*\)', ' ', x, excs=[("Program Files (x86)", "呃")])
402
- puncs = r'[,?!。;?!,;~~]'
 
403
  x = re.sub(rf'({puncs})(?:\s*\1)+', r'\1', x) # ??? -> ?
404
  x = re.sub(rf"\s+({puncs})", r'\1', x) # text , -> text,
405
- sp_puncs = r'[?!,;]' # puncs with spaces
406
- x = re.sub(rf"({puncs}*{sp_puncs})([a-zA-Z])", r'\1 \2', x) # text,cont -> text, cont
407
  x = re.sub(rf"^[\s]*{puncs}+", "", x) # leading puncs
408
  x = re.sub(r"\s+", " ", x) # excess spaces
409
  return x.strip()
410
 
411
- audio_dataset_manual_clean = audio_dataset_manual.map(lambda x: {"text": list(map(clean_transcripts, x['text']))}, batched=True, num_proc=8)
412
- # push to hub
 
 
 
 
 
413
  audio_dataset_manual_clean.push_to_hub(
414
  "georgechang8/code_switch_yodas_zh",
415
  config_name="clean",
 
1
  ---
2
  dataset_info:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  - config_name: clean
4
  features:
5
  - name: audio
 
24
  num_examples: 3813
25
  download_size: 3311132741
26
  dataset_size: 3316440190.0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  - config_name: default
28
  features:
29
  - name: audio
 
43
  download_size: 3346820592
44
  dataset_size: 3341105554.6299996
45
  configs:
 
 
 
 
 
 
 
 
46
  - config_name: clean
47
  data_files:
48
  - split: train
 
51
  path: clean/validation-*
52
  - split: test
53
  path: clean/test-*
 
 
 
 
 
 
 
 
54
  - config_name: default
55
  data_files:
56
  - split: train
 
259
  embed_external_files=True
260
  )
261
  ```
262
+ #### Extra (without punctuations)
263
+ Doing step 1-10, but reverse step 5 to look for ones without punctuations, this yields a small extra set:
264
+ ```python
265
+ extra_set = {
266
+ "37s5xmYYSM8",
267
+ "3ZVVBEugui4",
268
+ "-zHxyIuEw-8",
269
+ "Dngt6Ca8-3u",
270
+ "zJcle9SO98Q",
271
+ "murJVhx5dd0",
272
+ "6hCLoOVtM5Y", # test
273
+ "U-1tallz0hM",
274
+ "wfCUHCYJgIU",
275
+ "GrKoml8qb78",
276
+ "YMTMTFpV7_M",
277
+ "GJV0ZRzAARy",
278
+ "BtMii9364Fg",
279
+ "apK8JYOq6gI",
280
+ "IF-GnMzu7y8",
281
+ "0qJ61eujIVo",
282
+ "Okq02I_jTcA",
283
+ "hCnZlSbTht8",
284
+ "rMk21JBTisE", # validation
285
+ "s9qzwyIM3JI",
286
+ "NBf6Z9R1r7I",
287
+ "jIbc2Jzfa0g",
288
+ }
289
+ ```
290
+ ```
291
+ train:
292
+ 20 videos
293
+ validation:
294
+ 1 video
295
+ test:
296
+ 1 video
297
+ DatasetDict({
298
+ train: Dataset({
299
+ features: ['audio', 'text', 'id', 'session_id'],
300
+ num_rows: 5990
301
+ })
302
+ validation: Dataset({
303
+ features: ['audio', 'text', 'id', 'session_id'],
304
+ num_rows: 397
305
+ })
306
+ test: Dataset({
307
+ features: ['audio', 'text', 'id', 'session_id'],
308
+ num_rows: 282
309
+ })
310
+ })
311
+ ```
312
+
313
  #### Data Cleaning
314
  1. The video `Pew9CK74axu` is manually cleaned
315
  ```python
 
335
 
336
  audio_dataset_manual = audio_dataset.map(manual_edit, batched=True, num_proc=8)
337
  ```
338
+ 2. Low log-prob filtering
339
+ Using whisper-medium to compute the logprob, then filter by a handpicked threshold `-3.5`
340
+ ```python
341
+ # Get rid of low-prob videos
342
+ low_prob_set = {
343
+ '9lQs7INyYBQ',
344
+ 'HezOD6XPr_M',
345
+ 'HfeLdctBVGY',
346
+ 'IzfrgOUd2Uc',
347
+ 'UFklIGGKWN0',
348
+ '_x8LwaPRtCE',
349
+ 'eK9m6uCNN6Q',
350
+ 'erbZNpDMHN0',
351
+ 'l9BjfWr1_Pg',
352
+ 'nSStWkJtbR4',
353
+ 'wrEY_EzQEsy',
354
+ '3Zed0NHrmxo',
355
+ 'r29FW7K4iok',
356
+ 'MgdQuY0-abI',
357
+ 'yHh4rM2KX5Q'
358
+ }
359
+ audio_dataset_manual = audio_dataset_manual.filter(lambda batch: [s not in low_prob_set for s in batch['session_id']], num_proc=2, batched=True)
360
+ # 176 - 14 = 161 videos
361
+ ```
362
+ 3. train/dev/test split
363
+ ```python
364
+ from datasets import DatasetDict
365
+
366
+ validation_set = {
367
+ "AyPua3Mi9FU",
368
+ "r29FW7K4iok", # low prob
369
+ "GaUSbuZm5Ec",
370
+ "AKW9vmSy8lQ",
371
+ "3Zed0NHrmxo", # low prob
372
+ "ZHPFLOuT48u",
373
+ "RiCN24FLVLk",
374
+ "zrV_ZNWo8PQ",
375
+ # "rMk21JBTisE", # new (no punc) ==> not in 'default' config
376
+ }
377
+ test_set = {
378
+ "lH7bZ-8hF1o",
379
+ "WF4ovtdi6wu",
380
+ "MgdQuY0-abI", # low prob
381
+ "yHh4rM2KX5Q", # low prob
382
+ "e_cxHBDSqsM",
383
+ "NO6985Bf_Ro",
384
+ # "6hCLoOVtM5Y", # new (no punc) ==> not in 'default' config
385
+ }
386
+
387
+ def train_fn(batch):
388
+ return (z not in (validation_set|test_set) for z in batch['session_id'])
389
+ def validation_fn(batch):
390
+ return (z in validation_set for z in batch['session_id'])
391
+ def test_fn(batch):
392
+ return (z in test_set for z in batch['session_id'])
393
+
394
+ audio_dataset_manual = DatasetDict(
395
+ train=audio_dataset_manual.filter(train_fn, num_proc=2, batched=True),
396
+ validation=audio_dataset_manual.filter(validation_fn, num_proc=2, batched=True),
397
+ test=audio_dataset_manual.filter(test_fn, num_proc=2, batched=True)
398
+ )
399
+ ```
400
+ Don't forget to merge with extra set
401
+ ```python
402
+ from datasets import concatenate_datasets
403
+ ds_extra = load_dataset("georgechang8/code_switch_yodas_zh", "clean_extra") # no longer available
404
+ audio_dataset_manual = DatasetDict({
405
+ split: concatenate_datasets([audio_dataset_manual[split], ds_extra[split]])
406
+ for split in audio_dataset_manual
407
+ })
408
+ ```
409
+ Do sanity check
410
+ ```python
411
+ ds_full = audio_dataset_manual
412
+ for split in ds_full:
413
+ print(split, len(set(ds_full[split]['id'])))
414
+ assert len(set(ds_full['train']['id']) & set(ds_full['validation']['id'])) == 0
415
+ assert len(set(ds_full['train']['id']) & set(ds_full['test']['id'])) == 0
416
+ assert len(set(ds_full['test']['id']) & set(ds_full['validation']['id'])) == 0
417
+ ```
418
+
419
+ 4. General cleansing pipeline
420
  ```python
421
  import re
422
  import html
 
446
  cjk = "[\u3400-\u4db5\u4e00-\u9fa5\u9fa6-\u9fbb\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\uff00-\uffef\u2e80-\u2eff\u3000-\u303f\u31c0-\u31ef\u2f00-\u2fdf\u2ff0-\u2fff\u3100-\u312f\u31a0-\u31bf\ufe10-\ufe1f\ufe30-\ufe4f\u2600-\u26ff\u2700-\u27bf\u3200-\u32ff\u3300-\u33ff]"
447
  x = html.unescape(x)
448
  x = remove_emojies(x)
449
+ x = re.sub(r'\.{3,}', ' ', x)
450
+ x = re.sub(r'…+', ' ', x)
451
+ x = re.sub(r'\s+|^|$', ' ', x) # expanding space allows matching " uh uh" case
452
  x = re.sub(rf"({cjk}|\s)([Uu][mh]|U[MH])({cjk}|\s)", r"\1 \3", x) # uh/um surrounded by cjk or space
453
  x = re.sub(r"([HhEe]mm+|[HE]MM+)", " ", x) # hmm emm
454
  x = re.sub(fr"\*+({cjk}+|[A-Za-z]+)\*+", " ", x) # *叹气*
455
+ x = re.sub(r'[呃嗯]+', ' ', x) # 呃嗯
456
  def replace_except(pattern, repl, z, excs):
457
  for e, t in excs:
458
  z = z.replace(e, t)
 
462
  return z
463
  # remove 恩 except for 恩桥 感恩 恩怨
464
  x = replace_except("恩", ' ', x, excs=[("感恩", "呃"),("恩桥", "嗯"),("恩怨", "emm")])
465
+ x = re.sub(r'([^()]*)', ' ', x) # remove (...)
466
+ x = re.sub(r'[()]+', ' ', x) # remove isolated()
467
  x = re.sub(r"\s+", " ", x)
468
+ # remove (...) except for 'Program Files (x86)'
469
  x = replace_except(r'\([^()]*\)', ' ', x, excs=[("Program Files (x86)", "呃")])
470
+ x = re.sub(r'[()]+', ' ', x) # remove isolated ()
471
+ puncs = r'[,?!。:;~?!,.:;~]'
472
  x = re.sub(rf'({puncs})(?:\s*\1)+', r'\1', x) # ??? -> ?
473
  x = re.sub(rf"\s+({puncs})", r'\1', x) # text , -> text,
474
+ sp_puncs = r'[?!,.;]' # puncs with spaces
475
+ x = re.sub(rf"({puncs}*{sp_puncs})([^\d])", r'\1 \2', x) # text!?cont -> text!? cont
476
  x = re.sub(rf"^[\s]*{puncs}+", "", x) # leading puncs
477
  x = re.sub(r"\s+", " ", x) # excess spaces
478
  return x.strip()
479
 
480
+ def clean_batch(batch):
481
+ return {'text': [clean_transcripts(x) for x in batch['text']]}
482
+
483
+ audio_dataset_manual_clean = audio_dataset_manual.map(clean_batch, batched=True, num_proc=8)
484
+ ```
485
+ 5. Publish
486
+ ```python
487
  audio_dataset_manual_clean.push_to_hub(
488
  "georgechang8/code_switch_yodas_zh",
489
  config_name="clean",
clean_extra/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:04c037c13cc7bc8a8ad632badf600b8276b148ed27cb57294625a3694d02080c
3
- size 17384352
 
 
 
 
clean_extra/train-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c8bd8d4fb44647dea7a8699b5cfb3a8ae3698c8702c59c44bef6ca46057b932b
3
- size 359585165
 
 
 
 
clean_extra/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:6ab559039731f2bf590ebd5e3ba90e83d6308b3b44ade431343acc4bcc57b77f
3
- size 21195835