georgechang8 committed
Commit 3263885
1 Parent(s): b492630

Fixed speaker and condition_on_prev

30s/test-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8a419f58149fd0f9f330d3874be1d08a4d618e664ae71410df58b6d1b0e1892f
-size 104733846
+oid sha256:3623046a97ee33d69cf35b32e57da90e51f0199e2da152df2dfc0be89aff4df8
+size 105200894
30s/train-00000-of-00002.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fbab0cf21cc59f9f06d89f45bd65dd9a7e1149a4393a710545576a466b3dbfa7
-size 506673760
+oid sha256:085d086422efc57578e77008a77248947d490f5dbfc7aa581355e26669b739f8
+size 505723004
30s/train-00001-of-00002.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:089bacf8cf6b572a952a7f677379e4c81a8c76ca6e696c605a63b74a4816c6df
-size 506694259
+oid sha256:9462ae37883513eb3377b3f7d369c15bbbb85dd4b6fd336fe1140ca9f224f27e
+size 506341012
30s/validation-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:33642e67df5b95a7cdd6f467972a7d44e60f62b90aae1ce8d60b9911249c890a
-size 105777914
+oid sha256:35c62cf88a499d2133f5f8ccb94ff514c4ed5dce4c9b03397f58d5508734f3e2
+size 105566982
30s_augment/train-00000-of-00002.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94def5fa4ae0d5268f0eb3b55853153021eb99e611aab01dd3a7cff3c3e43392
-size 508144100
30s_augment/train-00001-of-00002.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:994d05eb3c8473246146f05a8de0b3df0f3229caecc6762b0f16831c65fc4f66
-size 506643252
30s_demucs/test-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e7a3a8cd0ded05a0833d58d946cd290379dab9929e835718895e0b5ecabf5973
-size 111773726
30s_demucs/train-00000-of-00002.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:69d92b10a3c46d0942af4c2f65db968f4cb7c088cb11c91cc88932b6eb2391cf
-size 541349063
30s_demucs/train-00001-of-00002.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:7c9aafa33523b2c55e1526321cdfc17a6bc3cfb1af673f16f36847bd313afb6f
-size 538618160
30s_demucs/validation-00000-of-00001.parquet DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e78d86d51ee497881a0853498c33051db37e42e8dba38ebd7731f44740eac2a2
-size 113051374
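
The parquet changes above only swap Git LFS pointers (new `oid sha256` and `size`). A minimal sketch for checking that a locally downloaded shard matches its new pointer; the repo-relative path below is illustrative, not part of the commit:

```python
import hashlib
import os

def lfs_pointer_fields(path, chunk_size=1 << 20):
    """Return the sha256 oid and byte size a Git LFS pointer records for `path`."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest(), os.path.getsize(path)

# Illustrative path: compare the result against the "+oid" / "+size" lines above.
oid, size = lfs_pointer_fields("30s/test-00000-of-00001.parquet")
print(oid, size)
```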
README.md CHANGED
@@ -6,74 +6,6 @@ license: cc-by-sa-4.0
 size_categories:
 - 10K<n<100K
 dataset_info:
-- config_name: 30s
-  features:
-  - name: id
-    dtype: string
-  - name: audio
-    dtype:
-      audio:
-        sampling_rate: 16000
-  - name: transcription
-    dtype: string
-  - name: condition_on_prev
-    dtype: string
-  splits:
-  - name: train
-    num_bytes: 1131899415.68
-    num_examples: 1315
-  - name: validation
-    num_bytes: 118639586.0
-    num_examples: 139
-  - name: test
-    num_bytes: 120445541.0
-    num_examples: 140
-  download_size: 1223879779
-  dataset_size: 1370984542.68
-- config_name: 30s_augment
-  features:
-  - name: id
-    dtype: string
-  - name: audio
-    dtype:
-      audio:
-        sampling_rate: 16000
-  - name: transcription
-    dtype: string
-  - name: duration
-    dtype: float32
-  - name: condition_on_prev
-    dtype: string
-  splits:
-  - name: train
-    num_bytes: 1139055524.508
-    num_examples: 1332
-  download_size: 1014787352
-  dataset_size: 1139055524.508
-- config_name: 30s_demucs
-  features:
-  - name: id
-    dtype: string
-  - name: audio
-    dtype:
-      audio:
-        sampling_rate: 16000
-  - name: transcription
-    dtype: string
-  - name: condition_on_prev
-    dtype: string
-  splits:
-  - name: train
-    num_bytes: 1131902020.68
-    num_examples: 1315
-  - name: validation
-    num_bytes: 118639860.0
-    num_examples: 139
-  - name: test
-    num_bytes: 120445813.0
-    num_examples: 140
-  download_size: 1304792323
-  dataset_size: 1370987693.68
 - config_name: default
   features:
   - name: id
@@ -109,34 +41,22 @@ dataset_info:
   download_size: 1223500329
   dataset_size: 1227500846.4050002
 configs:
-- config_name: 30s
-  data_files:
-  - split: train
-    path: 30s/train-*
-  - split: validation
-    path: 30s/validation-*
-  - split: test
-    path: 30s/test-*
-- config_name: 30s_augment
-  data_files:
-  - split: train
-    path: 30s_augment/train-*
-- config_name: 30s_demucs
-  data_files:
-  - split: train
-    path: 30s_demucs/train-*
-  - split: validation
-    path: 30s_demucs/validation-*
-  - split: test
-    path: 30s_demucs/test-*
 - config_name: default
   data_files:
-  - split: train
-    path: data/train-*
-  - split: test
-    path: data/test-*
-  - split: validation
-    path: data/validation-*
+  - path: data/train-*
+    split: train
+  - path: data/test-*
+    split: test
+  - path: data/validation-*
+    split: validation
+- config_name: 30s
+  data_files:
+  - path: 30s/train-*
+    split: train
+  - path: 30s/validation-*
+    split: validation
+  - path: 30s/test-*
+    split: test
 ---
 # Dataset Card for Dataset Name
 
@@ -145,6 +65,18 @@ This dataset is derived from CAiRE/ASCEND. More information is available at http
 - Removed 嗯 呃 um uh
 - Resolved [UNK]'s using whisper-medium
 
+## Usage
+- Default utterances with cleaned transcripts
+```python
+from datasets import load_dataset
+data = load_dataset("georgechang8/ASCEND_CLEAN") # add split="train" for train set, etc.
+```
+- Concatenated 30s utterances with cleaned transcripts
+  - https://github.com/George0828Zhang/distil-whisper/blob/main/training/run_concatenate.py
+```python
+data = load_dataset("georgechang8/ASCEND_CLEAN", "30s") # add split="train" for train set, etc.
+```
+
 ## Dataset Details
 
 ### Dataset Description
@@ -287,8 +219,6 @@ def fix_unk(sample, adjusted_dict):
     adjustment = transcription
     return {"transcription": adjustment}
 
-
-
 data = DatasetDict({
     split: data_raw[split].map(lambda x: fix_unk(x, adjusted_transcripts[split]), load_from_cache_file=False)
     for split in data_raw
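
With the `30s_augment` and `30s_demucs` shards removed, the card now describes only the `default` and `30s` configs, and the commit message notes a fix to `condition_on_prev`. A minimal sketch of loading the updated `30s` config and spot-checking that field, assuming `condition_on_prev` remains a string column as in the previous card metadata:

```python
from datasets import load_dataset

# Config name and split names come from the updated configs section in the README diff above.
data = load_dataset("georgechang8/ASCEND_CLEAN", "30s", split="validation")

sample = data[0]
print(sample["id"], sample["transcription"])
# Assumed to still be present after this commit; it was a string feature in the earlier card.
print(sample.get("condition_on_prev"))
```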