speech-test committed on
Commit 108f378
Parent: e45c283
Files changed (2)
  1. sd.json +3 -0
  2. superb_dummy.py +27 -0
sd.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:43d6de49a557a63b23147e1b16c10fb624d220b40afc7b620dd1cf9ac540c739
+size 6949114
superb_dummy.py CHANGED
@@ -217,6 +217,33 @@ class Superb(datasets.GeneratorBasedBuilder):
             url="https://sail.usc.edu/iemocap/",
             data_url="er.json",
         ),
+        SuperbConfig(
+            name="sd",
+            description=textwrap.dedent(
+                """\
+                Speaker Diarization (SD) predicts `who is speaking when` for each timestamp, and multiple speakers can
+                speak simultaneously. The model has to encode rich speaker characteristics for each frame and should be
+                able to represent mixtures of signals. [LibriMix] is adopted where LibriSpeech
+                train-clean-100/dev-clean/test-clean are used to generate mixtures for training/validation/testing.
+                We focus on the two-speaker scenario as the first step. The time-coded speaker labels were generated using
+                alignments from Kaldi LibriSpeech ASR model. The evaluation metric is diarization error rate (DER)."""
+            ),
+            features=datasets.Features(
+                {
+                    "file": datasets.Value("string"),
+                    "speech": datasets.Sequence(datasets.Value("float32")),
+                    "speakers": [
+                        {
+                            "speaker_id": datasets.Value("string"),
+                            "start": datasets.Value("int64"),
+                            "end": datasets.Value("int64"),
+                        }
+                    ],
+                }
+            ),
+            url="https://github.com/ftshijt/LibriMix",
+            data_url="sd.json",
+        ),
     ]
 
     def _info(self):
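
For reference, a minimal sketch of how the new "sd" config could be consumed once sd.json has been pulled from Git LFS. It assumes superb_dummy.py and sd.json sit in the working directory; recent `datasets` releases may additionally require `trust_remote_code=True` for script-based datasets. The field names mirror the features declared in the diff above; everything else here is illustrative, not part of the committed script.

# A minimal sketch, assuming superb_dummy.py and sd.json are in the
# current directory and that the generator yields examples matching
# the "sd" features declared above.
from datasets import load_dataset

dsets = load_dataset("./superb_dummy.py", "sd")
split = next(iter(dsets.values()))   # whichever split(s) the script defines
example = split[0]

print(example["file"])                # identifier of the mixture
print(len(example["speech"]))         # number of float32 samples in the waveform
for seg in example["speakers"]:       # one entry per speaker segment
    # start/end are int64 indices per the declared schema
    print(seg["speaker_id"], seg["start"], seg["end"])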