cheulyop committed on
Commit
6ec1b74
1 Parent(s): 2cadb67

Yield full path to audio files

Browse files
Files changed (1) hide show
  1. ksponspeech.py +17 -17
ksponspeech.py CHANGED
@@ -15,9 +15,9 @@
15
  """The Korean Spontaneous Speech Corpus for Automatic Speech Recognition (KsponSpeech)"""
16
 
17
 
18
- import os
19
- import datasets
20
 
 
21
 
22
  _CITATION = """\
23
  @article{bang2020ksponspeech,
@@ -46,9 +46,7 @@ class KsponSpeech(datasets.GeneratorBasedBuilder):
46
 
47
  @property
48
  def manual_download_instructions(self):
49
- return (
50
- "To use KsponSpeech, data files must be downloaded manually to a local drive. Please submit your request on the official website (https://aihub.or.kr/aidata/105). Once your request is approved, download all files, extract .zip files in one folder, and load the dataset with `datasets.load_dataset('ksponspeech', data_dir='path/to/folder')`."
51
- )
52
 
53
  def _info(self):
54
  return datasets.DatasetInfo(
@@ -66,12 +64,12 @@ class KsponSpeech(datasets.GeneratorBasedBuilder):
66
 
67
  def _split_generators(self, dl_manager):
68
  """Returns SplitGenerators."""
69
- data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
70
  return [
71
  datasets.SplitGenerator(
72
  name=datasets.Split.TRAIN,
73
  gen_kwargs={
74
- "filepath": os.path.join(data_dir, "scripts/train.trn"),
75
  "split": "train",
76
  },
77
  ),
@@ -79,30 +77,32 @@ class KsponSpeech(datasets.GeneratorBasedBuilder):
79
  name=datasets.Split.TEST,
80
  gen_kwargs={
81
  "filepath": {
82
- "clean": os.path.join(data_dir, "scripts/eval_clean.trn"),
83
- "other": os.path.join(data_dir, "scripts/eval_other.trn"),
84
  },
85
- "split": "test"
86
  },
87
  ),
88
  datasets.SplitGenerator(
89
  name=datasets.Split.VALIDATION,
90
  gen_kwargs={
91
- "filepath": os.path.join(data_dir, "scripts/dev.trn"),
92
- "split": "dev",
93
  },
94
  ),
95
  ]
96
 
97
  def _generate_examples(self, filepath, split):
98
- """ Yields examples as (key, example) tuples. """
99
  if split is "test":
100
- with open(filepath["clean"], encoding="utf-8") as f1, open(filepath["other"], encoding="utf-8") as f2:
 
 
101
  data = "\n".join([f1.read().strip(), f2.read().strip()])
102
  for id_, row in enumerate(data.split("\n")):
103
  path, sentence = tuple(row.split(" :: "))
104
  yield id_, {
105
- "path": path,
106
  "sentence": sentence,
107
  }
108
  else:
@@ -111,6 +111,6 @@ class KsponSpeech(datasets.GeneratorBasedBuilder):
111
  for id_, row in enumerate(data.split("\n")):
112
  path, sentence = tuple(row.split(" :: "))
113
  yield id_, {
114
- "path": path,
115
- "sentence": sentence
116
  }
 
15
  """The Korean Spontaneous Speech Corpus for Automatic Speech Recognition (KsponSpeech)"""
16
 
17
 
18
+ from os import path
 
19
 
20
+ import datasets
21
 
22
  _CITATION = """\
23
  @article{bang2020ksponspeech,
 
46
 
47
@property
def manual_download_instructions(self):
    """Tell users how to obtain KsponSpeech, which cannot be fetched automatically."""
    # Access requires an approved request on AI Hub, so the loader can only
    # point users at the request page and the expected local layout.
    return (
        "To use KsponSpeech, data files must be downloaded manually to a local drive. "
        "Please submit your request on the official website (https://aihub.or.kr/aidata/105). "
        "Once your request is approved, download all files, extract .zip files in one folder, "
        "and load the dataset with `datasets.load_dataset('ksponspeech', data_dir='path/to/folder')`."
    )
 
 
50
 
51
  def _info(self):
52
  return datasets.DatasetInfo(
 
64
 
65
def _split_generators(self, dl_manager):
    """Returns SplitGenerators."""
    # The corpus is downloaded manually (see manual_download_instructions);
    # remember its root so _generate_examples can build full audio paths.
    self.data_dir = path.abspath(path.expanduser(dl_manager.manual_dir))

    def script(relpath):
        # Transcript lists live under scripts/ inside the manual data dir.
        return path.join(self.data_dir, relpath)

    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": script("scripts/train.trn"), "split": "train"},
        ),
        datasets.SplitGenerator(
            name=datasets.Split.TEST,
            gen_kwargs={
                # The test split concatenates the "clean" and "other" eval sets.
                "filepath": {
                    "clean": script("scripts/eval_clean.trn"),
                    "other": script("scripts/eval_other.trn"),
                },
                "split": "test",
            },
        ),
        datasets.SplitGenerator(
            name=datasets.Split.VALIDATION,
            gen_kwargs={"filepath": script("scripts/dev.trn"), "split": "validation"},
        ),
    ]
94
 
95
def _generate_examples(self, filepath, split):
    """Yields examples as (key, example) tuples.

    Each transcript row has the form ``<relative audio path> :: <sentence>``;
    the relative path is joined with ``self.data_dir`` so consumers receive
    the full path to the audio file.
    """
    # BUG FIX: the original used `split is "test"`, which tests string
    # identity rather than equality (CPython-interning-dependent, and a
    # SyntaxWarning since Python 3.8).
    if split == "test":
        # The test split is the concatenation of the "clean" and "other"
        # eval transcript lists.
        with open(filepath["clean"], encoding="utf-8") as f1, open(
            filepath["other"], encoding="utf-8"
        ) as f2:
            data = "\n".join([f1.read().strip(), f2.read().strip()])
    else:
        # NOTE(review): these two lines are elided by the diff view
        # (file lines 109-110); reconstructed from the parallel test branch
        # above — confirm against the full file.
        with open(filepath, encoding="utf-8") as f:
            data = f.read().strip()
    for id_, row in enumerate(data.split("\n")):
        # BUG FIX: the original unpacked into a local named `path`, shadowing
        # the `os.path` module imported at file level; `path.join(...)` then
        # resolved to `str.join` and raised TypeError. Use a distinct name so
        # the module stays reachable.
        audio_path, sentence = row.split(" :: ")
        yield id_, {
            "file": path.join(self.data_dir, audio_path),
            "sentence": sentence,
        }