Modalities: Audio, Text
Formats: parquet
Libraries: Datasets, Dask
felixgwu committed
Commit f6edf53
Parent: bb348b2

add slue-sqa5 loading script (#3)

- add slue-sqa5 loading script to slue-phase-2.py (ad4e9da397e39d947d001715bdd6fc64349bb135)

Files changed (1)
  1. slue-phase-2.py +114 -2
slue-phase-2.py CHANGED
@@ -13,6 +13,7 @@ _URL = "https://asappresearch.github.io/slue-toolkit/"
 
 _DL_URLS = {
     "slue-hvb": "data/slue-hvb_blind.zip",
+    "slue-sqa5": "data/slue-sqa5_blind.zip",
 }
 
 _LICENSE = """
@@ -40,6 +41,21 @@ SLUE-HVB dataset contains a subset of the Gridspace-Stanford Harper Valley speec
 
 Additionally, we provide dialog act classification annotation and it is covered with the same license as CC-BY-4.0.
 =======================================================
+SLUE-SQA-5 Dataset
+
+SLUE-SQA-5 Dataset contains question texts and answer strings (question_text, normalized_question_text, and answer_spans column in .tsv files) from these datasets,
+* SQuAD1.1 (for questions whose question_id starts with ‘squad-’)
+* Natural Questions (for questions whose question_id starts with ‘nq-’)
+* WebQuestions (for questions whose question_id starts with ‘wq-’)
+* CuratedTREC (for questions whose question_id starts with ‘trec-’)
+* TriviaQA (for questions whose question_id starts with ‘triviaqa-’)
+Additionally, we provide audio recordings (.wav files in “question” directories) of these questions.
+
+For questions from TriviaQA (questions whose question_id starts with ‘triviaqa-’), their question texts, answer strings, and audio recordings are licensed with the same Apache License 2.0 as TriviaQA (for more detail, please refer to https://github.com/mandarjoshi90/triviaqa/blob/master/LICENSE).
+For questions from the other 4 datasets, their question texts, answer strings, and audio recordings are licensed with Creative Commons Attribution-ShareAlike 4.0 International license.
+
+SLUE-SQA-5 also contains a subset of Spoken Wikipedia, including the audios placed in “document” directories and their transcripts (document_text and normalized_document_text column in .tsv files). Additionally, we provide the text-to-speech alignments (.txt files in “word2time” directories). These contents are licensed with the same Creative Commons (CC BY-SA 4.0) license as Spoken Wikipedia.
+=======================================================
 
 """
 
@@ -56,6 +72,31 @@ _DESCRIPTION = """\
 Spoken Language Understanding Evaluation (SLUE) benchmark Phase 2.
 """
 
+def parse_qa_answer_spans(answer_spans):
+    answer_spans = ast.literal_eval(answer_spans)
+    return [{"answer": a, "start_second": s, "end_second": e} for a, s, e in answer_spans]
+
+def load_word2time(word2time_file):
+    word2time = []
+    with open(word2time_file, "r") as f:
+        for line in f.readlines():
+            entity = line.strip().split('\t')
+            if len(entity) == 1:
+                word = entity[0]
+                normalized_word, start_sec, end_sec = "", -1.0, -1.0
+            else:
+                word, normalized_word, start_sec, end_sec = entity
+                start_sec, end_sec = float(start_sec), float(end_sec)
+            word2time.append(
+                {
+                    "word": word,
+                    "normalized_word": normalized_word,
+                    "start_second": start_sec,
+                    "end_second": end_sec,
+                }
+            )
+    return word2time
+
 class SLUE2Config(datasets.BuilderConfig):
     """BuilderConfig for SLUE."""
 
@@ -83,6 +124,10 @@ class SLUE2(datasets.GeneratorBasedBuilder):
             name="hvb",
             description="SLUE-HVB set.",
         ),
+        SLUE2Config(
+            name="sqa5",
+            description="SLUE-SQA-5 set which includes Spoken Question Answering task.",
+        ),
     ]
 
     def _info(self):
@@ -102,6 +147,34 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                     datasets.Value("string"),
                 ),
             }
+        elif self.config.name == "sqa5":
+            features = {
+                "question_id": datasets.Value("string"),
+                "question_audio": datasets.Audio(sampling_rate=16_000),
+                "question_speaker_id": datasets.Value("string"),
+                "raw_question_text": datasets.Value("string"),
+                "normalized_question_text": datasets.Value("string"),
+                "document_id": datasets.Value("string"),
+                "document_audio": datasets.Audio(sampling_rate=16_000),
+                "document_speaker_id": datasets.Value("string"),
+                "raw_document_text": datasets.Value("string"),
+                "normalized_document_text": datasets.Value("string"),
+                "word2time": datasets.Sequence(
+                    {
+                        "word": datasets.Value("string"),
+                        "normalized_word": datasets.Value("string"),
+                        "start_second": datasets.Value("float64"),
+                        "end_second": datasets.Value("float64"),
+                    }
+                ),
+                "answer_spans": datasets.Sequence(
+                    {
+                        "answer": datasets.Value("string"),
+                        "start_second": datasets.Value("float64"),
+                        "end_second": datasets.Value("float64"),
+                    }
+                ),
+            }
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(features),
@@ -148,13 +221,28 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                 },
             ),
         ]
+        if self.config.name == "sqa5":
+            splits.append(
+                datasets.SplitGenerator(
+                    name="verified_test",
+                    gen_kwargs={
+                        "filepath": os.path.join(
+                            data_dir or "", f"{config_name}_verified-test_blind.tsv"
+                        ),
+                        "data_dir": data_dir,
+                    },
+                )
+            )
         return splits
 
     def _generate_examples(self, filepath, data_dir):
         logger.info(f"generating examples from = {filepath}")
 
         with open(filepath) as f:
-            reader = csv.DictReader(f, delimiter="\t")
+            if self.config.name == "sqa5":
+                reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+            else:
+                reader = csv.DictReader(f, delimiter="\t")
 
             for idx, row in enumerate(reader):
                 if self.config.name == "hvb":
@@ -176,4 +264,28 @@ class SLUE2(datasets.GeneratorBasedBuilder):
                         "intent": row["intent"],
                         "dialog_acts": eval(row.get("dialog_acts", "[]")),
                     }
-                yield idx, example
+                elif self.config.name == "sqa5":
+                    question_audio_file = os.path.join(
+                        data_dir, row["split"], "question", row["question_id"] + ".wav"
+                    )
+                    document_audio_file = os.path.join(
+                        data_dir, row["split"], "document", row["document_id"] + ".wav"
+                    )
+                    word2time_file = os.path.join(
+                        data_dir, row["split"], "word2time", row["document_id"] + ".txt"
+                    )
+                    example = {
+                        "question_id": row["question_id"],
+                        "question_audio": question_audio_file,
+                        "question_speaker_id": row["question_speaker_id"],
+                        "raw_question_text": row["question_text"],
+                        "normalized_question_text": row["normalized_question_text"],
+                        "document_id": row["document_id"],
+                        "document_audio": document_audio_file,
+                        "document_speaker_id": row["document_speaker_id"],
+                        "raw_document_text": row["document_text"],
+                        "normalized_document_text": row["normalized_document_text"],
+                        "word2time": load_word2time(word2time_file),
+                        "answer_spans": parse_qa_answer_spans(row.get("answer_spans", "[]")),
+                    }
+                yield idx, example
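For orientation, here is a minimal usage sketch of the new config. The Hub repository id ("asapp/slue-phase-2") and the trust_remote_code flag are assumptions for illustration; this commit only adds the loading script itself.

from datasets import load_dataset

# Assumed Hub repo id hosting slue-phase-2.py; substitute the actual dataset path.
# trust_remote_code=True is needed by recent `datasets` releases for script-based datasets.
sqa5 = load_dataset("asapp/slue-phase-2", "sqa5", trust_remote_code=True)

# The sqa5 config registers an extra "verified_test" split (see the _split_generators hunk above).
sample = sqa5["verified_test"][0]
print(sample["question_id"], sample["normalized_question_text"])
print(sample["answer_spans"])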
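The two helpers added above assume on-disk formats that this diff does not show. The snippet below is a hedged illustration of those shapes with invented values: word2time files are read as one token per line, tab-separated as word, normalized_word, start_second, end_second (a single-field line keeps the word with placeholder times of -1.0), and each answer_spans cell is a Python-literal list of (answer, start_second, end_second) tuples parsed with ast.literal_eval.

import ast

# Invented word2time content, mirroring what load_word2time reads from a file.
word2time_text = "the\tthe\t0.00\t0.12\n[noise]\nquick\tquick\t0.12\t0.45"
word2time = []
for line in word2time_text.splitlines():
    entity = line.strip().split("\t")
    if len(entity) == 1:
        # Untimed token: only the raw word is present, times default to -1.0.
        word, normalized_word, start_sec, end_sec = entity[0], "", -1.0, -1.0
    else:
        word, normalized_word, start_sec, end_sec = entity
        start_sec, end_sec = float(start_sec), float(end_sec)
    word2time.append(
        {"word": word, "normalized_word": normalized_word,
         "start_second": start_sec, "end_second": end_sec}
    )
print(word2time)

# Invented answer_spans cell, parsed the same way as parse_qa_answer_spans.
raw_spans = "[('harper valley', 3.2, 4.1), ('the bank', 10.0, 10.8)]"
answer_spans = [
    {"answer": a, "start_second": s, "end_second": e}
    for a, s, e in ast.literal_eval(raw_spans)
]
print(answer_spans)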