Sub-tasks: extractive-qa
Languages: Korean
Multilinguality: monolingual
Size Categories: 10K<n<100K
Language Creators: found
Annotations Creators: crowdsourced
Commit 2119133 by albertvillanova
Parent: fa44f1d

Support streaming (#2)
- Use iter_files (6bb8b2d4feba7f52ab7084e2f3d4e9abc6418091)
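
The crux of the change: `os.scandir` requires a local, already-extracted directory, while the download manager's `iter_files` yields individual file paths from one or more roots and also works when the data is streamed rather than extracted to disk. A rough local-only sketch of that behavior, assuming nothing beyond the standard library (`iter_files_sketch` is a hypothetical name, not the actual `datasets` implementation):

import os

def iter_files_sketch(paths):
    # Hypothetical stand-in for dl_manager.iter_files, local files only.
    # The real method additionally resolves paths inside archives and
    # remote files when streaming; this sketch only walks local roots.
    if isinstance(paths, str):
        paths = [paths]
    for root in paths:
        if os.path.isfile(root):
            yield root  # a root may already be a single file
            continue
        for dirpath, _dirnames, filenames in os.walk(root):
            for filename in sorted(filenames):  # deterministic order
                yield os.path.join(dirpath, filename)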

Files changed (1): squad_kor_v2.py (+41, −37)
--- a/squad_kor_v2.py
+++ b/squad_kor_v2.py
@@ -17,7 +17,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -105,44 +104,49 @@ class SquadKorV2(datasets.GeneratorBasedBuilder):
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
         return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"dirs": downloaded_files["train"]}),
             datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"dirs": downloaded_files["validation"]}
+                name=datasets.Split.TRAIN,
+                gen_kwargs={
+                    "filepaths": dl_manager.iter_files(downloaded_files["train"]),
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepaths": dl_manager.iter_files(downloaded_files["validation"]),
+                },
             ),
         ]
 
-    def _generate_examples(self, dirs):
+    def _generate_examples(self, filepaths):
         """Yields examples."""
-
-        for d in dirs:
-            filepaths = sorted(os.scandir(d), key=lambda x: x.name)
-            for filepath in filepaths:
-                with open(filepath.path, encoding="utf-8") as f:
-                    squad = json.load(f)
-                    for example in squad["data"]:
-                        title = example.get("title", "").strip()
-                        url = example.get("url", "").strip()
-                        raw_html = example.get("raw_html", "").strip()
-                        context = example["context"].strip()
-                        for qa in example["qas"]:
-                            question = qa["question"].strip()
-                            answer = qa["answer"]
-                            id_ = qa["id"]
-
-                            answer_start = answer["answer_start"]
-                            html_answer_start = answer["html_answer_start"]
-                            answer_text = answer["text"].strip()
-
-                            yield id_, {
-                                "title": title,
-                                "context": context,
-                                "question": question,
-                                "id": id_,
-                                "answer": {
-                                    "answer_start": answer_start,
-                                    "html_answer_start": html_answer_start,
-                                    "text": answer_text,
-                                },
-                                "url": url,
-                                "raw_html": raw_html,
-                            }
+        for filepath in filepaths:
+            with open(filepath, encoding="utf-8") as f:
+                squad = json.load(f)
+                for example in squad["data"]:
+                    title = example.get("title", "").strip()
+                    url = example.get("url", "").strip()
+                    raw_html = example.get("raw_html", "").strip()
+                    context = example["context"].strip()
+                    for qa in example["qas"]:
+                        question = qa["question"].strip()
+                        answer = qa["answer"]
+                        id_ = qa["id"]
+
+                        answer_start = answer["answer_start"]
+                        html_answer_start = answer["html_answer_start"]
+                        answer_text = answer["text"].strip()
+
+                        yield id_, {
+                            "title": title,
+                            "context": context,
+                            "question": question,
+                            "id": id_,
+                            "answer": {
+                                "answer_start": answer_start,
+                                "html_answer_start": html_answer_start,
+                                "text": answer_text,
+                            },
+                            "url": url,
+                            "raw_html": raw_html,
+                        }
 
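
With this change in place, a quick way to sanity-check streaming (assuming the canonical `squad_kor_v2` dataset id on the Hub):

from datasets import load_dataset

# Streaming mode: files are opened lazily through iter_files instead of
# being scanned from a fully extracted local directory.
ds = load_dataset("squad_kor_v2", split="train", streaming=True)

# Pull one example to confirm the generator works end to end.
print(next(iter(ds)))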