Datasets: lmqg / qa_squad

Sub-tasks: extractive-qa
Languages: English
Multilinguality: monolingual
Size Categories: 1M<
Source Datasets: extended|wikipedia
asahi417 committed
Commit 3b31cf0
1 Parent(s): 0eddab4

Files changed (3):
  1. README.md +0 -2
  2. process.py +26 -0
  3. qa_squad.py +14 -20
README.md CHANGED
@@ -51,7 +51,6 @@ The data fields are the same among all splits.
 ## Citation Information
 
 ```
-
 @article{2016arXiv160605250R,
        author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
                  Konstantin and {Liang}, Percy},
@@ -63,5 +62,4 @@ The data fields are the same among all splits.
        archivePrefix = {arXiv},
        eprint = {1606.05250},
 }
-
 ```
 
process.py ADDED
@@ -0,0 +1,26 @@
+import os
+import json
+from datasets import load_dataset
+
+os.makedirs('datasets', exist_ok=True)
+
+data = load_dataset("lmqg/qg_squad")
+_id = 0
+for _split in data:
+    output = []
+    for d in data[_split]:
+        a = d['answer']
+        p = d['paragraph']
+        output.append({
+            "id": str(_id),
+            "title": "None",
+            "context": d['paragraph'],
+            "question": d['question'],
+            "answers": {
+                "text": [a],
+                "answer_start": [p.index(a)]
+            }
+        })
+        _id += 1
+    with open(f'datasets/{_split}.jsonl', 'w') as f:
+        f.write('\n'.join([json.dumps(i) for i in output]))
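
Note that process.py derives `answer_start` with `p.index(a)`, i.e. the offset of the first occurrence of the answer in the paragraph (raising `ValueError` if the answer is absent). A minimal sanity check over the exported files, assuming the `datasets/train.jsonl` output written above:

```
import json

# Check that every exported answer_start offset recovers the
# answer text from its context (first-occurrence semantics).
with open('datasets/train.jsonl') as f:
    for line in f:
        record = json.loads(line)
        answer = record['answers']['text'][0]
        start = record['answers']['answer_start'][0]
        assert record['context'][start:start + len(answer)] == answer
```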
qa_squad.py CHANGED
@@ -8,27 +8,21 @@ _VERSION = "0.0.0"
 _NAME = "qa_squad"
 _DESCRIPTION = """SQuAD with the train/validation/test split used in SQuAD QG"""
 _CITATION = """
-@inproceedings{du-cardie-2018-harvesting,
-    title = "Harvesting Paragraph-level Question-Answer Pairs from {W}ikipedia",
-    author = "Du, Xinya  and
-      Cardie, Claire",
-    booktitle = "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
-    month = jul,
-    year = "2018",
-    address = "Melbourne, Australia",
-    publisher = "Association for Computational Linguistics",
-    url = "https://aclanthology.org/P18-1177",
-    doi = "10.18653/v1/P18-1177",
-    pages = "1907--1917",
-    abstract = "We study the task of generating from Wikipedia articles question-answer pairs that cover content beyond a single sentence. We propose a neural network approach that incorporates coreference knowledge via a novel gating mechanism. As compared to models that only take into account sentence-level information (Heilman and Smith, 2010; Du et al., 2017; Zhou et al., 2017), we find that the linguistic knowledge introduced by the coreference representation aids question generation significantly, producing models that outperform the current state-of-the-art. We apply our system (composed of an answer span extraction system and the passage-level QG system) to the 10,000 top ranking Wikipedia articles and create a corpus of over one million question-answer pairs. We provide qualitative analysis for the this large-scale generated corpus from Wikipedia.",
+@article{2016arXiv160605250R,
+       author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
+                 Konstantin and {Liang}, Percy},
+        title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
+      journal = {arXiv e-prints},
+         year = 2016,
+          eid = {arXiv:1606.05250},
+        pages = {arXiv:1606.05250},
+archivePrefix = {arXiv},
+       eprint = {1606.05250},
 }
 """
-_BASE_URL = "https://huggingface.co/datasets/lmqg/qa_squad/resolve/main/dataset"
-_URLS = {
-    str(datasets.Split.TRAIN): f'{_BASE_URL}/train.json',
-    str(datasets.Split.VALIDATION): f'{_BASE_URL}/dev.json',
-    str(datasets.Split.TEST): f'{_BASE_URL}/test.json'
-}
+_BASE_URL = "https://huggingface.co/datasets/lmqg/qa_squad/resolve/main/datasets"
+_URLS = {k: f'{_BASE_URL}/{k}.json' for k in
+         [str(datasets.Split.TEST), str(datasets.Split.TRAIN), str(datasets.Split.VALIDATION)]}
 
 
 class QASquadConfig(datasets.BuilderConfig):
@@ -104,4 +98,4 @@ class QASquad(datasets.GeneratorBasedBuilder):
                         "text": answers,
                     },
                 }
-            key += 1
+                key += 1
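
Since `str(datasets.Split.TRAIN)` evaluates to `"train"` (and likewise `"validation"` and `"test"`), the new `_URLS` comprehension is equivalent to the old literal mapping except for two details: the base path now ends in `datasets` rather than `dataset`, and the validation split is fetched from `validation.json` instead of `dev.json`. A sketch of what the comprehension expands to:

```
_BASE_URL = "https://huggingface.co/datasets/lmqg/qa_squad/resolve/main/datasets"

# What the dictionary comprehension evaluates to:
_URLS = {
    "test": f"{_BASE_URL}/test.json",
    "train": f"{_BASE_URL}/train.json",
    "validation": f"{_BASE_URL}/validation.json",  # the old literal pointed at dev.json
}
```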
 
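For completeness, a minimal usage sketch, assuming the exported split files are in place on the Hub so the updated loader can resolve its URLs:

```
from datasets import load_dataset

# Loads train/validation/test through the updated qa_squad.py script.
data = load_dataset("lmqg/qa_squad")
print(data)
```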