Datasets: lmqg/qg_squadshifts
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Source Datasets: subjqa
Tags: question-generation

asahi417 committed
Commit: 440df90
Parent(s): 7e521d5

.gitattributes CHANGED
@@ -47,3 +47,36 @@ data/processed/amazon.train.jsonl filter=lfs diff=lfs merge=lfs -text
 data/processed/new_wiki.train.jsonl filter=lfs diff=lfs merge=lfs -text
 data/processed/nyt.test.jsonl filter=lfs diff=lfs merge=lfs -text
 data/processed/nyt.validation.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.train02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/new_wiki.test01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/new_wiki.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.test00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.test01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.validation00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.test00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.train00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.test01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/new_wiki.test00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.test02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.test02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.train00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.train01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/new_wiki.train01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.test01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.test00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.test03.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/new_wiki.train00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.validation01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.test03.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.validation01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.validation01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/amazon.train02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.test02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.train00.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.train01.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/nyt.train02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.test03.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/new_wiki.test02.jsonl filter=lfs diff=lfs merge=lfs -text
+data/processed/reddit.train01.jsonl filter=lfs diff=lfs merge=lfs -text
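
The rules added above register the new sharded JSONL files with Git LFS, mirroring the existing patterns for the unsharded files. How the shards themselves were produced is not part of this commit; the sketch below is only an illustration of one way a JSONL file could be split into the numbered pieces these rules track (the split_jsonl helper, the shard size, and the naming are assumptions, not the maintainer's actual tooling).

from pathlib import Path

def split_jsonl(src: Path, out_dir: Path, max_records: int = 50_000) -> list[Path]:
    """Hypothetical helper: split e.g. nyt.train.jsonl into
    nyt.train00.jsonl, nyt.train01.jsonl, ... under out_dir."""
    out_dir.mkdir(parents=True, exist_ok=True)
    stem = src.name[: -len(".jsonl")]  # e.g. "nyt.train"
    lines = [l for l in src.read_text().splitlines() if l.strip()]
    shards = []
    for i in range(0, len(lines), max_records):
        shard = out_dir / f"{stem}{i // max_records:02d}.jsonl"
        shard.write_text("\n".join(lines[i : i + max_records]) + "\n")
        shards.append(shard)
    return shards

Running `git lfs track "data/processed/*.jsonl"` before committing would produce an equivalent .gitattributes entry as a single wildcard rule rather than one line per shard.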

The following LFS-tracked shard files also changed in this commit; their diffs are too large to render inline (see the raw files):

data/processed/amazon.test00.jsonl
data/processed/amazon.test01.jsonl
data/processed/amazon.test02.jsonl
data/processed/amazon.test03.jsonl
data/processed/amazon.train00.jsonl
data/processed/amazon.train01.jsonl
data/processed/amazon.train02.jsonl
data/processed/amazon.validation00.jsonl
data/processed/amazon.validation01.jsonl
data/processed/new_wiki.test00.jsonl
data/processed/new_wiki.test01.jsonl
data/processed/new_wiki.test02.jsonl
data/processed/new_wiki.train00.jsonl
data/processed/new_wiki.train01.jsonl
data/processed/new_wiki.validation00.jsonl
data/processed/nyt.test00.jsonl
data/processed/nyt.test01.jsonl
data/processed/nyt.test02.jsonl
data/processed/nyt.test03.jsonl
data/processed/nyt.train00.jsonl
data/processed/nyt.train01.jsonl
data/processed/nyt.train02.jsonl
data/processed/nyt.validation00.jsonl
data/processed/nyt.validation01.jsonl
data/processed/reddit.test00.jsonl
data/processed/reddit.test01.jsonl
data/processed/reddit.test02.jsonl
data/processed/reddit.test03.jsonl
data/processed/reddit.train00.jsonl
data/processed/reddit.train01.jsonl
data/processed/reddit.train02.jsonl
data/processed/reddit.validation00.jsonl
data/processed/reddit.validation01.jsonl
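
Because these shards are stored in Git LFS, the Hub cannot render their diffs, but each one can still be fetched through the resolve endpoint used by the loading script below; the raw endpoint would only return the small LFS pointer file. A minimal sketch, assuming network access and that the shard layout matches the file list above (the expected record keys are taken from the feature diff in this commit):

import json
import requests

# One of the LFS-tracked shards listed above; resolve/ follows the LFS
# pointer and returns the actual JSONL content.
url = ("https://huggingface.co/datasets/lmqg/qg_squadshifts"
       "/resolve/main/data/processed/new_wiki.test00.jsonl")

resp = requests.get(url, timeout=60)
resp.raise_for_status()

records = [json.loads(line) for line in resp.text.splitlines() if line.strip()]
print(len(records), "records in new_wiki.test00.jsonl")
print(sorted(records[0]))  # expected keys include "question", "answer", "paragraph"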
 
qg_squadshifts.py CHANGED
@@ -4,7 +4,7 @@ from itertools import chain
 import datasets
 
 logger = datasets.logging.get_logger(__name__)
-_VERSION = "1.0.0"
+_VERSION = "5.0.1"
 _CITATION = """
 @inproceedings{ushio-etal-2022-generative,
 title = "{G}enerative {L}anguage {M}odels for {P}aragraph-{L}evel {Q}uestion {G}eneration",
@@ -19,7 +19,7 @@ _CITATION = """
 }
 """
 _DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-website/index.html) dataset for question generation (QG) task."""
-_URL = 'https://huggingface.co/datasets/lmqg/qg_squadshifts/raw/main/data/processed'
+_URL = 'https://huggingface.co/datasets/lmqg/qg_squadshifts/resolve/main/data/processed'
 _FILES = {
 str(datasets.Split.TEST): {
 'new_wiki': [f'{_URL}/new_wiki.test{i:02d}.jsonl' for i in range(3)],
@@ -84,7 +84,7 @@ class QGSQuADShifts(datasets.GeneratorBasedBuilder):
 description=_DESCRIPTION,
 features=datasets.Features(
 {
-"answer": datasets.Value("string"),
+"answer": datasets.Value("string"), "paragraph_question": datasets.Value("string"),
 "question": datasets.Value("string"),
 "sentence": datasets.Value("string"),
 "paragraph": datasets.Value("string"),