Tasks: Text Generation
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
Tags: question-generation

Commit: update
Files changed:
- .gitignore +1 -0
- generate_reference_files.py +15 -0
- process.py +8 -6
- qg_subjqa.py +15 -7
.gitignore
CHANGED
@@ -1 +1,2 @@
 SubjQA
+data/processed/default.train.jsonl
generate_reference_files.py
ADDED
@@ -0,0 +1,15 @@
+import os
+from datasets import load_dataset
+
+os.makedirs('./reference_files', exist_ok=True)
+
+
+for split in ['validation', 'test']:
+    dataset = load_dataset('asahi417/qg_newsqa', '', split=split)
+    for data in ['question', 'answer', 'sentence', 'paragraph']:
+        with open('./reference_files/{}-{}.txt'.format(data, split), 'w') as f:
+            if data == 'paragraph':
+                f.write('\n'.join(dataset['paragraph_id']))
+            else:
+                f.write('\n'.join(dataset[data]))
+
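Since the script writes each file with `'\n'.join(...)` (no trailing newline), every reference file should contain exactly one line per example of its split. A quick sanity-check sketch, not part of the commit, mirroring the script's own repo id and empty config name:

```
# Sanity-check sketch (not part of the commit): each reference file should
# hold one line per example of the corresponding split.
from datasets import load_dataset

dataset = load_dataset('asahi417/qg_newsqa', '', split='test')
with open('./reference_files/question-test.txt') as f:
    assert len(f.read().split('\n')) == len(dataset)
```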
process.py
CHANGED
@@ -2,16 +2,12 @@
 You need to run `python -m spacy download en_core_web_sm`.
 Split when uploading to dataset hub by
 ```
-gsplit -l
-gsplit -l 600 -d --additional-suffix=.jsonl train.jsonl train
-gsplit -l 600 -d --additional-suffix=.jsonl valid.jsonl valid
+gsplit -l 1500 -d --additional-suffix=.jsonl default.train.jsonl default.train
 ```
 """
 import json
 import os
 import re
-from tqdm import tqdm
-from itertools import chain
 
 import pandas as pd
 import spacy
@@ -111,4 +107,10 @@ if __name__ == '__main__':
         with open(f'./data/processed/{i}.{s.replace(".csv", ".jsonl")}', 'w') as f:
             f.write('\n'.join([json.dumps(i) for i in output]))
 
-
+    # for s in ["dev", "test", "train"]:
+    #     output = []
+    #     for i in ["books", "electronics", "grocery", "movies", "restaurants", "tripadvisor"]:
+    #         with open(f'./data/processed/{i}.{s}.jsonl', 'r') as f:
+    #             output += [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+    #     with open(f'./data/processed/default.{s}.jsonl', 'w') as f:
+    #         f.write('\n'.join([json.dumps(i) for i in output]))
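`gsplit` in the docstring is GNU split (e.g. as installed by Homebrew coreutils on macOS; plain `split` on Linux behaves the same). The `-d` flag gives numeric suffixes, so the documented command yields `default.train00.jsonl`, `default.train01.jsonl`, and so on, each with at most 1500 lines. A minimal Python sketch of the same operation, in case GNU coreutils is unavailable:

```
# Sketch of what the gsplit command above does: chunk the merged train file
# into numerically suffixed pieces of at most 1500 lines each.
with open('data/processed/default.train.jsonl') as f:
    lines = f.read().splitlines()
for n, start in enumerate(range(0, len(lines), 1500)):
    with open(f'data/processed/default.train{n:02d}.jsonl', 'w') as out:
        out.write('\n'.join(lines[start:start + 1500]))
```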
qg_subjqa.py
CHANGED
@@ -20,7 +20,8 @@ class QGSubjQAConfig(datasets.BuilderConfig):
 
 class QGSubjQA(datasets.GeneratorBasedBuilder):
 
-    BUILDER_CONFIGS = [QGSubjQAConfig(name=i, description=f"SubjQA from domain of `{i}`.") for i in _DOMAINS]
+    BUILDER_CONFIGS = [QGSubjQAConfig(name="default", description="SubjQA from all domain of `{}`.")]
+    BUILDER_CONFIGS += [QGSubjQAConfig(name=i, description=f"SubjQA from domain of `{i}`.") for i in _DOMAINS]
 
     def _info(self):
         return datasets.DatasetInfo(
@@ -44,15 +45,22 @@ class QGSubjQA(datasets.GeneratorBasedBuilder):
         )
 
     def _split_generators(self, dl_manager):
-        downloaded_file = dl_manager.download_and_extract({
-            'train': f"{_URL}/{self.config.name}.train.jsonl",
-            'dev': f"{_URL}/{self.config.name}.dev.jsonl",
-            'test': f"{_URL}/{self.config.name}.test.jsonl"
-        })
+        if self.config.name == 'default':
+            downloaded_file = dl_manager.download_and_extract({
+                'train': [f"{_URL}/{i}.train.jsonl" for i in _DOMAINS],
+                'dev': [f"{_URL}/{i}.dev.jsonl" for i in _DOMAINS],
+                'test': [f"{_URL}/{i}.test.jsonl" for i in _DOMAINS]
+            })
+        else:
+            downloaded_file = dl_manager.download_and_extract({
+                'train': f"{_URL}/{self.config.name}.train.jsonl",
+                'dev': f"{_URL}/{self.config.name}.dev.jsonl",
+                'test': f"{_URL}/{self.config.name}.test.jsonl"
+            })
         return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_file["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": downloaded_file["dev"]}),
-           datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": downloaded_file["test"]})
+           datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": downloaded_file["test"]})
         ]
 
     def _generate_examples(self, filepaths):
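After this change, the loader exposes one config per domain plus a merged `default` config whose splits download every domain's files. A hypothetical usage sketch; the hub repo id `asahi417/qg_subjqa` is an assumption inferred from the `asahi417` namespace used elsewhere in this commit:

```
# Hypothetical usage of the configs added above (repo id is an assumption).
from datasets import load_dataset

books = load_dataset('asahi417/qg_subjqa', 'books')    # a single domain
merged = load_dataset('asahi417/qg_subjqa', 'default') # all domains combined
print(len(merged['train']), len(books['train']))
```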