Datasets:
Tasks: Text Generation
Modalities: Text
Sub-tasks: language-modeling
Languages: English
Size: 10K - 100K
ArXiv:
License:
update
- README.md +0 -0
- data/processed/amazon.test00.jsonl +0 -0
- data/processed/amazon.test01.jsonl +0 -0
- data/processed/amazon.test02.jsonl +0 -0
- data/processed/amazon.test03.jsonl +0 -0
- data/processed/amazon.test04.jsonl +0 -0
- data/processed/new_wiki.test00.jsonl +0 -0
- data/processed/new_wiki.test01.jsonl +0 -0
- data/processed/new_wiki.test02.jsonl +0 -0
- data/processed/new_wiki.test03.jsonl +0 -0
- data/processed/nyt.test00.jsonl +0 -0
- data/processed/nyt.test01.jsonl +0 -0
- data/processed/nyt.test02.jsonl +0 -0
- data/processed/nyt.test03.jsonl +0 -0
- data/processed/nyt.test04.jsonl +0 -0
- data/processed/reddit.test00.jsonl +0 -0
- data/processed/reddit.test01.jsonl +0 -0
- data/processed/reddit.test02.jsonl +0 -0
- data/processed/reddit.test03.jsonl +0 -0
- data/processed/reddit.test04.jsonl +0 -0
- generate_reference_files.py +0 -0
- process.py +104 -0
- qg_squadshift.py +79 -0
README.md
ADDED
File without changes
data/processed/*.jsonl (all 19 chunks listed above)
ADDED
The diff for each of these files is too large to render; see the raw diffs.
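Each chunk is plain JSON Lines, one record per line, so it can be inspected without the loading script. A minimal sketch (the path is one of the committed chunks; field names follow process.py below):

import json

# Read one committed chunk directly; each line is one QG-formatted record.
with open('data/processed/amazon.test00.jsonl') as f:
    records = [json.loads(line) for line in f if line.strip()]
print(records[0]['question'], '->', records[0]['answer'])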
generate_reference_files.py
ADDED
File without changes
process.py
ADDED
@@ -0,0 +1,104 @@
""" Script to process raw SQuADshift file for Question Generation format

cd data/processed
gsplit -b 6M -d --additional-suffix=.jsonl new_wiki.test.jsonl new_wiki.test
gsplit -b 6M -d --additional-suffix=.jsonl nyt.test.jsonl nyt.test
gsplit -b 6M -d --additional-suffix=.jsonl reddit.test.jsonl reddit.test
gsplit -b 6M -d --additional-suffix=.jsonl amazon.test.jsonl amazon.test

rm -rf new_wiki.test.jsonl
rm -rf nyt.test.jsonl
rm -rf reddit.test.jsonl
rm -rf amazon.test.jsonl
"""
import json
import os
import re
import spacy

from tqdm import tqdm
from datasets import load_dataset

DATASET_NAME = "squadshifts"
DATASET_TYPES = ['new_wiki', 'nyt', 'reddit', 'amazon']
HIGHLIGHT_TOKEN = '<hl>'
SPLITTER = spacy.load('en_core_web_sm')


def get_sentence(document: str): return [str(sent) for sent in SPLITTER(document).sents]


def process_single_data(question: str, paragraph: str, answer: str):
    """ Convert single raw json data into QG format """
    example = {'question': question, 'paragraph': paragraph, 'answer': answer}
    start = example['paragraph'].find(example['answer'])
    end = start + len(answer)
    assert paragraph[start:end] == answer
    # get sentence
    before_tmp = get_sentence(example['paragraph'][:start])
    if len(before_tmp) == 0:
        before = ''
        before_sentence = ''
    else:
        if before_tmp[-1].endswith('.'):
            before = ' '.join(before_tmp)
            before_sentence = ''
        else:
            before = ' '.join(before_tmp[:-1])
            before_sentence = before_tmp[-1]
            before_sentence = before_sentence if before_sentence.endswith(' ') else f'{before_sentence} '
    after_tmp = get_sentence(example['paragraph'][start + len(example['answer']):])
    if len(after_tmp) == 0:
        after = ''
        after_sentence = ''
    else:
        after = ' '.join(after_tmp[1:])
        after_sentence = after_tmp[0]
        after_sentence = after_sentence if after_sentence.startswith(' ') else f' {after_sentence}'
    example['sentence'] = f"{before_sentence}{example['answer']}{after_sentence}"

    # get paragraph_sentence
    before = '' if before == '' else f'{before} '
    after = '' if after == '' else f' {after}'
    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)

    # get paragraph_answer
    source_text = '{0}{1} {2} {1}{3}'.format(
        example['paragraph'][:start], HIGHLIGHT_TOKEN, example['answer'],
        example['paragraph'][start + len(example['answer']):])
    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)

    # get sentence_answer
    if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
        before = ''
    else:
        before = before_tmp[-1] if before_tmp[-1].endswith(' ') else f'{before_tmp[-1]} '
    if len(after_tmp) == 0:
        after = ''
    else:
        after = after_tmp[0] if after_tmp[0].startswith(' ') else f' {after_tmp[0]}'
    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)

    return example


if __name__ == '__main__':
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    for data_type in DATASET_TYPES:
        dataset = load_dataset(DATASET_NAME, data_type)
        for _split in dataset.keys():
            tmp_dataset = dataset[_split]
            with open(f'{output}/{data_type}.{_split}.jsonl', 'w') as f:
                for single_data in tqdm(tmp_dataset):
                    answer_str = single_data['answers']['text']
                    question_str = single_data['question']
                    paragraph_str = single_data['context']
                    if type(answer_str) == list:
                        answer_str = answer_str[0]
                    assert type(answer_str) is str, answer_str
                    assert type(question_str) is str, question_str
                    assert type(paragraph_str) is str, paragraph_str
                    single_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
                    f.write(json.dumps(single_data) + '\n')
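For reference, a minimal sketch of the record this script emits, using a hypothetical toy input. Only paragraph_answer is shown verbatim, since the sentence-level fields depend on spaCy's sentence splitter; it assumes process_single_data from the script above is in scope:

# Minimal sketch: a hypothetical toy input run through process_single_data.
example = process_single_data(
    question='Where is the Eiffel Tower?',
    paragraph='The Eiffel Tower is in Paris. It was completed in 1889.',
    answer='Paris',
)
print(sorted(example.keys()))
# ['answer', 'paragraph', 'paragraph_answer', 'paragraph_sentence',
#  'question', 'sentence', 'sentence_answer']
print(example['paragraph_answer'])
# The Eiffel Tower is in <hl> Paris <hl>. It was completed in 1889.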
qg_squadshift.py
ADDED
@@ -0,0 +1,79 @@
import json
import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[SQuAD Shifts](https://modestyachts.github.io/squadshifts-website/index.html) dataset for question generation (QG) task."""
_URL = 'https://huggingface.co/datasets/asahi417/qg_squadshift/raw/main/data/processed'
_DOMAINS = ['new_wiki', 'nyt', 'reddit', 'amazon']
_FILESIZE = [4, 5, 5, 5]


class QGSQuADShiftsConfig(datasets.BuilderConfig):
    """BuilderConfig for SquadQG"""

    def __init__(self, **kwargs):
        """BuilderConfig for SquadQG.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(QGSQuADShiftsConfig, self).__init__(**kwargs)


class QGSQuADShifts(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [QGSQuADShiftsConfig(name="default", description="All domain.")]
    BUILDER_CONFIGS += [QGSQuADShiftsConfig(name=i, description=i) for i in _DOMAINS]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "answer": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "sentence": datasets.Value("string"),
                    "paragraph": datasets.Value("string"),
                    "sentence_answer": datasets.Value("string"),
                    "paragraph_answer": datasets.Value("string"),
                    "paragraph_sentence": datasets.Value("string"),
                    "paragraph_id": datasets.Value("string"),
                    "question_subj_level": datasets.Value("int32"),
                    "answer_subj_level": datasets.Value("int32"),
                    "domain": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/asahi417/lm-question-generation"
        )

    def _split_generators(self, dl_manager):
        if self.config.name == 'default':
            downloaded_file = dl_manager.download_and_extract({
                'train': [f"{_URL}/{i}.train.jsonl" for i in _DOMAINS],
                'dev': [f"{_URL}/{i}.dev.jsonl" for i in _DOMAINS],
                'test': [f"{_URL}/{i}.test.jsonl" for i in _DOMAINS]
            })
        else:
            downloaded_file = dl_manager.download_and_extract({
                'train': [f"{_URL}/{self.config.name}.train.jsonl"],
                'dev': [f"{_URL}/{self.config.name}.dev.jsonl"],
                'test': [f"{_URL}/{self.config.name}.test.jsonl"]
            })
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": downloaded_file["train"]}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": downloaded_file["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": downloaded_file["test"]})
        ]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                _list = f.read().split('\n')
                if _list[-1] == '':
                    _list = _list[:-1]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1
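A usage sketch for this loader. Note the script requests unsplit {domain}.{split}.jsonl files while this commit adds only the chunked test files, so the example assumes the train/dev/test files referenced in _split_generators are present in the repository:

from datasets import load_dataset

# Load one domain config ('new_wiki', 'nyt', 'reddit', 'amazon'),
# or 'default' to pool all four domains.
dataset = load_dataset('asahi417/qg_squadshift', 'amazon')
print(dataset['test'][0]['paragraph_answer'])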