Datasets: lmqg /
Languages: English
Multilinguality: monolingual
Size Categories: 10K<n<100K
Source Datasets: subjqa
Tags: question-generation
File size: 6,605 Bytes
""" Script to process raw SQuADshift file for Question Generation format
cd data/processed
gsplit -l 1500 -d --additional-suffix=.jsonl new_wiki.test.jsonl new_wiki.test
gsplit -l 1500 -d --additional-suffix=.jsonl nyt.test.jsonl nyt.test
gsplit -l 1500 -d --additional-suffix=.jsonl reddit.test.jsonl reddit.test
gsplit -l 1500 -d --additional-suffix=.jsonl amazon.test.jsonl amazon.test

gsplit -l 1500 -d --additional-suffix=.jsonl new_wiki.train.jsonl new_wiki.train
gsplit -l 1500 -d --additional-suffix=.jsonl nyt.train.jsonl nyt.train
gsplit -l 1500 -d --additional-suffix=.jsonl reddit.train.jsonl reddit.train
gsplit -l 1500 -d --additional-suffix=.jsonl amazon.train.jsonl amazon.train

gsplit -l 1500 -d --additional-suffix=.jsonl new_wiki.validation.jsonl new_wiki.validation
gsplit -l 1500 -d --additional-suffix=.jsonl nyt.validation.jsonl nyt.validation
gsplit -l 1500 -d --additional-suffix=.jsonl reddit.validation.jsonl reddit.validation
gsplit -l 1500 -d --additional-suffix=.jsonl amazon.validation.jsonl amazon.validation

rm -rf new_wiki.test.jsonl
rm -rf nyt.test.jsonl
rm -rf reddit.test.jsonl
rm -rf amazon.test.jsonl

rm -rf new_wiki.train.jsonl
rm -rf nyt.train.jsonl
rm -rf reddit.train.jsonl
rm -rf amazon.train.jsonl

rm -rf new_wiki.validation.jsonl
rm -rf nyt.validation.jsonl
rm -rf reddit.validation.jsonl
rm -rf amazon.validation.jsonl

"""
import json
import os
import re
from random import shuffle, seed
from tqdm import tqdm

import spacy
from datasets import load_dataset

DATASET_NAME = "squadshifts"
DATASET_TYPES = ['new_wiki', 'nyt', 'reddit', 'amazon']
HIGHLIGHT_TOKEN = '<hl>'
GENERATE_TEST_SPLIT = True
SPLITTER = spacy.load('en_core_web_sm')
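# NOTE: the spaCy model loaded above is assumed to be installed already,
# e.g. via `python -m spacy download en_core_web_sm`.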


def get_sentence(document: str): return [str(sent) for sent in SPLITTER(document).sents]


def process_single_data(question: str, paragraph: str, answer: str):
    """ Convert single raw json data into QG format """
    example = {'question': question, 'paragraph': paragraph, 'answer': answer}
    start = example['paragraph'].find(example['answer'])  # first occurrence of the answer in the paragraph
    end = start + len(answer)
    assert paragraph[start:end] == answer
    # get sentence: reconstruct the sentence that contains the answer span
    before_tmp = get_sentence(example['paragraph'][:start])
    if len(before_tmp) == 0:
        before = ''
        before_sentence = ''
    else:
        if before_tmp[-1].endswith('.'):
            before = ' '.join(before_tmp)
            before_sentence = ''
        else:
            before = ' '.join(before_tmp[:-1])
            before_sentence = before_tmp[-1]
            before_sentence = before_sentence if before_sentence.endswith(' ') else f'{before_sentence} '
    after_tmp = get_sentence(example['paragraph'][start + len(example['answer']):])
    if len(after_tmp) == 0:
        after = ''
        after_sentence = ''
    else:
        after = ' '.join(after_tmp[1:])
        after_sentence = after_tmp[0]
        after_sentence = after_sentence if after_sentence.startswith(' ') else f' {after_sentence}'
    example['sentence'] = f"{before_sentence}{example['answer']}{after_sentence}"

    # get paragraph_sentence: paragraph with the answer-bearing sentence wrapped in <hl> tokens
    before = '' if before == '' else f'{before} '
    after = '' if after == '' else f' {after}'
    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)

    # get paragraph_answer: paragraph with only the answer span wrapped in <hl> tokens
    source_text = '{0}{1} {2} {1}{3}'.format(
        example['paragraph'][:start], HIGHLIGHT_TOKEN, example['answer'],
        example['paragraph'][start + len(example['answer']):])
    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)

    # get sentence_answer: answer-bearing sentence with only the answer span wrapped in <hl> tokens
    if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
        before = ''
    else:
        before = before_tmp[-1] if before_tmp[-1].endswith(' ') else f'{before_tmp[-1]} '
    if len(after_tmp) == 0:
        after = ''
    else:
        after = after_tmp[0] if after_tmp[0].startswith(' ') else f' {after_tmp[0]}'
    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)

    return example
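
# Example (hypothetical inputs; the sentence-level fields also depend on the spaCy
# sentence splitter, whereas paragraph_answer does not):
#   ex = process_single_data(
#       question="Which country has Paris as its capital?",
#       paragraph="Paris is the capital of France. It hosts the Eiffel Tower.",
#       answer="France")
#   ex['paragraph_answer']
#   # -> 'Paris is the capital of <hl> France <hl>. It hosts the Eiffel Tower.'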


if __name__ == '__main__':
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    for data_type in DATASET_TYPES:
        dataset = load_dataset(DATASET_NAME, data_type)
        _split = 'test'
        tmp_dataset = dataset[_split]
        full_data = []
        for single_data in tqdm(tmp_dataset):
            question_str = single_data['question']  #.replace("\n", ".").replace('"', "'")
            paragraph_str = single_data['context']  #.replace("\n", ".").replace('"', "'")
            answer_str = single_data['answers']['text']
            if isinstance(answer_str, list):
                answer_str = answer_str[0]
            assert type(answer_str) is str, answer_str
            assert type(question_str) is str, question_str
            assert type(paragraph_str) is str, paragraph_str
            tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
            tmp_data['paragraph_id'] = single_data['id']
            full_data.append(tmp_data)

        # split the original test set into train/valid/test, keeping all records with the same paragraph_id in one split
        test_size = int(len(full_data)/2)
        train_size = int((len(full_data) - test_size) * 2/3)
        # train_size = 2500
        valid_size = len(full_data) - train_size - test_size
        assert train_size + test_size + valid_size == len(full_data), f"{train_size}, {test_size}, {valid_size}"
        paragraph_ids = list(set([i['paragraph_id'] for i in full_data]))
        data_dict = {p: [i for i in full_data if i['paragraph_id'] == p] for p in paragraph_ids}
        seed(0)
        shuffle(paragraph_ids)
        lines_train = []
        lines_test = []
        lines_valid = []

        for i in paragraph_ids:
            if len(lines_test) < test_size:
                lines_test += data_dict[i]
            elif len(lines_train) < train_size:
                lines_train += data_dict[i]
            else:
                lines_valid += data_dict[i]
        print(f'STATS(train/valid/test): {data_type}| {len(lines_train)}/{len(lines_valid)}/{len(lines_test)}')
        with open(f'{output}/{data_type}.test.jsonl', 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in lines_test]))
        with open(f'{output}/{data_type}.train.jsonl', 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in lines_train]))
        with open(f'{output}/{data_type}.validation.jsonl', 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in lines_valid]))