"""
gsplit -l 1500 -d --additional-suffix=.jsonl test.jsonl test
gsplit -l 1500 -d --additional-suffix=.jsonl train.jsonl train
gsplit -l 1500 -d --additional-suffix=.jsonl validation.jsonl validation
rm -rf test.jsonl
rm -rf train.jsonl
rm -rf validation.jsonl
"""
import json
import os
import re
import spacy
from random import seed, shuffle
from tqdm import tqdm
from datasets import load_dataset

DATASET_NAME = "sberquad"
DATASET_TYPES = None  # optional dataset config name passed to load_dataset
HIGHLIGHT_TOKEN = '<hl>'  # marks the answer (or its sentence) in the source text
GENERATE_TEST_SPLIT = False  # carve a test split out of train when the dataset lacks one
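# Russian sentence splitter; requires the spaCy model: python -m spacy download ru_core_news_sm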
SPLITTER = spacy.load('ru_core_news_sm')


def get_sentence(document: str):
    """ Split a document into sentences with spaCy. """
    return [str(sent) for sent in SPLITTER(document).sents]


def process_single_data(question: str, paragraph: str, answer: str):
    """ Convert single raw json data into QG format """
    if paragraph.find(answer) == -1:
        answer = answer.lower()
    if paragraph.find(answer) == -1:
        paragraph = paragraph.lower()
    if paragraph.find(answer) == -1:
        answer = re.sub(r'\W+\Z', '', answer)  # strip trailing non-word characters
    if paragraph.find(answer) == -1:
        answer = re.sub(r'\A\W+', '', answer)  # strip leading non-word characters
    example = {'question': question, 'paragraph': paragraph, 'answer': answer}
    start = example['paragraph'].find(example['answer'])
    end = start + len(answer)
    if paragraph[start:end] != answer:
        # the answer still cannot be located after normalization; report and skip
        print(f'\nanswer not found in paragraph\nanswer: {answer}\nparagraph: {paragraph}\n')
        return None
    # get sentence
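    # If the text before the answer ends mid-sentence, its trailing fragment starts
    # the answer's sentence; the first fragment after the answer completes it.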
    before_tmp = get_sentence(example['paragraph'][:start])
    if len(before_tmp) == 0:
        before = ''
        before_sentence = ''
    else:
        if before_tmp[-1].endswith('.'):
            before = ' '.join(before_tmp)
            before_sentence = ''
        else:
            before = ' '.join(before_tmp[:-1])
            before_sentence = before_tmp[-1]
            before_sentence = before_sentence if before_sentence.endswith(' ') else f'{before_sentence} '
    after_tmp = get_sentence(example['paragraph'][start + len(example['answer']):])
    if len(after_tmp) == 0:
        after = ''
        after_sentence = ''
    else:
        after = ' '.join(after_tmp[1:])
        after_sentence = after_tmp[0]
        after_sentence = after_sentence if after_sentence.startswith(' ') else f' {after_sentence}'
    example['sentence'] = f"{before_sentence}{example['answer']}{after_sentence}"

    # get paragraph_sentence
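    # (the full paragraph, with the answer's sentence wrapped in HIGHLIGHT_TOKEN)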
    before = '' if before == '' else f'{before} '
    after = '' if after == '' else f' {after}'
    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
    example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)

    # get paragraph_answer
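    # (the full paragraph, with the answer span wrapped in HIGHLIGHT_TOKEN)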
    source_text = '{0}{1} {2} {1}{3}'.format(
        example['paragraph'][:start], HIGHLIGHT_TOKEN, example['answer'],
        example['paragraph'][start + len(example['answer']):])
    example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)

    # get sentence_answer
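    # (the answer's sentence only, with the answer span wrapped in HIGHLIGHT_TOKEN)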
    if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
        before = ''
    else:
        before = before_tmp[-1] if before_tmp[-1].endswith(' ') else f'{before_tmp[-1]} '
    if len(after_tmp) == 0:
        after = ''
    else:
        after = after_tmp[0] if after_tmp[0].startswith(' ') else f' {after_tmp[0]}'
    source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
    example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)

    return example


if __name__ == '__main__':
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    if DATASET_TYPES is not None:
        dataset = load_dataset(DATASET_NAME, DATASET_TYPES)
    else:
        dataset = load_dataset(DATASET_NAME)
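    # convert each split into a JSON-lines file of processed QG examples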
    for _split in dataset.keys():
        tmp_dataset = dataset[_split]
        with open(f'{output}/{_split}.jsonl', 'w') as f:
            for single_data in tqdm(tmp_dataset):
                question_str = single_data['question']
                paragraph_str = single_data['context']
                answer_str = single_data['answers']['text']
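                # SQuAD-style 'answers' holds a list of texts; keep the first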
                if isinstance(answer_str, list):
                    answer_str = answer_str[0]
                assert type(answer_str) is str, answer_str
                assert type(question_str) is str, question_str
                assert type(paragraph_str) is str, paragraph_str
                tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
                if tmp_data is None:
                    continue
                tmp_data['paragraph_id'] = single_data['id']
                f.write(json.dumps(tmp_data) + '\n')
    if GENERATE_TEST_SPLIT:
        # randomly sample for test set
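        # Move whole paragraphs (grouped by paragraph_id) from train into test so no
        # paragraph is split across the two; target test size equals the validation size.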
        with open(f'{output}/train.jsonl') as f:
            lines_train = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
        with open(f'{output}/validation.jsonl') as f:
            size = len([i for i in f.read().split('\n') if len(i) > 0])
        paragraph_ids = list(set([i['paragraph_id'] for i in lines_train]))
        data_train = {p: [i for i in lines_train if i['paragraph_id'] == p] for p in paragraph_ids}
        seed(0)
        shuffle(paragraph_ids)
        data_test = []
        data_train_new = []
        for i in paragraph_ids:
            if len(data_test) < size:
                data_test += data_train[i]
            else:
                data_train_new += data_train[i]
        with open(f'{output}/train.jsonl', 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in data_train_new]))
        with open(f'{output}/test.jsonl', 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in data_test]))