Datasets:
lmqg/qg_ruquad

Languages:
Russian
Multilinguality:
monolingual
Size Categories:
10K<n<100K
Source Datasets:
sberquad
Tags:
question-generation

asahi417 committed
Commit b95a841
1 Parent(s): dedd2df
.gitattributes CHANGED
@@ -35,3 +35,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.mp3 filter=lfs diff=lfs merge=lfs -text
  *.ogg filter=lfs diff=lfs merge=lfs -text
  *.wav filter=lfs diff=lfs merge=lfs -text
+ data/processed/test.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/train.jsonl filter=lfs diff=lfs merge=lfs -text
+ data/processed/validation.jsonl filter=lfs diff=lfs merge=lfs -text
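The three added rules route the processed JSONL splits through Git LFS instead of storing them as ordinary blobs. As a minimal sketch (the split names are taken from the data/processed/ files added in this commit), the rules can be regenerated like so:

splits = ["test", "train", "validation"]  # files under data/processed/
for split in splits:
    # one LFS rule per split, matching the lines added above
    print(f"data/processed/{split}.jsonl filter=lfs diff=lfs merge=lfs -text")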
README.md ADDED
File without changes
data/processed/test.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:876be9e86d6ad9fc9eb6172c62b6d26f5655d7f25e4d4d8f3839feb772c33e40
+ size 323354268
data/processed/train.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ec827524988846ec8060d3f571c0b186ff817210bd3f5bdbbd68634385cd71a
+ size 631464161
data/processed/validation.jsonl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d9c505701981f912dbc64c1100d7fc2299a603c90d8dd739e7bc065ce36bfec
+ size 70239570
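What actually lands in the repository for each split is a Git LFS pointer stub: three text lines recording the LFS spec version, the sha256 of the payload, and its size in bytes (so train is roughly 631 MB, test 323 MB, validation 70 MB). A minimal sketch for parsing such a stub, using only the three fields shown above:

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into {'version': ..., 'oid': ..., 'size': ...}."""
    fields = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

# e.g. read_lfs_pointer("data/processed/validation.jsonl")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:9d9c...', 'size': '70239570'}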
process.py ADDED
@@ -0,0 +1,140 @@
+ """
+ gsplit -l 1500 -d --additional-suffix=.jsonl test.jsonl test
+ gsplit -l 1500 -d --additional-suffix=.jsonl train.jsonl train
+ gsplit -l 1500 -d --additional-suffix=.jsonl validation.jsonl validation
+ rm -rf test.jsonl
+ rm -rf train.jsonl
+ rm -rf validation.jsonl
+ """
+ import json
+ import os
+ import re
+ import spacy
+ from random import seed, shuffle
+ from tqdm import tqdm
+ from datasets import load_dataset
+
+ DATASET_NAME = "sberquad"
+ DATASET_TYPES = None
+ HIGHLIGHT_TOKEN = '<hl>'
+ GENERATE_TEST_SPLIT = False
+ SPLITTER = spacy.load('ru_core_news_sm')
+
+
+ def get_sentence(document: str): return [str(sent) for sent in SPLITTER(document).sents]
+
+
+ def process_single_data(question: str, paragraph: str, answer: str):
+     """ Convert single raw json data into QG format """
+     # progressively relax matching until the answer span is found in the paragraph
+     if paragraph.find(answer) == -1:
+         answer = answer.lower()
+     if paragraph.find(answer) == -1:
+         paragraph = paragraph.lower()
+     if paragraph.find(answer) == -1:
+         answer = re.sub(r'\W+\Z', '', answer)  # drop trailing punctuation
+     if paragraph.find(answer) == -1:
+         answer = re.sub(r'\A\W+', '', answer)  # drop leading punctuation
+     example = {'question': question, 'paragraph': paragraph, 'answer': answer}
+     start = example['paragraph'].find(example['answer'])
+     end = start + len(answer)
+     if paragraph[start:end] != answer:
+         # answer still not locatable: log it and skip the example
+         print()
+         print(answer)
+         print(paragraph)
+         print()
+         return None
+     # get sentence
+     before_tmp = get_sentence(example['paragraph'][:start])
+     if len(before_tmp) == 0:
+         before = ''
+         before_sentence = ''
+     else:
+         if before_tmp[-1].endswith('.'):
+             before = ' '.join(before_tmp)
+             before_sentence = ''
+         else:
+             before = ' '.join(before_tmp[:-1])
+             before_sentence = before_tmp[-1]
+             before_sentence = before_sentence if before_sentence.endswith(' ') else f'{before_sentence} '
+     after_tmp = get_sentence(example['paragraph'][start + len(example['answer']):])
+     if len(after_tmp) == 0:
+         after = ''
+         after_sentence = ''
+     else:
+         after = ' '.join(after_tmp[1:])
+         after_sentence = after_tmp[0]
+         after_sentence = after_sentence if after_sentence.startswith(' ') else f' {after_sentence}'
+     example['sentence'] = f"{before_sentence}{example['answer']}{after_sentence}"
+
+     # get paragraph_sentence
+     before = '' if before == '' else f'{before} '
+     after = '' if after == '' else f' {after}'
+     source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['sentence'], after)
+     example['paragraph_sentence'] = re.sub(r'\s+', ' ', source_text)
+
+     # get paragraph_answer
+     source_text = '{0}{1} {2} {1}{3}'.format(
+         example['paragraph'][:start], HIGHLIGHT_TOKEN, example['answer'],
+         example['paragraph'][start + len(example['answer']):])
+     example['paragraph_answer'] = re.sub(r'\s+', ' ', source_text)
+
+     # get sentence_answer
+     if len(before_tmp) == 0 or before_tmp[-1].endswith('.'):
+         before = ''
+     else:
+         before = before_tmp[-1] if before_tmp[-1].endswith(' ') else f'{before_tmp[-1]} '
+     if len(after_tmp) == 0:
+         after = ''
+     else:
+         after = after_tmp[0] if after_tmp[0].startswith(' ') else f' {after_tmp[0]}'
+     source_text = '{0}{1} {2} {1}{3}'.format(before, HIGHLIGHT_TOKEN, example['answer'], after)
+     example['sentence_answer'] = re.sub(r'\s+', ' ', source_text)
+
+     return example
+
+
+ if __name__ == '__main__':
+     output = './data/processed'
+     os.makedirs(output, exist_ok=True)
+     if DATASET_TYPES is not None:
+         dataset = load_dataset(DATASET_NAME, DATASET_TYPES)
+     else:
+         dataset = load_dataset(DATASET_NAME)
+     for _split in dataset.keys():
+         tmp_dataset = dataset[_split]
+         with open(f'{output}/{_split}.jsonl', 'w') as f:
+             for single_data in tqdm(tmp_dataset):
+                 question_str = single_data['question']
+                 paragraph_str = single_data['context']
+                 answer_str = single_data['answers']['text']
+                 if type(answer_str) == list:
+                     answer_str = answer_str[0]
+                 assert type(answer_str) is str, answer_str
+                 assert type(question_str) is str, question_str
+                 assert type(paragraph_str) is str, paragraph_str
+                 tmp_data = process_single_data(question=question_str, paragraph=paragraph_str, answer=answer_str)
+                 if tmp_data is None:
+                     continue
+                 tmp_data['paragraph_id'] = single_data['id']
+                 f.write(json.dumps(tmp_data) + '\n')
+     if GENERATE_TEST_SPLIT:
+         # randomly sample paragraphs from train to build a test set of the same
+         # size as the validation split, keeping paragraphs disjoint across splits
+         with open(f'{output}/train.jsonl') as f:
+             lines_train = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
+         with open(f'{output}/validation.jsonl') as f:
+             size = len([i for i in f.read().split('\n') if len(i) > 0])
+         paragraph_ids = list(set([i['paragraph_id'] for i in lines_train]))
+         data_train = {p: [i for i in lines_train if i['paragraph_id'] == p] for p in paragraph_ids}
+         seed(0)
+         shuffle(paragraph_ids)
+         data_test = []
+         data_train_new = []
+         for i in paragraph_ids:
+             if len(data_test) < size:
+                 data_test += data_train[i]
+             else:
+                 data_train_new += data_train[i]
+         with open(f'{output}/train.jsonl', 'w') as f:
+             f.write('\n'.join([json.dumps(i) for i in data_train_new]))
+         with open(f'{output}/test.jsonl', 'w') as f:
+             f.write('\n'.join([json.dumps(i) for i in data_test]))
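To see the format process.py emits, here is a hedged sketch that pushes one toy record through process_single_data (it assumes process.py is importable as process and that spacy's ru_core_news_sm model is installed; the sample text is invented for illustration):

from process import process_single_data  # assumes process.py is on PYTHONPATH

example = process_single_data(
    question='Кто написал роман?',  # "Who wrote the novel?"
    paragraph='Роман написал Толстой. Он вышел в 1869 году.',
    answer='Толстой',
)
# paragraph_answer wraps the answer span in <hl> tokens:
# 'Роман написал <hl> Толстой <hl>. Он вышел в 1869 году.'
print(example['paragraph_answer'])
print(sorted(example.keys()))  # answer, paragraph, paragraph_answer, paragraph_sentence, question, sentence, sentence_answer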
qg_ruquad.py ADDED
@@ -0,0 +1,65 @@
+ """ python -c "from datasets import load_dataset;load_dataset('.')" """
+ import json
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+ _DESCRIPTION = """[SberSQuAD](https://huggingface.co/datasets/sberquad) dataset for question generation (QG) task."""
+ _URL = 'https://huggingface.co/datasets/asahi417/qg_ruquad/raw/main/data/processed'
+ _URLS = {
+     str(datasets.Split.TEST): f'{_URL}/test.jsonl',
+     str(datasets.Split.TRAIN): f'{_URL}/train.jsonl',
+     str(datasets.Split.VALIDATION): f'{_URL}/validation.jsonl'
+ }
+
+
+ class QGRuQuADConfig(datasets.BuilderConfig):
+     """BuilderConfig for QGRuQuAD"""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for QGRuQuAD.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(QGRuQuADConfig, self).__init__(**kwargs)
+
+
+ class QGRuQuAD(datasets.GeneratorBasedBuilder):
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "answer": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "sentence": datasets.Value("string"),
+                     "paragraph": datasets.Value("string"),
+                     "sentence_answer": datasets.Value("string"),
+                     "paragraph_answer": datasets.Value("string"),
+                     "paragraph_sentence": datasets.Value("string"),
+                     "paragraph_id": datasets.Value("string")
+                 }
+             ),
+             supervised_keys=None,
+             homepage="https://github.com/asahi417/lm-question-generation"
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_file = dl_manager.download_and_extract(_URLS)
+         # each split maps to a single downloaded file; wrap it in a list so
+         # _generate_examples iterates over file paths rather than characters
+         return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": [downloaded_file[str(i)]]})
+                 for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]
+
+     def _generate_examples(self, filepaths):
+         _key = 0
+         for filepath in filepaths:
+             logger.info("generating examples from = %s", filepath)
+             with open(filepath, encoding="utf-8") as f:
+                 _list = f.read().split('\n')
+                 if _list[-1] == '':
+                     _list = _list[:-1]  # drop trailing empty line
+                 for i in _list:
+                     data = json.loads(i)
+                     yield _key, data
+                     _key += 1
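The module docstring gives the intended smoke test (python -c "from datasets import load_dataset;load_dataset('.')" from a local clone). A slightly fuller usage sketch, assuming the dataset is published under the lmqg/qg_ruquad id shown in the page header:

from datasets import load_dataset

dataset = load_dataset('lmqg/qg_ruquad')   # or load_dataset('.') inside a clone
print(dataset)                             # split names and row counts
sample = dataset['train'][0]
print(sample['question'])
print(sample['paragraph_answer'])          # paragraph with the answer wrapped in <hl>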