Datasets:
Tasks:
Text Generation
Sub-tasks:
language-modeling
Languages:
Russian
Size:
10K<n<100K
ArXiv:
Tags:
question-generation
License:
update
Browse files- generate_reference_files.py +19 -0
generate_reference_files.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
from glob import glob

from datasets import load_dataset

# Build line-oriented reference files (one record per line) for each
# (field, split) pair of the asahi417/qg_ruquad dataset, then sanity-check
# that all files of a split have the same number of lines.
os.makedirs('./reference_files', exist_ok=True)

SPLITS = ['validation', 'test']

for split in SPLITS:
    # force_redownload: always fetch a fresh copy rather than trusting the cache.
    dataset = load_dataset('asahi417/qg_ruquad', split=split, download_mode='force_redownload')
    for data in ['question', 'answer', 'sentence', 'paragraph']:
        with open('./reference_files/{}-{}.txt'.format(data, split), 'w') as f:
            # The 'paragraph' file stores paragraph *ids*, not the raw paragraph text.
            tmp_data = dataset['paragraph_id'] if data == 'paragraph' else dataset[data]
            # One record per line: embedded newlines would break the
            # line-oriented format, so replace them with '.'.
            f.write('\n'.join([i.replace('\n', '.') for i in tmp_data]))

# Sanity check: every reference file of a split must contain the same number
# of lines (one per dataset record).
# NOTE(review): the original ran this once after the loop, so the stale loop
# variable meant only the last split ('test') was ever checked; check every
# split instead. Also close the file handles (the original leaked them).
for split in SPLITS:
    lengths = []
    for path in glob(f'reference_files/*{split}.txt'):
        with open(path) as f:
            lengths.append(len(f.read().split('\n')))
    assert len(set(lengths)) == 1, lengths