Datasets:
Tasks:
Text Generation
Modalities:
Text
Sub-tasks:
language-modeling
Languages:
English
Size:
1K - 10K
ArXiv:
Tags:
question-generation
License:
Update generate_reference_files.py
Browse files — generate_reference_files.py (+16 −0)
generate_reference_files.py
CHANGED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Regenerate the per-split reference text files for lmqg/qag_tweetqa.

For each split ('validation', 'test') and each text field
('questions_answers', 'paragraph'), writes one file under
./reference_files with exactly one dataset record per line
(embedded newlines are stripped so line count == row count),
then sanity-checks that every file for the split has the same
number of lines and that this count matches the dataset length.
"""
import os
from glob import glob

from datasets import load_dataset

os.makedirs('./reference_files', exist_ok=True)


for split in ['validation', 'test']:
    # force_redownload so the reference files always reflect the
    # latest revision of the hub dataset rather than a stale cache.
    dataset = load_dataset('lmqg/qag_tweetqa', split=split, download_mode='force_redownload')
    for data in ['questions_answers', 'paragraph']:
        with open('./reference_files/{}-{}.txt'.format(data, split), 'w') as f:
            tmp_data = dataset[data]
            # Strip embedded newlines so each record stays on one line.
            f.write('\n'.join([i.replace('\n', '') for i in tmp_data]))
    # Count lines in every reference file written for this split.
    # Fixed: the original used len(open(i).read().split('\n')) inside a
    # comprehension, leaking the file handles (never closed).
    length = []
    for path in glob(f'reference_files/*{split}.txt'):
        with open(path) as f:
            length.append(len(f.read().split('\n')))
    # All files for the split must agree on the line count...
    assert len(set(length)) == 1, length
    # ...and that count must equal the number of dataset rows.
    assert length[0] == len(dataset), f"{length[0]} != {len(dataset)}"