Datasets:
Tasks:
Text Generation
Modalities:
Text
Sub-tasks:
language-modeling
Languages:
English
Size:
10K - 100K
ArXiv:
Tags:
question-generation
License:
File size: 957 Bytes
a8807ea 41dd509 a8807ea 3ad0eec a8807ea 3ad0eec a8807ea 41dd509 aababdb 41dd509 aababdb 41dd509 aababdb 41dd509 a8807ea |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 |
import os
from glob import glob
from datasets import load_dataset
# Export per-column reference files (question / answer / sentence / paragraph)
# for each SQuADShifts domain of the asahi417/qg_squadshifts dataset.
# One file per (column, split, domain); one record per line.
dataset_name = 'asahi417/qg_squadshifts'
os.makedirs('./reference_files', exist_ok=True)
for split in ['test']:
    for domain in ["default", 'new_wiki', 'nyt', 'reddit', 'amazon']:
        dataset = load_dataset(dataset_name, domain, split=split)
        # the 'default' config gets no domain infix in its file names
        infix = "" if domain == 'default' else f"{domain}."
        for data in ['question', 'answer', 'sentence', 'paragraph']:
            # the 'paragraph' file is written from the 'paragraph_id' column
            column = 'paragraph_id' if data == 'paragraph' else data
            with open('./reference_files/{}-{}.{}txt'.format(data, split, infix), 'w') as f:
                # one record per line; embedded newlines are flattened to '.'
                f.write('\n'.join(record.replace('\n', '.') for record in dataset[column]))
# Sanity check: within each domain, every exported reference file must have
# the same number of lines (one record per line across all columns).
for domain in ['new_wiki', 'nyt', 'reddit', 'amazon']:
    lengths = []
    for path in glob(f'reference_files/*{domain}*.txt'):
        # context manager closes each file promptly; the original
        # `open(i).read()` leaked handles until garbage collection
        with open(path) as f:
            lengths.append(len(f.read().split('\n')))
    # all counts identical -> the set collapses to a single value
    assert len(set(lengths)) == 1, lengths
|