Tasks: Text Generation
Sub-tasks: language-modeling
Languages: Italian
Tags: question-generation
Update process.py
process.py CHANGED (+38 -0)
@@ -0,0 +1,38 @@
+import json
+import os
+from random import seed, shuffle
+import re
+from tqdm import tqdm
+from typing import Dict
+from datasets import load_dataset
+
+
+SEP_TOKEN = " | "
+
+
+def create_data(hf_data):
+    df = hf_data.to_pandas()
+    output = []
+    for paragraph, g in df.groupby("paragraph"):
+        example = {
+            'paragraph': paragraph.replace(SEP_TOKEN, " "),
+            'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['question']],
+            'answers': [_g.replace(SEP_TOKEN, " ") for _g in g['answer']],
+        }
+        example["questions_answers"] = SEP_TOKEN.join([f"question: {q}, answer: {a}" for q, a in zip(example["questions"], example["answers"])])
+        output.append(example)
+    return output
+
+
+if __name__ == '__main__':
+    qg_squad = load_dataset("lmqg/qg_itquad")
+    data_valid = create_data(qg_squad['validation'])
+    data_train = create_data(qg_squad['train'])
+    data_test = create_data(qg_squad['test'])
+    data_all = {'train': data_train, 'validation': data_valid, 'test': data_test}
+    output = './data/processed'
+    os.makedirs(output, exist_ok=True)
+    for k, _data in data_all.items():
+        with open('{}/{}.jsonl'.format(output, k), 'w') as f:
+            for single_data in tqdm(_data):
+                f.write(json.dumps(single_data) + '\n')
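
For reference, each line of the ./data/processed/{train,validation,test}.jsonl files written above is one JSON object with paragraph, questions, answers, and questions_answers fields. The short sketch below is illustrative only and not part of the commit; it assumes process.py has already been run so that ./data/processed/validation.jsonl exists, and it reads the first record back to show how the fields relate.

import json

SEP_TOKEN = " | "  # same separator used by process.py above

with open('./data/processed/validation.jsonl') as f:
    record = json.loads(next(f))  # one JSON object per line

print(record['paragraph'][:80])                            # shared paragraph, separator removed
print(record['questions'][0], '->', record['answers'][0])  # aligned question/answer pair
print(record['questions_answers'].split(SEP_TOKEN)[0])     # "question: ..., answer: ..."

Because questions_answers is the " | "-joined string built in create_data, splitting it on SEP_TOKEN recovers one "question: ..., answer: ..." entry per pair.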