import json
import os
from random import seed, shuffle

from tqdm import tqdm
from datasets import load_dataset


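# separator used to join multiple question-answer pairs into one string; it is also
# stripped from the raw text so it never occurs inside a paragraph, question, or answer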
SEP_TOKEN = " | "


def create_data(hf_data):
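    """Group TweetQA rows by tweet and build one processed example per tweet."""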
    df = hf_data.to_pandas()
    output = []
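    # each group contains every question-answer row asked about the same tweet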
    for tweet, g in df.groupby("Tweet"):
        example = {
            'paragraph': tweet.replace(SEP_TOKEN, " "),
            "paragraph_id": '-'.join(g['qid']),
            'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
            'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
        }
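        # flatten all question-answer pairs for this tweet into a single string, separated by SEP_TOKEN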
        example["questions_answers"] = SEP_TOKEN.join([f"question: {q}, answer: {a}" for q, a in zip(example["questions"], example["answers"])])
        output.append(example)
    return output


if __name__ == '__main__':
    tweet_qa = load_dataset("tweet_qa")
    data_valid = create_data(tweet_qa['validation'])
    data_train = create_data(tweet_qa['train'])
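    # carve a test split out of the shuffled training data, matching the validation split in size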
    seed(1)
    test_len = len(data_valid)
    shuffle(data_train)
    data_test = data_train[:test_len]
    data_train = data_train[test_len:]
    data_all = {'train': data_train, 'validation': data_valid, 'test': data_test}
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
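    # write each split as a JSON Lines file (one processed example per line) under ./data/processed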
    for k, _data in data_all.items():
        with open('{}/{}.jsonl'.format(output, k), 'w') as f:
            for single_data in tqdm(_data):
                f.write(json.dumps(single_data) + '\n')