lmqg/qag_tweetqa / process.py

"""Convert TweetQA into JSONL splits for question-answer generation (QAG)."""
import json
import os
from random import seed, shuffle

from tqdm import tqdm
from datasets import load_dataset

# Separator used to join multiple question-answer pairs into one target string.
SEP_TOKEN = " | "


def create_data(hf_data):
    """Group TweetQA rows by tweet and collect their question-answer pairs."""
    df = hf_data.to_pandas()
    output = []
    for tweet, g in df.groupby("Tweet"):
        example = {
            'paragraph': tweet.replace(SEP_TOKEN, " "),
            'paragraph_id': '-'.join(g['qid']),
            # Remove the separator token from the text so it stays unambiguous.
            'questions': [_g.replace(SEP_TOKEN, " ") for _g in g['Question']],
            # Each 'Answer' entry is a list of strings; keep the first answer.
            'answers': [_g[0].replace(SEP_TOKEN, " ") for _g in g['Answer']],
        }
        # Flatten all pairs for a tweet into a single generation target.
        example["questions_answers"] = SEP_TOKEN.join(
            f"question: {q}, answer: {a}"
            for q, a in zip(example["questions"], example["answers"])
        )
        output.append(example)
    return output
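
# Illustrative shape of one record returned by `create_data` (values are made
# up for illustration, not taken from the real dataset):
# {
#   "paragraph": "good luck to all the runners out there today!",
#   "paragraph_id": "qid1-qid2",
#   "questions": ["who is being wished good luck?"],
#   "answers": ["the runners"],
#   "questions_answers": "question: who is being wished good luck?, answer: the runners"
# }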

if __name__ == '__main__':
    tweet_qa = load_dataset("tweet_qa")
    data_valid = create_data(tweet_qa['validation'])
    data_train = create_data(tweet_qa['train'])
    # Hold out a test set from the shuffled training data, equal in size to
    # the validation split.
    seed(1)
    test_len = len(data_valid)
    shuffle(data_train)
    data_test = data_train[:test_len]
    data_train = data_train[test_len:]
    data_all = {'train': data_train, 'validation': data_valid, 'test': data_test}
    output = './data/processed'
    os.makedirs(output, exist_ok=True)
    # Write each split as one JSON object per line (JSONL).
    for k, _data in data_all.items():
        with open('{}/{}.jsonl'.format(output, k), 'w') as f:
            for single_data in tqdm(_data):
                f.write(json.dumps(single_data) + '\n')
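
# Usage sketch (not part of the original script): reload a processed split and
# check the record fields. Paths assume the script above ran from the repo root.
#
#   import json
#   with open('./data/processed/validation.jsonl') as f:
#       examples = [json.loads(line) for line in f]
#   assert set(examples[0]) == {'paragraph', 'paragraph_id', 'questions',
#                               'answers', 'questions_answers'}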