"""Download the TweetNER7 dataset (tner/tweetner7) and export the 2020 train/validation
splits and the 2021 test split as JSONL files under data/tweet_ner7/."""
import os
import json

from datasets import load_dataset

# Output directory for the processed splits.
os.makedirs("data/tweet_ner7", exist_ok=True)

# Download the dataset from the Hugging Face Hub.
data = load_dataset("tner/tweetner7")


def process(tmp):
    """Convert a pandas split into a list of JSON-serializable records."""
    tmp = [i.to_dict() for _, i in tmp.iterrows()]
    for i in tmp:
        i.pop("id")
        # numpy arrays from pandas are not JSON-serializable, so convert them to lists
        i['gold_label_sequence'] = i.pop('tags').tolist()
        i['text_tokenized'] = i.pop('tokens').tolist()
        # reconstruct a plain-text version of the tweet by joining the tokens
        i['text'] = ' '.join(i['text_tokenized'])
    return tmp


# TweetNER7 is split by year: train/validation come from 2020, the test split from 2021.
train = process(data["train_2020"].to_pandas())
val = process(data["validation_2020"].to_pandas())
test = process(data["test_2021"].to_pandas())

# Write each split as JSON Lines (one record per line).
with open("data/tweet_ner7/train.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in train]))
with open("data/tweet_ner7/validation.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in val]))
with open("data/tweet_ner7/test.jsonl", "w") as f:
    f.write("\n".join([json.dumps(i) for i in test]))
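
# Optional sanity check (not part of the original export; assumes the script above
# has just been run and data/tweet_ner7/train.jsonl exists): re-read the first record
# and confirm that the token sequence and the label sequence stay aligned.
with open("data/tweet_ner7/train.jsonl") as f:
    first = json.loads(f.readline())
assert len(first["text_tokenized"]) == len(first["gold_label_sequence"])
print(sorted(first.keys()))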