|
import os |
|
import json |
|
from datasets import load_dataset |
|
|
|
# Destination directory for the exported JSONL files.
output_dir = os.path.join("data", "tweet_ner7")
os.makedirs(output_dir, exist_ok=True)

# Download (or load from the local HF cache) every split of TweetNER7.
data = load_dataset("tner/tweetner7")
|
|
|
|
|
def process(tmp):
    """Convert one dataset split (a pandas DataFrame) into JSONL-ready records.

    For every row: drop the ``id`` column, rename ``tags`` ->
    ``gold_label_sequence`` and ``tokens`` -> ``text_tokenized`` (converting
    the numpy arrays to plain Python lists so they are JSON serializable),
    and add a whitespace-joined ``text`` string.

    Args:
        tmp: DataFrame with ``id``, ``tags`` and ``tokens`` columns, where
            ``tags``/``tokens`` hold array-like values (as produced by
            ``Dataset.to_pandas()``).

    Returns:
        list[dict]: one JSON-serializable record per input row.
    """
    # DataFrame.to_dict("records") produces the same per-row dicts in a single
    # call, replacing the slower iterrows()/Series.to_dict() round trip.
    records = tmp.to_dict("records")
    for record in records:
        record.pop("id")
        record["gold_label_sequence"] = record.pop("tags").tolist()
        record["text_tokenized"] = record.pop("tokens").tolist()
        record["text"] = " ".join(record["text_tokenized"])
    return records
|
|
|
|
|
# Convert the three splits used by this benchmark into record dicts.
train = process(data["train_2020"].to_pandas())
val = process(data["validation_2020"].to_pandas())
test = process(data["test_2021"].to_pandas())


def _write_jsonl(records, path):
    """Write *records* to *path* as JSON Lines (one object per line).

    Mirrors the original output exactly: lines joined with "\n" and no
    trailing newline. The encoding is pinned to UTF-8 instead of relying on
    the platform-dependent default of open().
    """
    with open(path, "w", encoding="utf-8") as f:
        f.write("\n".join(json.dumps(record) for record in records))


# One file per split; the duplicated open/join/dump boilerplate now lives in
# a single helper.
_write_jsonl(train, "data/tweet_ner7/train.jsonl")
_write_jsonl(val, "data/tweet_ner7/validation.jsonl")
_write_jsonl(test, "data/tweet_ner7/test.jsonl")
|
|
|
|
|
|
|
|