# super_tweeteval/process/unify_sp_symbol.py
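"""Unify the special-symbol conventions used in SuperTweetEval JSONL files.

Rewrites masked placeholders in each record's text:
    {{USERNAME}}      -> @user
    {{URL}}           -> {URL}
    {@Display Name@}  -> @Display_Name
"""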
import json
import re
from glob import glob
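
# An earlier one-off pass over data/tweet_topic/*.jsonl, kept commented out
# for reference; the active loop below applies the same normalisation to
# tweet_ner7.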
# for i in glob("data/tweet_topic/*.jsonl"):
#     with open(i) as f:
#         data = [json.loads(j) for j in f.readlines()]
#     for d in data:
#         for c in ['text']:
#             d[c] = d[c].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
#             for t in re.findall(r'{@[^@^}]*@}', d[c]):
#                 d[c] = d[c].replace(t, t.replace("{@", "@").replace("@}", "").replace(" ", "_"))
#     with open(i, "w") as f:
#         f.write("\n".join([json.dumps(j) for j in data]))
for i in glob("data/tweet_ner7/*.jsonl"):
    with open(i) as f:
        data = [json.loads(j) for j in f.readlines()]
    for d in data:
        # Normalise the masked placeholders to their unified symbols.
        d['text'] = d['text'].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
        # Unwrap two-character {@..@} spans, keeping only the two characters.
        d['text'] = re.sub(r'\{@([^@].)@}', r'\1', d['text'])
        # Rewrite the remaining masked mentions: "{@Jane Doe@}" -> "@Jane_Doe".
        for t in re.findall(r'{@[^@}]*@}', d['text']):
            t_new = t.replace("{@", "@").replace("@}", "").replace(" ", "_")
            d['text'] = d['text'].replace(t, t_new)
        # NOTE: this assignment was left unfinished in the original file. A
        # plausible completion (an assumption, not confirmed by the source) is
        # to apply the same placeholder normalisation to each token.
        d['text_tokenized'] = [
            tok.replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
            for tok in d['text_tokenized']
        ]
    with open(i, "w") as f:
        f.write("\n".join([json.dumps(j) for j in data]))
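
# Example of the normalisation above on a hypothetical record:
#   {"text": "{{USERNAME}} shared {{URL}} with {@Jane Doe@}"}
#   -> {"text": "@user shared {URL} with @Jane_Doe"}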