# super_tweeteval/process/unify_sp_symbol.py
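"""Unify special-symbol conventions across the SuperTweetEval JSONL files.

Rewrites each file in place so that masked mentions and URLs use a single
convention: "{{USERNAME}}" becomes "@user", "{{URL}}" becomes "{URL}", and
spans of the form "{@display name@}" become "@display_name".
"""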
import json
import re
from glob import glob
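# Earlier pass over the tweet_topic files, kept commented out for reference: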
# for i in glob("data/tweet_topic/*.jsonl"):
#     with open(i) as f:
#         data = [json.loads(j) for j in f.readlines()]
#     for d in data:
#         for c in ['text']:
#             d[c] = d[c].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
#             for t in re.findall(r'{@[^@}]*@}', d[c]):
#                 d[c] = d[c].replace(t, t.replace("{@", "@").replace("@}", "").replace(" ", "_"))
#     with open(i, "w") as f:
#         f.write("\n".join([json.dumps(j) for j in data]))
for i in glob("data/tweet_ner7/*.jsonl"):
with open(i) as f:
data = [json.loads(j) for j in f.readlines()]
for d in data:
d['text'] = d['text'].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
d['text_tokenized'] = [y if y != "{{USERNAME}}" else "@user" for y in d['text_tokenized']]
d['text_tokenized'] = [y if y != "{{URL}}" else "{URL}" for y in d['text_tokenized']]
for t in re.findall(r'{@[^@^}]*@}', d['text']):
t_new = t.replace("{@", "@").replace("@}", "").replace(" ", "_")
d['text'] = d['text'].replace(t, t_new)
d['text_tokenized'] = [y if y != t else t_new for y in d['text_tokenized']]
for e in d['entities']:
e['entity'] = e['entity'].replace(t, t_new)
e['entity'] = e['entity'].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
with open(i, "w") as f:
f.write("\n".join([json.dumps(j) for j in data]))
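# Illustrative before/after for a single record (hypothetical values, not
# taken from the dataset):
#   before: {"text": "{{USERNAME}} shared {{URL}} via {@Cardiff NLP@}", ...}
#   after:  {"text": "@user shared {URL} via @Cardiff_NLP", ...}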