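# Normalize anonymization placeholders in the local JSONL copies of the
# tweet_ner7 data, rewriting each file in place:
#   "{{USERNAME}}"        -> "@user"
#   "{{URL}}"             -> "{URL}"
#   "{@ Display Name @}"  -> "@Display_Name"  (verified-account mentions)
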
import json
import re

from glob import glob

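# The same normalization, presumably a one-off pass over the tweet_topic
# files, kept commented out for reference: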
# for i in glob("data/tweet_topic/*.jsonl"):
#     with open(i) as f:
#         data = [json.loads(j) for j in f.readlines()]
#     for d in data:
#         for c in ['text']:
#             d[c] = d[c].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
#             for t in re.findall(r'{@[^@^}]*@}', d[c]):
#                 d[c] = d[c].replace(t, t.replace("{@", "@").replace("@}", "").replace(" ", "_"))
#     with open(i, "w") as f:
#         f.write("\n".join([json.dumps(j) for j in data]))

for i in glob("data/tweet_ner7/*.jsonl"):
    with open(i) as f:
        # one JSON object per line; skip blank lines defensively
        data = [json.loads(j) for j in f if j.strip()]
    for d in data:
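        # Map the anonymization placeholders back to surface forms, in both
        # the raw text and the token list.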
        d['text'] = d['text'].replace("{{USERNAME}}", "@user").replace("{{URL}}", "{URL}")
        d['text_tokenized'] = [y if y != "{{USERNAME}}" else "@user" for y in d['text_tokenized']]
        d['text_tokenized'] = [y if y != "{{URL}}" else "{URL}" for y in d['text_tokenized']]

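        # Verified-account mentions are wrapped as "{@ Display Name @}";
        # collapse each into a single handle-like token, e.g. "@Display_Name".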
        for t in re.findall(r'{@[^@}]*@}', d['text']):
            t_new = t.replace("{@", "@").replace("@}", "").replace(" ", "_")
            d['text'] = d['text'].replace(t, t_new)
            d['text_tokenized'] = [y if y != t else t_new for y in d['text_tokenized']]
            for e in d['entities']:
                # The original line here was incomplete (`e['']`). Assuming
                # each entity dict keeps its surface string under an 'entity'
                # key (a guess at the schema), apply the same rewrite so the
                # entities stay consistent with the updated text.
                if 'entity' in e:
                    e['entity'] = e['entity'].replace(t, t_new)

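    # Write the normalized records back to the same file.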
    with open(i, "w") as f:
        f.write("\n".join([json.dumps(j) for j in data]))