"""Process the raw T-REx files.

Download and unpack the raw dump first:

    mkdir data_raw
    cd data_raw
    wget https://figshare.com/ndownloader/files/8760241
    unzip 8760241
    cd ../
"""

import json
import os
import re
import string
from glob import glob

import pandas as pd
from tqdm import tqdm

# Flatten the raw T-REx JSON files into a single JSON-lines file of
# (subject, predicate, object) triples with their source title and text.
if not os.path.exists('data/t_rex.raw.jsonl'):
    os.makedirs('data', exist_ok=True)
    with open('data/t_rex.raw.jsonl', 'w') as f_writer:
        for i in tqdm(glob("data_raw/*.json")):
            with open(i) as f:
                data = json.load(f)
            for _data in data:
                for triple in _data['triples']:
                    p = triple['predicate']['surfaceform']
                    o = triple['object']['surfaceform']
                    s = triple['subject']['surfaceform']
                    # Skip triples with a missing surface form.
                    if p is None or o is None or s is None:
                        continue
                    out = {"predicate": p, "object": o, "subject": s, "title": _data["title"], "text": _data["text"]}
                    f_writer.write(json.dumps(out) + "\n")
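
# Each line of data/t_rex.raw.jsonl is one JSON object with the keys
# "predicate", "object", "subject", "title", and "text".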

# Pronouns that should never appear as an entity surface form.
stopwords = ["he", "she", "they", "it"]
# Characters allowed in an entity mention: lowercase letters, digits, spaces.
list_alnum = string.ascii_lowercase + '0123456789 '

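# e.g. re.findall(rf'[^{list_alnum}]+', "new york!") returns ['!'], so the
# token "new york!" would be rejected by _subfilter below.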
def filtering(entry):
    """Return True if the entry's subject and object look like clean entity mentions."""

    def _subfilter(token):
        # Reject tokens containing any character outside [a-z0-9 ].
        if len(re.findall(rf'[^{list_alnum}]+', token)) != 0:
            return False
        # Reject bare pronouns.
        if token in stopwords:
            return False
        # Reject URLs and tokens starting with punctuation or symbols.
        if token.startswith(("www", ".", ",", "$", "+", "#")):
            return False
        return True

    if not _subfilter(entry["object"].lower()):
        return False
    if not _subfilter(entry["subject"].lower()):
        return False
    # Require at least one capitalized mention: drop pairs where both the
    # object and the subject are fully lowercase.
    if entry['object'].islower() and entry['subject'].islower():
        return False
    return True
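
# For example, {"object": "Tokyo", "subject": "japan"} passes (one mention is
# capitalized), {"object": "city", "subject": "river"} fails the final
# all-lowercase check, and any token with punctuation fails _subfilter.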

# Load the raw triples, apply the entity filter, keep only predicates that
# occur at least three times, and drop duplicate rows.
with open("data/t_rex.raw.jsonl") as f:
    data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
print(f"[before]: {len(data)}")
data = [i for i in data if filtering(i)]
df = pd.DataFrame(data)
count = df.groupby("predicate")['title'].count()
df = df[[count[p] >= 3 for p in df['predicate']]]
df = df.drop_duplicates()
print(f"[after] : {len(df)}")
print(f"[entity]: {len(set(df['object'].unique().tolist() + df['subject'].unique().tolist()))}")
data = [i.to_dict() for _, i in df.iterrows()]

with open("data/t_rex.filter.jsonl", 'w') as f:
    f.write('\n'.join([json.dumps(i) for i in data]))
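
# The filtered file can be read back the same way as the raw one, e.g.:
#
#   with open("data/t_rex.filter.jsonl") as f:
#       triples = [json.loads(line) for line in f if line.strip()]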