|
import os |
|
import json |
|
from tqdm import tqdm |
|
import numpy as np |
|
from datasets import load_dataset |
|
|
|
# Directory where the generated train/valid JSONL splits are written.
export_dir = 'dataset'

os.makedirs(export_dir, exist_ok=True)

# ConceptNet 5 triples from the Hugging Face hub ('train' split).
dataset = load_dataset("conceptnet5", "conceptnet5", split="train")
|
|
|
|
|
def check(example):
    """Return True when *example* is a usable English ConceptNet triple.

    An example is rejected when:
      * its ``sentence`` field is empty,
      * its language tag is not ``'en'``,
      * its relation is the literal string ``'None'``,
      * either argument's final URI component is too short (<= 2 chars)
        or is a multi-word term (contains a space or underscore).
    """
    if not example['sentence']:
        return False
    if example['lang'] != 'en':
        return False
    if example['rel'] == 'None':
        return False
    # Arguments are URIs like '/c/en/cat'; the last path component is the term.
    atom_1 = os.path.basename(example['arg1'])
    atom_2 = os.path.basename(example['arg2'])
    for atom in (atom_1, atom_2):
        if len(atom) <= 2:
            return False
        # Keep single-token terms only (no spaces, no underscore-joined words).
        if ' ' in atom or '_' in atom:
            return False
    return True
|
|
|
|
|
# Keep only the examples that pass the heuristic quality checks.
dataset = dataset.filter(check)

# Distinct relation labels remaining after filtering.
relations = list(set(dataset["rel"]))

# Corpus frequency of every term appearing as either argument.
all_word = [os.path.basename(uri) for uri in dataset['arg1'] + dataset['arg2']]
t, c = np.unique(all_word, return_counts=True)
freq = dict(zip(t, c))
|
|
|
|
|
def freq_filter(example, min_count=5, counts=None):
    """Return True when both arguments of *example* are frequent enough.

    Args:
        example: dataset row with 'arg1'/'arg2' concept URIs.
        min_count: minimum corpus frequency required for each term
            (default 5, matching the original hard-coded threshold).
        counts: term -> frequency mapping; defaults to the module-level
            ``freq`` table.

    Raises:
        KeyError: if a term is absent from the frequency table.
    """
    if counts is None:
        counts = freq  # module-level frequency table built from the dataset
    term_1 = os.path.basename(example['arg1'])
    term_2 = os.path.basename(example['arg2'])
    return counts[term_1] >= min_count and counts[term_2] >= min_count
|
|
|
|
|
# Export one JSONL record per relation type, split ~70/30 into train/valid.
with open(f"{export_dir}/train.jsonl", 'w') as f_train, \
        open(f"{export_dir}/valid.jsonl", 'w') as f_valid:
    for r in tqdm(relations):
        # Deterministic shuffle (seed 0) of the examples for this relation.
        _dataset = dataset.filter(lambda example: example['rel'] == r).shuffle(0)
        pairs = [[os.path.basename(i['arg1']), os.path.basename(i['arg2'])]
                 for i in _dataset if freq_filter(i)]
        # BUG FIX: size the split on the frequency-filtered pairs, not on the
        # unfiltered dataset, so the 70/30 ratio holds after filtering.
        train_size = int(len(pairs) * 0.7)
        # BUG FIX: terminate each record with '\n' so the output is valid JSONL
        # (previously all records for a file ended up on a single line).
        f_train.write(json.dumps({
            'relation_type': os.path.basename(r),
            'positives': pairs[:train_size],
            'negatives': []
        }) + '\n')
        if len(pairs[train_size:]) > 0:
            f_valid.write(json.dumps({
                'relation_type': os.path.basename(r),
                'positives': pairs[train_size:],
                'negatives': []
            }) + '\n')
|
|