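"""Build relation-classification data from ConceptNet5.

Downloads the ConceptNet5 dump via HuggingFace `datasets`, keeps English
triples whose arguments are single-token, reasonably frequent entities, and
writes one JSONL record per relation type to ``dataset/train.jsonl`` and
``dataset/valid.jsonl`` (roughly a 70/30 split per relation).
"""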
import os
import json
from tqdm import tqdm
import numpy as np
from datasets import load_dataset

export_dir = 'dataset'
os.makedirs(export_dir, exist_ok=True)
dataset = load_dataset("conceptnet5", "conceptnet5", split="train")


def check(example):
    """Keep only English triples that have a surface sentence, a named relation,
    and single-token arguments longer than two characters."""
    if example['sentence'] == '':
        return False
    if example['lang'] != 'en':
        return False
    if example['rel'] == 'None':
        return False
    atom_1 = os.path.basename(example['arg1'])
    atom_2 = os.path.basename(example['arg2'])
    for atom in [atom_1, atom_2]:
        if len(atom) <= 2:  # drop very short entities (two characters or fewer)
            return False
        if ' ' in atom or '_' in atom:  # drop multi-word entities (e.g. "new_york")
            return False
    return True


dataset = dataset.filter(check)
relations = list(set(dataset["rel"]))
# count how often each entity appears as either argument across the filtered dataset
all_word = [os.path.basename(i) for i in dataset['arg1'] + dataset['arg2']]
t, c = np.unique(all_word, return_counts=True)
freq = {_t: _c for _t, _c in zip(t, c)}


def freq_filter(example):
    """Keep a triple only if both of its entities appear at least 5 times overall."""
    if freq[os.path.basename(example['arg1'])] < 5:
        return False
    if freq[os.path.basename(example['arg2'])] < 5:
        return False
    return True


with open(f"{export_dir}/train.jsonl", 'w') as f_train:
    with open(f"{export_dir}/valid.jsonl", 'w') as f_valid:
        for r in tqdm(relations):
            _dataset = dataset.filter(lambda example: example['rel'] == r).shuffle(0)
            pairs = [[os.path.basename(i['arg1']), os.path.basename(i['arg2'])] for i in _dataset if freq_filter(i)]
            train_size = int(len(_dataset) * 0.7)
            f_train.write(json.dumps({
                'relation_type': os.path.basename(r),
                'positives': pairs[:train_size],
                'negatives': []
            }))
            if len(pairs[train_size:]) > 0:
                f_valid.write(json.dumps({
                    'relation_type': os.path.basename(r),
                    'positives': pairs[train_size:],
                    'negatives': []
                }))