init

This commit adds a datasets loader script (conceptnet.py) and a split-statistics script (get_stats.py), and reworks the export step of process.py to filter word pairs by entity frequency.

- conceptnet.py +72 -0
- get_stats.py +30 -0
- process.py +30 -20
conceptnet.py
ADDED
@@ -0,0 +1,72 @@
+import json
+import datasets
+
+logger = datasets.logging.get_logger(__name__)
+_DESCRIPTION = """[ConceptNet5](https://ojs.aaai.org/index.php/AAAI/article/view/11164)"""
+_NAME = "conceptnet"
+_VERSION = "1.0.0"
+_CITATION = """
+@inproceedings{speer2017conceptnet,
+    title={Conceptnet 5.5: An open multilingual graph of general knowledge},
+    author={Speer, Robyn and Chin, Joshua and Havasi, Catherine},
+    booktitle={Thirty-first AAAI conference on artificial intelligence},
+    year={2017}
+}
+"""
+
+_HOME_PAGE = "https://github.com/asahi417/relbert"
+_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/raw/main/dataset'
+_URLS = {
+    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
+    str(datasets.Split.VALIDATION): [f'{_URL}/valid.jsonl'],
+}
+
+
+class ConceptNetHighConfidenceConfig(datasets.BuilderConfig):
+    """BuilderConfig"""
+
+    def __init__(self, **kwargs):
+        """BuilderConfig.
+        Args:
+            **kwargs: keyword arguments forwarded to super.
+        """
+        super(ConceptNetHighConfidenceConfig, self).__init__(**kwargs)
+
+
+class ConceptNetHighConfidence(datasets.GeneratorBasedBuilder):
+    """Dataset."""
+
+    BUILDER_CONFIGS = [
+        ConceptNetHighConfidenceConfig(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
+    ]
+
+    def _split_generators(self, dl_manager):
+        downloaded_file = dl_manager.download_and_extract(_URLS)
+        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
+                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION]]

+    def _generate_examples(self, filepaths):
+        _key = 0
+        for filepath in filepaths:
+            logger.info(f"generating examples from = {filepath}")
+            with open(filepath, encoding="utf-8") as f:
+                _list = [i for i in f.read().split('\n') if len(i) > 0]
+                for i in _list:
+                    data = json.loads(i)
+                    yield _key, data
+                    _key += 1
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description=_DESCRIPTION,
+            features=datasets.Features(
+                {
+                    "relation_type": datasets.Value("string"),
+                    "positives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                    "negatives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
+                }
+            ),
+            supervised_keys=None,
+            homepage=_HOME_PAGE,
+            citation=_CITATION,
+        )
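
With this builder on the Hub, both splits load through the standard datasets API. A minimal usage sketch (the printed relation is illustrative, not actual output):

import datasets

# Downloads the train.jsonl / valid.jsonl files listed in _URLS and runs the builder above.
data = datasets.load_dataset('relbert/conceptnet')
sample = data['train'][0]
print(sample['relation_type'])   # e.g. 'AtLocation' (illustrative)
print(sample['positives'][:2])   # a few [head, tail] word pairs for that relation
print(data['validation'])        # the split built from valid.jsonl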
get_stats.py
ADDED
@@ -0,0 +1,30 @@
+import pandas as pd
+from datasets import load_dataset
+
+data = load_dataset('relbert/conceptnet')
+stats = []
+for k in data.keys():
+    for i in data[k]:
+        stats.append({'relation_type': i['relation_type'], 'split': k, 'positives': len(i['positives']), 'negatives': len(i['negatives'])})
+df = pd.DataFrame(stats)
+df_train = df[df['split'] == 'train']
+df_valid = df[df['split'] == 'validation']
+stats = []
+for r in df['relation_type'].unique():
+    _df_t = df_train[df_train['relation_type'] == r]
+    _df_v = df_valid[df_valid['relation_type'] == r]
+    stats.append({
+        'relation_type': r,
+        'positive (train)': 0 if len(_df_t) == 0 else _df_t['positives'].values[0],
+        'negative (train)': 0 if len(_df_t) == 0 else _df_t['negatives'].values[0],
+        'positive (validation)': 0 if len(_df_v) == 0 else _df_v['positives'].values[0],
+        'negative (validation)': 0 if len(_df_v) == 0 else _df_v['negatives'].values[0],
+    })
+
+df = pd.DataFrame(stats).sort_values(by=['relation_type'])
+df.index = df.pop('relation_type')
+df.to_csv('stats.csv')
+with open('stats.md', 'w') as f:
+    f.write(df.to_markdown())
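
The second loop relies on each relation_type occurring at most once per split (hence .values[0]). Under that same assumption, the wide table can also be produced in one step with pandas pivot_table; a sketch, with the column renaming chosen to mirror the script's headers:

import pandas as pd

# df is the long-format frame built above: one row per (relation_type, split).
wide = df.pivot_table(index='relation_type', columns='split',
                      values=['positives', 'negatives'],
                      aggfunc='sum', fill_value=0)  # relations missing from a split count as 0
# Flatten the (value, split) MultiIndex into headers like 'positive (train)'.
wide.columns = [f"{v[:-1]} ({s})" for v, s in wide.columns]
wide.sort_index().to_csv('stats.csv')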
process.py
CHANGED
@@ -1,7 +1,7 @@
 import os
 import json
 from tqdm import tqdm
-
+import numpy as np
 from datasets import load_dataset
 
 export_dir = 'dataset'
@@ -30,23 +30,33 @@ def check(example):
 
 dataset = dataset.filter(lambda example: check(example))
 relations = list(set(dataset["rel"]))
+all_word = [os.path.basename(i) for i in dataset['arg1'] + dataset['arg2']]
+t, c = np.unique(all_word, return_counts=True)
+freq = {_t: _c for _t, _c in zip(t, c)}
+
+
+def freq_filter(example):  # keep only pairs whose entities occur at least 5 times
+    if freq[os.path.basename(example['arg1'])] < 5:
+        return False
+    if freq[os.path.basename(example['arg2'])] < 5:
+        return False
+    return True
+
 
-…
-            'negatives': []
-        }))
+with open(f"{export_dir}/train.jsonl", 'w') as f_train:
+    with open(f"{export_dir}/valid.jsonl", 'w') as f_valid:
+        for r in tqdm(relations):
+            _dataset = dataset.filter(lambda example: example['rel'] == r).shuffle(0)
+            pairs = [[os.path.basename(i['arg1']), os.path.basename(i['arg2'])] for i in _dataset if freq_filter(i)]
+            train_size = int(len(pairs) * 0.7)  # 70/30 split over the filtered pairs
+            f_train.write(json.dumps({
+                'relation_type': os.path.basename(r),
+                'positives': pairs[:train_size],
+                'negatives': []
+            }) + '\n')  # newline so each relation is one JSONL record
+            if len(pairs[train_size:]) > 0:
+                f_valid.write(json.dumps({
+                    'relation_type': os.path.basename(r),
+                    'positives': pairs[train_size:],
+                    'negatives': []
+                }) + '\n')
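
Since the loader in conceptnet.py reads these files line by line, a quick read-back check after export is worthwhile. A sketch, assuming the default export_dir of 'dataset':

import json

# Each line of the exported files must parse as a standalone JSON record
# with exactly the fields the loader's _info() declares.
for path in ('dataset/train.jsonl', 'dataset/valid.jsonl'):
    with open(path, encoding='utf-8') as f:
        for n, line in enumerate(f):
            record = json.loads(line)
            assert set(record) == {'relation_type', 'positives', 'negatives'}, (path, n)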