asahi417 committed
Commit f63efd0
Parent(s): 42e7d1a
.gitignore ADDED
@@ -0,0 +1 @@
cache

README.md ADDED
@@ -0,0 +1,82 @@
---
language:
- en
license:
- other
multilinguality:
- monolingual
size_categories:
- 1K<n<10K
pretty_name: ConceptNet with High Confidence
---
# Dataset Card for "relbert/conceptnet_high_confidence"
## Dataset Description
- **Repository:** [RelBERT](https://github.com/asahi417/relbert)
- **Paper:** [https://home.ttic.edu/~kgimpel/commonsense.html](https://home.ttic.edu/~kgimpel/commonsense.html)
- **Dataset:** High Confidence Subset of ConceptNet

### Dataset Summary
The high-confidence subset of ConceptNet used in [this work](https://home.ttic.edu/~kgimpel/commonsense.html), compiled to fine-tune the [RelBERT](https://github.com/asahi417/relbert) model.

## Dataset Structure
### Data Instances
An example of `train` looks as follows.
```
{
    "relation_type": "AtLocation",
    "positives": [["fish", "water"], ["cloud", "sky"], ["child", "school"], ... ],
    "negatives": [["pen", "write"], ["sex", "fun"], ["soccer", "sport"], ["fish", "school"], ... ]
}
```
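
To inspect these records directly, the dataset can be loaded with the `datasets` library; a minimal sketch (the same `load_dataset` call that `get_stats.py` below uses):

```
from datasets import load_dataset

# Each row holds one relation type with its positive and negative word pairs.
data = load_dataset('relbert/conceptnet_high_confidence')
example = data['train'][0]
print(example['relation_type'])   # relation label, e.g. "AtLocation"
print(example['positives'][:3])   # first three positive word pairs
```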

### Data Splits
| name                       | train | validation |
|:---------------------------|------:|-----------:|
| conceptnet_high_confidence |    25 |         24 |

### Number of Positive/Negative Word Pairs in each Split

| relation_type    |   positive (train) |   negative (train) |   positive (validation) |   negative (validation) |
|:-----------------|-------------------:|-------------------:|------------------------:|------------------------:|
| AtLocation       |                383 |               1768 |                      97 |                     578 |
| CapableOf        |                195 |               1790 |                      73 |                     600 |
| Causes           |                 71 |               1797 |                      26 |                     595 |
| CausesDesire     |                  9 |               1793 |                      11 |                     595 |
| CreatedBy        |                  2 |               1796 |                       0 |                       0 |
| DefinedAs        |                  0 |                  0 |                       2 |                     595 |
| Desires          |                 16 |               1794 |                      12 |                     595 |
| HasA             |                 67 |               1814 |                      17 |                     595 |
| HasFirstSubevent |                  2 |               1796 |                       0 |                       0 |
| HasLastSubevent  |                  2 |               1796 |                       3 |                     593 |
| HasPrerequisite  |                168 |               1803 |                      57 |                     592 |
| HasProperty      |                 94 |               1801 |                      39 |                     605 |
| HasSubevent      |                125 |               1798 |                      40 |                     609 |
| IsA              |                310 |               1764 |                      98 |                     603 |
| MadeOf           |                 17 |               1793 |                       7 |                     593 |
| MotivatedByGoal  |                 14 |               1796 |                      11 |                     595 |
| NotCapableOf     |                 15 |               1793 |                       0 |                       0 |
| NotDesires       |                  4 |               1795 |                       4 |                     592 |
| PartOf           |                 34 |               1801 |                       7 |                     593 |
| ReceivesAction   |                 18 |               1793 |                       8 |                     593 |
| SymbolOf         |                  0 |                  0 |                       2 |                     596 |
| UsedFor          |                249 |               1815 |                      81 |                     588 |
| SUM              |               1795 |              35896 |                     595 |                   11305 |

### Citation Information
```
@InProceedings{P16-1137,
  author    = "Li, Xiang
               and Taheri, Aynaz
               and Tu, Lifu
               and Gimpel, Kevin",
  title     = "Commonsense Knowledge Base Completion",
  booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
  year      = "2016",
  publisher = "Association for Computational Linguistics",
  pages     = "1445--1455",
  location  = "Berlin, Germany",
  doi       = "10.18653/v1/P16-1137",
  url       = "http://aclweb.org/anthology/P16-1137"
}
```

augment_negative.py ADDED
@@ -0,0 +1,12 @@
import json
from glob import glob
from itertools import chain

# For each split file, extend every relation's negatives with the positive
# word pairs of all *other* relation types, so pairs that are positive for a
# different relation serve as additional negatives.
for i in glob("dataset/*.jsonl"):
    with open(i) as f:
        tmp = [json.loads(o) for o in f.read().split('\n') if len(o) > 0]
    for r in tmp:
        r['negatives'] = r['negatives'] + list(
            chain(*[o['positives'] for o in tmp if o['relation_type'] != r['relation_type']]))
    with open(i, 'w') as f:
        f.write('\n'.join([json.dumps(r) for r in tmp]))
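
A self-contained toy run of the same augmentation step, with hypothetical records standing in for the parsed JSONL rows, to show what lands in `negatives`:

```
from itertools import chain

# Hypothetical records; the real files hold one such object per relation type.
tmp = [
    {"relation_type": "AtLocation", "positives": [["fish", "water"]], "negatives": [["pen", "write"]]},
    {"relation_type": "UsedFor", "positives": [["pen", "writing"]], "negatives": []},
]
r = tmp[0]
r['negatives'] = r['negatives'] + list(
    chain(*[o['positives'] for o in tmp if o['relation_type'] != r['relation_type']]))
print(r['negatives'])  # [['pen', 'write'], ['pen', 'writing']]
```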
conceptnet_high_confidence_v2.py ADDED
@@ -0,0 +1,86 @@
import json
import datasets

logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[ConceptNet with high confidence](https://home.ttic.edu/~kgimpel/commonsense.html)"""
_NAME = "conceptnet_high_confidence"
_VERSION = "2.0.0"
_CITATION = """
@inproceedings{li-16,
    title = {Commonsense Knowledge Base Completion},
    author = {Xiang Li and Aynaz Taheri and Lifu Tu and Kevin Gimpel},
    booktitle = {Proc. of ACL},
    year = {2016}
}
@InProceedings{P16-1137,
    author = "Li, Xiang
        and Taheri, Aynaz
        and Tu, Lifu
        and Gimpel, Kevin",
    title = "Commonsense Knowledge Base Completion",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    year = "2016",
    publisher = "Association for Computational Linguistics",
    pages = "1445--1455",
    location = "Berlin, Germany",
    doi = "10.18653/v1/P16-1137",
    url = "http://aclweb.org/anthology/P16-1137"
}
"""

_HOME_PAGE = "https://github.com/asahi417/relbert"
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/raw/main/dataset'
_URLS = {
    str(datasets.Split.TRAIN): [f'{_URL}/train.jsonl'],
    str(datasets.Split.VALIDATION): [f'{_URL}/valid.jsonl'],
    "full": [f'{_URL}/valid.jsonl', f'{_URL}/train.jsonl'],  # extra split concatenating both files
}


class ConceptNetHighConfidenceV2Config(datasets.BuilderConfig):
    """BuilderConfig"""

    def __init__(self, **kwargs):
        """BuilderConfig.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(ConceptNetHighConfidenceV2Config, self).__init__(**kwargs)


class ConceptNetHighConfidenceV2(datasets.GeneratorBasedBuilder):
    """Dataset."""

    BUILDER_CONFIGS = [
        ConceptNetHighConfidenceV2Config(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]

    def _split_generators(self, dl_manager):
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[i]}) for i in _URLS.keys()]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                # One JSON object per line; each becomes one dataset example.
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "relation_type": datasets.Value("string"),
                    "positives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                    "negatives": datasets.Sequence(datasets.Sequence(datasets.Value("string"))),
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
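
Assuming the script above is what backs the Hub dataset, split selection would look like the sketch below; note the extra "full" split defined in `_URLS`, which concatenates `valid.jsonl` and `train.jsonl`:

```
from datasets import load_dataset

train = load_dataset('relbert/conceptnet_high_confidence', split='train')
valid = load_dataset('relbert/conceptnet_high_confidence', split='validation')
full = load_dataset('relbert/conceptnet_high_confidence', split='full')  # validation + train
print(len(train), len(valid), len(full))
```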
dataset/train.jsonl ADDED
The diff for this file is too large to render. See raw diff
dataset/valid.jsonl ADDED
The diff for this file is too large to render. See raw diff
get_stats.py ADDED
@@ -0,0 +1,35 @@
import pandas as pd
from datasets import load_dataset

# Collect per-relation counts of positive/negative pairs for each split.
data = load_dataset('relbert/conceptnet_high_confidence')
stats = []
for k in data.keys():
    for i in data[k]:
        stats.append({'relation_type': i['relation_type'], 'split': k,
                      'positives': len(i['positives']), 'negatives': len(i['negatives'])})
df = pd.DataFrame(stats)
df_train = df[df['split'] == 'train']
df_valid = df[df['split'] == 'validation']

# Pivot into one row per relation type, filling 0 where a relation is absent from a split.
stats = []
for r in df['relation_type'].unique():
    _df_t = df_train[df_train['relation_type'] == r]
    _df_v = df_valid[df_valid['relation_type'] == r]
    stats.append({
        'relation_type': r,
        'positive (train)': 0 if len(_df_t) == 0 else _df_t['positives'].values[0],
        'negative (train)': 0 if len(_df_t) == 0 else _df_t['negatives'].values[0],
        'positive (validation)': 0 if len(_df_v) == 0 else _df_v['positives'].values[0],
        'negative (validation)': 0 if len(_df_v) == 0 else _df_v['negatives'].values[0],
    })

# Sort by relation type and append a SUM row (transpose, add as column, transpose back).
df = pd.DataFrame(stats).sort_values(by=['relation_type'])
df.index = df.pop('relation_type')
sum_pairs = df.sum(0)
df = df.T
df['SUM'] = sum_pairs
df = df.T

df.to_csv('stats.csv')
with open('stats.md', 'w') as f:
    f.write(df.to_markdown())
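
Running this script regenerates `stats.csv` and `stats.md` (both added in this commit); the markdown output is the same per-relation table embedded in `README.md` above.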
process.py ADDED
@@ -0,0 +1,74 @@
import json
import os
import gzip
import requests

import pandas as pd

urls = {
    'dev1': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev1.txt.gz',
    'dev2': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev2.txt.gz',
    'test': 'https://home.ttic.edu/~kgimpel/comsense_resources/test.txt.gz'
}
exclude = ['NotCapable', 'NotDesires']  # relation types skipped when writing the splits


def wget(url, cache_dir: str = './cache'):
    """ Download and uncompress a gzipped file, returning the uncompressed path. """
    os.makedirs(cache_dir, exist_ok=True)
    filename = os.path.basename(url)
    path = f'{cache_dir}/{filename}'
    # The .gz archive is deleted after extraction, so check for the
    # uncompressed file to decide whether a cached copy already exists.
    if os.path.exists(path.replace('.gz', '')):
        return path.replace('.gz', '')
    with open(path, "wb") as f:
        r = requests.get(url)
        f.write(r.content)
    with gzip.open(path, 'rb') as f:
        with open(path.replace('.gz', ''), 'wb') as f_write:
            f_write.write(f.read())
    os.remove(path)
    return path.replace('.gz', '')


def read_file(file_name):
    """ Split a tab-separated file of (relation, head, tail, flag) rows into
    positive (flag == '1') and negative (flag == '0') DataFrames. """
    with open(file_name) as f_reader:
        df = pd.DataFrame([i.split('\t') for i in f_reader.read().split('\n') if len(i) > 0],
                          columns=['relation', 'head', 'tail', 'flag'])
    df_positive = df[df['flag'] == '1'].copy()  # .copy() avoids SettingWithCopyWarning on pop below
    df_negative = df[df['flag'] == '0'].copy()
    df_positive.pop('flag')
    df_negative.pop('flag')
    return df_positive, df_negative


if __name__ == '__main__':
    # Train split: test + dev1 files; validation split: dev2 file.
    test_p, test_n = read_file(wget(urls['test']))
    dev1_p, dev1_n = read_file(wget(urls['dev1']))
    train_p = pd.concat([test_p, dev1_p])
    train_n = pd.concat([test_n, dev1_n])
    with open('dataset/train.jsonl', 'w') as f:
        for relation, df_p in train_p.groupby('relation'):
            if len(df_p) < 2:
                continue
            if relation in exclude:
                continue
            df_n = train_n[train_n['relation'] == relation]
            f.write(json.dumps({
                'relation_type': relation,
                'positives': df_p[['head', 'tail']].to_numpy().tolist(),
                'negatives': df_n[['head', 'tail']].to_numpy().tolist()
            }) + '\n')

    dev2_p, dev2_n = read_file(wget(urls['dev2']))
    with open('dataset/valid.jsonl', 'w') as f:
        for relation, df_p in dev2_p.groupby('relation'):
            if len(df_p) < 2:
                continue
            if relation in exclude:
                continue
            df_n = dev2_n[dev2_n['relation'] == relation]
            f.write(json.dumps({
                'relation_type': relation,
                'positives': df_p[['head', 'tail']].to_numpy().tolist(),
                'negatives': df_n[['head', 'tail']].to_numpy().tolist()
            }) + '\n')
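
For reference, `read_file` expects tab-separated (relation, head, tail, flag) rows; a hypothetical input line in that format and how it splits:

```
# Hypothetical line in the dev1/dev2/test file format consumed by read_file.
line = "AtLocation\tfish\twater\t1"
relation, head, tail, flag = line.split('\t')
print(relation, head, tail, flag)  # flag '1' marks a positive pair, '0' a negative
```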
stats.csv ADDED
@@ -0,0 +1,24 @@
relation_type,positive (train),negative (train),positive (validation),negative (validation)
AtLocation,383,1768,97,578
CapableOf,195,1790,73,600
Causes,71,1797,26,595
CausesDesire,9,1793,11,595
CreatedBy,2,1796,0,0
DefinedAs,0,0,2,595
Desires,16,1794,12,595
HasA,67,1814,17,595
HasFirstSubevent,2,1796,0,0
HasLastSubevent,2,1796,3,593
HasPrerequisite,168,1803,57,592
HasProperty,94,1801,39,605
HasSubevent,125,1798,40,609
IsA,310,1764,98,603
MadeOf,17,1793,7,593
MotivatedByGoal,14,1796,11,595
NotCapableOf,15,1793,0,0
NotDesires,4,1795,4,592
PartOf,34,1801,7,593
ReceivesAction,18,1793,8,593
SymbolOf,0,0,2,596
UsedFor,249,1815,81,588
SUM,1795,35896,595,11305
stats.md ADDED
@@ -0,0 +1,25 @@
| relation_type    |   positive (train) |   negative (train) |   positive (validation) |   negative (validation) |
|:-----------------|-------------------:|-------------------:|------------------------:|------------------------:|
| AtLocation       |                383 |               1768 |                      97 |                     578 |
| CapableOf        |                195 |               1790 |                      73 |                     600 |
| Causes           |                 71 |               1797 |                      26 |                     595 |
| CausesDesire     |                  9 |               1793 |                      11 |                     595 |
| CreatedBy        |                  2 |               1796 |                       0 |                       0 |
| DefinedAs        |                  0 |                  0 |                       2 |                     595 |
| Desires          |                 16 |               1794 |                      12 |                     595 |
| HasA             |                 67 |               1814 |                      17 |                     595 |
| HasFirstSubevent |                  2 |               1796 |                       0 |                       0 |
| HasLastSubevent  |                  2 |               1796 |                       3 |                     593 |
| HasPrerequisite  |                168 |               1803 |                      57 |                     592 |
| HasProperty      |                 94 |               1801 |                      39 |                     605 |
| HasSubevent      |                125 |               1798 |                      40 |                     609 |
| IsA              |                310 |               1764 |                      98 |                     603 |
| MadeOf           |                 17 |               1793 |                       7 |                     593 |
| MotivatedByGoal  |                 14 |               1796 |                      11 |                     595 |
| NotCapableOf     |                 15 |               1793 |                       0 |                       0 |
| NotDesires       |                  4 |               1795 |                       4 |                     592 |
| PartOf           |                 34 |               1801 |                       7 |                     593 |
| ReceivesAction   |                 18 |               1793 |                       8 |                     593 |
| SymbolOf         |                  0 |                  0 |                       2 |                     596 |
| UsedFor          |                249 |               1815 |                      81 |                     588 |
| SUM              |               1795 |              35896 |                     595 |                   11305 |