asahi417 committed on
Commit
0bc91e1
1 Parent(s): 30f2448
Files changed (3)
  1. README.md +72 -0
  2. conceptnet.py +0 -0
  3. process.py +52 -0
README.md ADDED
@@ -0,0 +1,72 @@
+ ---
+ language:
+ - en
+ license:
+ - other
+ multilinguality:
+ - monolingual
+ size_categories:
+ - 1K<n<10K
+ pretty_name: ConceptNet
+ ---
+ # Dataset Card for "relbert/conceptnet"
+ ## Dataset Description
+ - **Repository:** [RelBERT](https://github.com/asahi417/relbert)
+ - **Paper:** [https://ojs.aaai.org/index.php/AAAI/article/view/11164](https://ojs.aaai.org/index.php/AAAI/article/view/11164)
+ - **Dataset:** ConceptNet5
+
+ ### Dataset Summary
+ Word pairs grouped by relation type, extracted from ConceptNet5 and compiled to fine-tune the [RelBERT](https://github.com/asahi417/relbert) model.
+
+ ## Dataset Structure
+ ### Data Instances
+ An example of a `train` record looks as follows.
+ ```
+ {
+     "relation_type": "AtLocation",
+     "positives": [["fish", "water"], ["cloud", "sky"], ["child", "school"], ... ],
+     "negatives": [["pen", "write"], ["sex", "fun"], ["soccer", "sport"], ["fish", "school"], ... ]
+ }
+ ```
+
+ ### Data Splits
+ | name       | train | validation |
+ |:-----------|------:|-----------:|
+ | conceptnet |    25 |         24 |
+
+ ### Number of Positive/Negative Word Pairs in Each Split
+
+ | relation_type    | positive (train) | negative (train) | positive (validation) | negative (validation) |
+ |:-----------------|-----------------:|-----------------:|-----------------------:|-----------------------:|
+ | AtLocation       | 383 | 356 | 97 | 80 |
+ | CapableOf        | 195 | 190 | 73 | 78 |
+ | Causes           | 71 | 73 | 26 | 26 |
+ | CausesDesire     | 9 | 7 | 11 | 11 |
+ | CreatedBy        | 2 | 3 | 0 | 0 |
+ | DefinedAs        | 0 | 0 | 2 | 2 |
+ | Desires          | 16 | 15 | 12 | 12 |
+ | HasA             | 67 | 86 | 17 | 17 |
+ | HasFirstSubevent | 2 | 3 | 0 | 0 |
+ | HasLastSubevent  | 2 | 3 | 3 | 1 |
+ | HasPrerequisite  | 168 | 176 | 57 | 54 |
+ | HasProperty      | 94 | 100 | 39 | 49 |
+ | HasSubevent      | 125 | 128 | 40 | 54 |
+ | IsA              | 310 | 279 | 98 | 106 |
+ | MadeOf           | 17 | 15 | 7 | 5 |
+ | MotivatedByGoal  | 14 | 15 | 11 | 11 |
+ | NotCapableOf     | 15 | 13 | 0 | 0 |
+ | NotDesires       | 4 | 4 | 4 | 1 |
+ | PartOf           | 34 | 40 | 7 | 5 |
+ | ReceivesAction   | 18 | 16 | 8 | 6 |
+ | SymbolOf         | 0 | 0 | 2 | 3 |
+ | UsedFor          | 249 | 269 | 81 | 74 |
+
+ ### Citation Information
+ ```
+ @inproceedings{speer2017conceptnet,
+     title={Conceptnet 5.5: An open multilingual graph of general knowledge},
+     author={Speer, Robyn and Chin, Joshua and Havasi, Catherine},
+     booktitle={Thirty-first AAAI conference on artificial intelligence},
+     year={2017}
+ }
+ ```
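
The card above stores one record per relation type rather than one record per pair. A minimal loading sketch, assuming the dataset is published on the Hugging Face Hub under the id `relbert/conceptnet` with the `train`/`validation` splits listed in the card:

```python
from datasets import load_dataset

# Hub id "relbert/conceptnet" is an assumption based on the card title.
data = load_dataset("relbert/conceptnet")

# Each record bundles every positive/negative word pair for one relation type.
for record in data["train"]:
    print(record["relation_type"],
          len(record["positives"]),
          len(record["negatives"]))
```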
conceptnet.py ADDED
File without changes
process.py ADDED
@@ -0,0 +1,52 @@
+ import os
+ import json
+ from tqdm import tqdm
+
+ from datasets import load_dataset
+
+ export_dir = 'dataset'
+ os.makedirs(export_dir, exist_ok=True)
+ dataset = load_dataset("conceptnet5", "conceptnet5", split="train")
+
+
+ def check(example):
+     """Keep English triples with a sentence, a named relation, and single-word arguments."""
+     if example['sentence'] == '':
+         return False
+     if example['lang'] != 'en':
+         return False
+     if example['rel'] == 'None':
+         return False
+     atom_1 = os.path.basename(example['arg1'])
+     atom_2 = os.path.basename(example['arg2'])
+     for atom in [atom_1, atom_2]:
+         if len(atom) <= 2:  # condition on the number of characters
+             return False
+         if len(atom.split(' ')) != 1:  # condition on the number of words
+             return False
+         if len(atom.split('_')) != 1:  # condition on the number of words
+             return False
+     return True
+
+
+ dataset = dataset.filter(check)
+ relations = list(set(dataset["rel"]))
+
+ for r in tqdm(relations):
+     _dataset = dataset.filter(lambda example: example['rel'] == r)
+     _dataset = _dataset.shuffle(seed=0)
+     # 70/30 train/validation split per relation type
+     train_size = int(len(_dataset) * 0.7)
+     train = _dataset.select(range(train_size))
+     valid = _dataset.select(range(train_size, len(_dataset)))
+
+     with open(f"{export_dir}/train.{os.path.basename(r)}.jsonl", 'w') as f:
+         f.write(json.dumps({
+             'relation_type': os.path.basename(r),
+             # keep only the surface form (e.g. "/c/en/fish" -> "fish")
+             'positives': [[os.path.basename(i['arg1']), os.path.basename(i['arg2'])] for i in train],
+             'negatives': []
+         }))
+
+     with open(f"{export_dir}/valid.{os.path.basename(r)}.jsonl", 'w') as f:
+         f.write(json.dumps({
+             'relation_type': os.path.basename(r),
+             'positives': [[os.path.basename(i['arg1']), os.path.basename(i['arg2'])] for i in valid],
+             'negatives': []
+         }))
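
`process.py` writes one JSON record per relation and split into `dataset/`. A short sanity-check sketch that re-reads those exported files and tallies the positive pairs per split (it assumes the script above has already been run, so `dataset/` exists):

```python
import json
import os
from glob import glob

# Tally positive pairs in each file exported by process.py.
for split in ["train", "valid"]:
    for path in sorted(glob(os.path.join("dataset", f"{split}.*.jsonl"))):
        with open(path) as f:
            record = json.loads(f.read())
        print(split, record["relation_type"], len(record["positives"]))
```

The printed counts should line up with the "Number of Positive/Negative Word Pairs in Each Split" table in the README (negatives are left empty by this script, so only the positive columns can be checked this way).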