asahi417 committed on
Commit
9b23379
1 Parent(s): f63efd0
conceptnet_high_confidence_v2.py CHANGED
@@ -3,8 +3,8 @@ import datasets
3
 
4
  logger = datasets.logging.get_logger(__name__)
5
  _DESCRIPTION = """[ConceptNet with high confidence](https://home.ttic.edu/~kgimpel/commonsense.html)"""
6
- _NAME = "conceptnet_high_confidence"
7
- _VERSION = "2.0.0"
8
  _CITATION = """
9
  @inproceedings{li-16,
10
  title = {Commonsense Knowledge Base Completion},
 
3
 
4
  logger = datasets.logging.get_logger(__name__)
5
  _DESCRIPTION = """[ConceptNet with high confidence](https://home.ttic.edu/~kgimpel/commonsense.html)"""
6
+ _NAME = "conceptnet_high_confidence_v2"
7
+ _VERSION = "2.0.1"
8
  _CITATION = """
9
  @inproceedings{li-16,
10
  title = {Commonsense Knowledge Base Completion},
dataset/train.jsonl CHANGED
The diff for this file is too large to render. See raw diff
 
get_stats.py CHANGED
@@ -1,7 +1,7 @@
1
  import pandas as pd
2
  from datasets import load_dataset
3
 
4
- data = load_dataset('relbert/conceptnet_high_confidence')
5
  stats = []
6
  for k in data.keys():
7
  for i in data[k]:
 
1
  import pandas as pd
2
  from datasets import load_dataset
3
 
4
+ data = load_dataset('relbert/conceptnet_high_confidence_v2')
5
  stats = []
6
  for k in data.keys():
7
  for i in data[k]:
process.py CHANGED
@@ -10,7 +10,7 @@ urls = {
10
  'dev2': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev2.txt.gz',
11
  'test': 'https://home.ttic.edu/~kgimpel/comsense_resources/test.txt.gz'
12
  }
13
- exclude = ['NotCapable', 'NotDesires']
14
 
15
 
16
  def wget(url, cache_dir: str = './cache'):
@@ -52,6 +52,7 @@ if __name__ == '__main__':
52
  continue
53
  if relation in exclude:
54
  continue
 
55
  df_n = train_n[train_n['relation'] == relation]
56
  f.write(json.dumps({
57
  'relation_type': relation,
 
10
  'dev2': 'https://home.ttic.edu/~kgimpel/comsense_resources/dev2.txt.gz',
11
  'test': 'https://home.ttic.edu/~kgimpel/comsense_resources/test.txt.gz'
12
  }
13
+ exclude = ['NotCapableOf', 'NotDesires']
14
 
15
 
16
  def wget(url, cache_dir: str = './cache'):
 
52
  continue
53
  if relation in exclude:
54
  continue
55
+ print(relation)
56
  df_n = train_n[train_n['relation'] == relation]
57
  f.write(json.dumps({
58
  'relation_type': relation,