asahi417 committed
Commit: 1a38689
Parent(s): e480913

fix readme

Files changed (2):
  1. nell.py +1 -3
  2. stats.py +6 -63
nell.py CHANGED
@@ -5,7 +5,7 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """Few shots link prediction dataset. """
 _NAME = "nell"
-_VERSION = "0.0.7"
+_VERSION = "0.0.8"
 _CITATION = """
 @inproceedings{xiong-etal-2018-one,
     title = "One-Shot Relational Learning for Knowledge Graphs",
@@ -76,10 +76,8 @@ class NELL(datasets.GeneratorBasedBuilder):
             {
                 "relation": datasets.Value("string"),
                 "head": datasets.Value("string"),
-                "head_entity": datasets.Value("string"),
                 "head_type": datasets.Value("string"),
                 "tail": datasets.Value("string"),
-                "tail_entity": datasets.Value("string"),
                 "tail_type": datasets.Value("string"),
             }
         ),
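
The second hunk drops the `head_entity` and `tail_entity` columns from the feature schema, so downstream code that indexed those fields will break. A minimal sketch of what a record looks like after this commit (the split name comes from stats.py below; the assert is illustrative, not part of the commit):

```python
from datasets import load_dataset

# Load the bumped release (v0.0.8); split names mirror those used in stats.py.
data = load_dataset("relbert/nell", split="test")

# Each record now exposes exactly five string fields; the former
# head_entity / tail_entity columns are gone.
record = data[0]
assert set(record) == {"relation", "head", "head_type", "tail", "tail_type"}
```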
stats.py CHANGED
@@ -1,70 +1,13 @@
-import pandas as pd
-from datasets import load_dataset
 from itertools import chain
 
-tmp = {}
-for _type in ['nell_filter']:
-    with open(f"data/{_type}.vocab.txt") as f:
-        vocab = pd.DataFrame([i.split("\t") for i in f.read().split('\n') if len(i) > 0], columns=["entity", "type"])
-    vocab_df = vocab.groupby("type").count().sort_values(by="entity", ascending=False)
-    vocab_df['sample'] = [
-        ", ".join(vocab[vocab.type == i].sample(min(3, sum(vocab.type == i)))["entity"].values.tolist()) for i in
-        vocab_df.index]
-    tmp[_type] = vocab_df
-keys = set(list(chain(*[list(v.index) for v in tmp.values()])))
-df = pd.DataFrame([{
-    "entity_type": k,
-    "nell": tmp["nell"].loc[k]['entity'] if k in tmp["nell"].index else 0,
-    "nell_filter": tmp["nell_filter"].loc[k]['entity'] if k in tmp["nell_filter"].index else 0,
-    "sample": tmp["nell"].loc[k]['sample'] if k in tmp["nell"].index else tmp["nell_filter"].loc[k]['sample']
-} for k in keys]).sort_values(by="nell", ascending=False)
-df = pd.concat([df, pd.DataFrame([{
-    "entity_type": "SUM",
-    'nell': df['nell'].sum(),
-    'nell_filter': df['nell_filter'].sum(),
-    'sample': ""}])])
-df.to_csv(f"stats/stats.vocab.csv", index=False)
-print(f"\nVocab Size")
-print(df.to_markdown(index=False))
-
-for split in ['train', 'validation', 'test']:
-    print(split)
-
-    tmp = {}
-    for _type in ['nell_filter']:
-        data = load_dataset("relbert/nell", _type, split=split)
-        df = data.to_pandas()
-
-        tail = df.groupby("tail_type")['relation'].count().to_dict()
-        head = df.groupby("head_type")['relation'].count().to_dict()
-        k = set(list(tail.keys()) + list(head.keys()))
-        df_types = pd.DataFrame([{"entity_type": _k, "tail": tail[_k] if _k in tail else 0, "head": head[_k] if _k in head else 0} for _k in k])
-        df_types.index = df_types.pop("entity_type")
-        tmp[_type] = df_types
-
-    keys = set(list(chain(*[list(v.index) for v in tmp.values()])))
-    df = pd.DataFrame([{
-        "entity_type": k,
-        "nell (head)": tmp["nell"].loc[k]['head'] if k in tmp["nell"].index else 0,
-        "nell_filter (head)": tmp["nell_filter"].loc[k]['head'] if k in tmp["nell_filter"].index else 0,
-        "nell (tail)": tmp["nell"].loc[k]['tail'] if k in tmp["nell"].index else 0,
-        "nell_filter (tail)": tmp["nell_filter"].loc[k]['tail'] if k in tmp["nell_filter"].index else 0,
-    } for k in keys]).sort_values(by="nell (head)", ascending=False)
-    df.to_csv(f"stats/stats.{split}.entity.csv", index=False)
-    print(f"\nHead/Tail Size")
-    print(df.to_markdown(index=False))
-
-    tmp = {}
-    data = load_dataset("relbert/nell", split=split)
-    df = data.to_pandas()
-    tmp[_type] = df.groupby("relation").count()['head']
-    keys = set(list(chain(*[list(v.index) for v in tmp.values()])))
-    df = pd.DataFrame([{
-        "relation_type": k,
-        "nell": tmp["nell"].loc[k] if k in tmp["nell"].index else 0,
-        "nell_filter": tmp["nell_filter"].loc[k] if k in tmp["nell_filter"].index else 0,
-    } for k in keys]).sort_values(by="nell", ascending=False)
-    df.to_csv(f"stats/stats.{split}.relation.csv", index=False)
-    print(f"\nRelation Size")
-    print(df.to_markdown(index=False))
+import pandas as pd
+from datasets import load_dataset
+
+
+def get_stats(split):
+    data = load_dataset("relbert/nell", split=split)
+    df = data.to_pandas()
+
+
+s = 'test'
+
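
As committed, `get_stats` has no body beyond loading a split, and the trailing `s = 'test'` looks like a leftover scratch variable. A hedged sketch of where the refactor seems to be heading, reusing the per-relation groupby from the removed code (the return value and the driver loop are assumptions, not part of this commit):

```python
from datasets import load_dataset


def get_stats(split):
    # Load one split of the default config and count triples per relation,
    # mirroring the per-relation aggregation deleted above.
    data = load_dataset("relbert/nell", split=split)
    df = data.to_pandas()
    return df.groupby("relation")["head"].count().sort_values(ascending=False)


if __name__ == "__main__":
    # Split names follow the removed driver loop.
    for s in ["train", "validation", "test"]:
        print(f"\nRelation Size ({s})")
        print(get_stats(s).to_markdown())
```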