asahi417 committed
Commit 37599eb
1 Parent(s): 32d7e91
README.md CHANGED
@@ -45,18 +45,20 @@ we choose top-`max predicate` triples based on the frequency of the subject and
 
 - number of triples in each configuration
 
- | min entity/max predicate | 10 | 25 | 50 | 100 |
- |-------------------------|-----|------|------|------|
- | 1 | 6,052 | 12,295 | 20,602 | 33,206 |
- | 2 | 5,489 | 11,153 | 18,595 | 29,618 |
- | 3 | 4,986 | 10,093 | 16,599 | 26,151 |
- | 4 | 4,640 | 9,384 | 15,335 | 24,075 |
+ | min entity / max predicate | 10 | 25 | 50 | 100 |
+ |-----------------------------:|-----:|------:|------:|------:|
+ | 1 | 5832 | 12075 | 20382 | 32986 |
+ | 2 | 5309 | 10973 | 18415 | 29438 |
+ | 3 | 4836 | 9943 | 16449 | 26001 |
+ | 4 | 4501 | 9245 | 15196 | 23936 |
 
- - number of predicates in each configuration
 
- | min entity | 1 | 2 | 3 |4|
- |-----------------------------:|-----:|-----:|-----:|------:|
- | number of unique predicates | 818 | 726 | 665 | 614 |
+ | min entity | predicate |
+ |-----------------------------:|------------:|
+ | 1 | 659 |
+ | 2 | 603 |
+ | 3 | 557 |
+ | 4 | 516 |
 
 - distribution of entities
 
@@ -64,7 +66,7 @@ we choose top-`max predicate` triples based on the frequency of the subject and
 
 - distribution of predicates
 
- <img src="https://huggingface.co/datasets/relbert/t_rex/resolve/main/data/stats.predicate_distribution.max_predicate_100.png" alt="" width="500" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
+ <img src="https://huggingface.co/datasets/relbert/t_rex/resolve/main/data/stats.predicate_distribution.png" alt="" width="500" style="margin-left:'auto' margin-right:'auto' display:'block'"/>
 
 
 ## Dataset Structure
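For reference, a minimal sketch of how one of the per-configuration dumps written by the updated `stats.py` (see the diff further down) could be inspected. The filename pattern is taken from the diff; the per-record keys are simply whatever columns the script serialised, so none are assumed here.

```python
# Sketch only: load one cleaned dump (here min entity = 1, max predicate = 100)
# and check the record count against the README table above (expected 32986).
import json

path = "data/t_rex.clean.min_entity_1_max_predicate_100.jsonl"
with open(path) as f:
    records = [json.loads(line) for line in f if line.strip()]

print(len(records))        # expected: 32986 for this configuration
print(sorted(records[0]))  # inspect whichever fields the dump actually has
```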
data/stats.data_size.csv CHANGED
@@ -1,5 +1,5 @@
- min entity / max predicate,10,25,50,100
- 1,6052,12295,20602,33206
- 2,5489,11153,18595,29618
- 3,4986,10093,16599,26151
- 4,4640,9384,15335,24075
+ min entity / max predicate,10,25,50,100,predicate
+ 1,5832,12075,20382,32986,659
+ 2,5309,10973,18415,29438,603
+ 3,4836,9943,16449,26001,557
+ 4,4501,9245,15196,23936,516
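As a sanity check, the regenerated CSV can be read back with pandas and rendered as the markdown tables shown in the README; this mirrors the `to_csv` / `to_markdown` calls in `stats.py` below and assumes only the column layout visible in the diff above.

```python
# Sketch: reload data/stats.data_size.csv and reproduce the two README tables.
# Columns 10/25/50/100 are triple counts per "max predicate"; the added
# 'predicate' column is the number of unique predicates per "min entity".
import pandas as pd

df = pd.read_csv("data/stats.data_size.csv", index_col="min entity / max predicate")
print(df.drop(columns=["predicate"]).to_markdown())  # triples per configuration
print(df[["predicate"]].to_markdown())               # unique predicates
```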
data/stats.entity_distribution.png CHANGED
Git LFS Details (old)
  • SHA256: a728a2e0850d4f941ca295e52f04ed1bb4f75c8799cc5f66c23e46e072d5c7c6
  • Pointer size: 130 Bytes
  • Size of remote file: 66.1 kB
Git LFS Details (new)
  • SHA256: 7275d6fdf7792c48a361dd458b26f2d4968b5b4aa0f599562a1c1ca8f1d93f62
  • Pointer size: 130 Bytes
  • Size of remote file: 66.1 kB
data/stats.predicate_distribution.max_predicate_100.png DELETED

Git LFS Details

  • SHA256: b78acc9e0b834b3a28e86743ecff9220a2d77c6793376fc070e5dd2e105adc60
  • Pointer size: 130 Bytes
  • Size of remote file: 54.8 kB
data/stats.predicate_distribution.max_predicate_25.png DELETED

Git LFS Details

  • SHA256: 22a0bb261884e089272088eb33da3665a1163736b5f5e73902a87b4b1138186d
  • Pointer size: 130 Bytes
  • Size of remote file: 48.8 kB
data/stats.predicate_distribution.max_predicate_50.png DELETED

Git LFS Details

  • SHA256: 86ad907696751a4e96ea4a3e03b4b1893dff7453400f4af2f40a9f44d2b25edf
  • Pointer size: 130 Bytes
  • Size of remote file: 52.5 kB
data/{stats.predicate_distribution.max_predicate_10.png → stats.predicate_distribution.png} RENAMED
File without changes
data/stats.predicate_size.csv DELETED
@@ -1,5 +0,0 @@
- min entity / max predicate,10,25,50,100
- 1,818,818,818,818
- 2,726,726,726,726
- 3,665,665,665,665
- 4,614,614,614,614
stats.py CHANGED
@@ -1,5 +1,6 @@
 """
- TODO:
+ TODO: save the data with different config
+ TODO: get stats for the frequency based selection
 """
 import json
 from itertools import product
@@ -36,7 +37,7 @@ def filtering(row, min_freq: int = 3, target: str = "subject"):
     return row[target] >= min_freq
 
 
- def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 1, random_sampling: bool = True):
+ def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 3, random_sampling: bool = True):
 
     df = df_main.copy()
 
@@ -77,7 +78,7 @@ def main(min_entity_freq, max_pairs_predicate, min_pairs_predicate: int = 1, ran
     df_balanced.pop("count_subject")
     df_balanced.pop("count_object")
     df_balanced.pop("count_sum")
-     target_data = [i.to_dict() for _, i in df_balanced]
+     target_data = [i.to_dict() for _, i in df_balanced.iterrows()]
 
     # return distribution
     predicate_dist = df_balanced.groupby("predicate")['text'].count().sort_values(ascending=False).to_dict()
@@ -95,37 +96,47 @@ if __name__ == '__main__':
 
     # run filtering with different configs
     for min_e_freq, max_p_freq in candidates:
-         p_dist, e_dist, data_size, new_data = main(min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq)
+         p_dist, e_dist, data_size, new_data = main(min_entity_freq=min_e_freq, max_pairs_predicate=max_p_freq, random_sampling=False)
         p_dist_full.append(p_dist)
         e_dist_full.append(e_dist)
         data_size_full.append(data_size)
         config.append([min_e_freq, max_p_freq])
+         with open(f"data/t_rex.clean.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl", 'w') as f:
+             f.write('\n'.join([json.dumps(i) for i in new_data]))
 
     # check statistics
     print("- Data Size")
     df_size = pd.DataFrame([{"min entity": mef, "max predicate": mpf, "freq": x} for x, (mef, mpf) in zip(data_size_full, candidates)])
     df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq")
     df_size.index.name = "min entity / max predicate"
-     df_size.to_csv("data/stats.data_size.csv")
-     print(df_size.to_markdown())
-     df_size = pd.DataFrame(
+     df_size_p = pd.DataFrame(
         [{"min entity": mef, "max predicate": mpf, "freq": len(x)} for x, (mef, mpf) in zip(p_dist_full, candidates)])
-     df_size = df_size.pivot(index="min entity", columns="max predicate", values="freq")
-     df_size.index.name = "min entity / max predicate"
-     df_size.to_csv("data/stats.predicate_size.csv")
+     df_size_p = df_size_p.pivot(index="max predicate", columns="min entity", values="freq")
+     df_size['predicate'] = df_size_p.loc[10]
+     df_size.to_csv("data/stats.data_size.csv")
     print(df_size.to_markdown())
 
     # plot predicate distribution
     df_p = pd.DataFrame([dict(enumerate(sorted(p.values(), reverse=True))) for p in p_dist_full]).T
     df_p.columns = [f"min entity: {mef}, max predicate: {mpf}" for mef, mpf in candidates]
-     for mpf in [100, 50, 25, 10]:
-         fig = plt.figure()
-         _df_p = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in [1, 2, 3, 4]]]
-         _df_p.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]]
-         ax = sns.lineplot(data=_df_p, linewidth=2.5)
-         ax.set(xlabel='unique predicates sorted by frequency', ylabel=f'number of triples', title='Predicate Distribution (max predicate: {mpf})')
-         ax.get_figure().savefig(f"data/stats.predicate_distribution.max_predicate_{mpf}.png", bbox_inches='tight')
-         ax.get_figure().clf()
+     plt.figure()
+     fig, axes = plt.subplots(2, 2, constrained_layout=True)
+     fig.suptitle('Predicate Distribution over Different Configurations')
+     for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]):
+         # fig = plt.figure()
+         _df = df_p[[f"min entity: {mef}, max predicate: {mpf}" for mef in [1, 2, 3, 4]]]
+         _df.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]]
+         ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
+         if mpf != 100:
+             ax.legend_.remove()
+         axes[x, y].set_title(f'max predicate: {mpf}')
+         # ax.set(xlabel='unique predicates sorted by frequency', ylabel='number of triples', title='Predicate Distribution (max predicate: {mpf})')
+         # ax.get_figure().savefig(f"data/stats.predicate_distribution.max_predicate_{mpf}.png", bbox_inches='tight')
+         # ax.get_figure().clf()
+     fig.supxlabel('unique predicates sorted by frequency')
+     fig.supylabel('number of triples')
+     fig.savefig("data/stats.predicate_distribution.png", bbox_inches='tight')
+     fig.clf()
 
     # plot entity distribution
     df_e = pd.DataFrame([dict(enumerate(sorted(e.values(), reverse=True))) for e in e_dist_full]).T
@@ -135,7 +146,7 @@ if __name__ == '__main__':
     for (x, y), mpf in zip([(0, 0), (0, 1), (1, 0), (1, 1)], [100, 50, 25, 10]):
         _df = df_e[[f"min entity: {mef}, max predicate: {mpf}" for mef in [1, 2, 3, 4]]]
         _df.columns = [f"min entity: {mef}" for mef in [1, 2, 3, 4]]
-         ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1.5)
+         ax = sns.lineplot(ax=axes[x, y], data=_df, linewidth=1)
         ax.set(xscale='log')
         if mpf != 100:
             ax.legend_.remove()
@@ -143,3 +154,4 @@ if __name__ == '__main__':
     fig.supxlabel('unique entities sorted by frequency')
     fig.supylabel('number of triples')
     fig.savefig("data/stats.entity_distribution.png", bbox_inches='tight')
+     fig.clf()
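The README hunk above describes choosing the top-`max predicate` triples per predicate by subject/object frequency. Below is a simplified, hypothetical re-implementation of that selection step for illustration only; the column names (`subject`, `object`, `predicate`) and the exact ranking are assumptions, not the actual logic inside `main()` in `stats.py`.

```python
# Hypothetical sketch of the frequency-based selection described in the README:
# keep triples whose subject/object occur at least `min_entity_freq` times,
# then keep at most `max_pairs_predicate` triples per predicate, preferring
# those with the most frequent entities (the random_sampling=False path
# this commit switches to, instead of random sampling).
import pandas as pd

def select_triples(df: pd.DataFrame, min_entity_freq: int, max_pairs_predicate: int) -> pd.DataFrame:
    # entity frequency counted over both subject and object slots
    freq = pd.concat([df["subject"], df["object"]]).value_counts()
    # keep triples whose subject and object are both frequent enough
    keep = df["subject"].map(freq).ge(min_entity_freq) & df["object"].map(freq).ge(min_entity_freq)
    df = df[keep]
    # rank triples within each predicate by combined entity frequency
    score = df["subject"].map(freq) + df["object"].map(freq)
    df = df.assign(count_sum=score).sort_values("count_sum", ascending=False)
    return (df.groupby("predicate", group_keys=False)
              .head(max_pairs_predicate)
              .drop(columns="count_sum"))
```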