fix readme
Files changed:
- check_stats.py (+8, -0)
- data/stats.csv (+17, -0)
- t_rex.py (+2, -2)
check_stats.py
CHANGED
@@ -58,3 +58,11 @@ df_test = pd.DataFrame([{
 for c in df_test.columns:
     df_test.loc[:, c] = df_test[c].map('{:,d}'.format)
 print(df_test.to_markdown(index=False))
+df["number of triples (test)"] = df_test["number of triples (test)"].values[0]
+df["number of unique predicates (test)"] = df_test["number of unique predicates (test)"].values[0]
+df["number of unique entities (test)"] = df_test["number of unique entities (test)"].values[0]
+df.pop("number of triples (all)")
+df.pop("number of unique predicates (all)")
+df.pop("number of unique entities (all)")
+df = df[sorted(df.columns)]
+df.to_csv("data/stats.csv")
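The added lines broadcast the shared test-split statistics into every per-config row, drop the redundant "(all)" aggregate columns, sort the columns, and write the table to data/stats.csv. A minimal self-contained sketch of the same pattern, using made-up toy counts and only a subset of the real columns:

```python
import pandas as pd

# Per-config stats, one row per dataset config (toy values for illustration).
df = pd.DataFrame({
    "data": ["filter_unified.min_entity_1_max_predicate_100"],
    "number of triples (train)": ["7,075"],
    "number of triples (all)": ["7,984"],
})

# The test split is shared across configs, so one row of stats suffices.
df_test = pd.DataFrame([{"number of triples (test)": 122}])

# Format raw counts with thousands separators, as check_stats.py does.
for c in df_test.columns:
    df_test.loc[:, c] = df_test[c].map('{:,d}'.format)

# Copy the shared test numbers into every row, drop the "(all)" aggregate,
# order the columns alphabetically, and write the table.
df["number of triples (test)"] = df_test["number of triples (test)"].values[0]
df.pop("number of triples (all)")
df = df[sorted(df.columns)]
df.to_csv("data/stats.csv")
```

The '{:,d}'.format step turns integers into thousands-separated strings, which is why the CSV added below quotes values like "7,075".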
data/stats.csv
ADDED
@@ -0,0 +1,17 @@
+data,number of triples (test),number of triples (train),number of triples (validation),number of unique entities (test),number of unique entities (train),number of unique entities (validation),number of unique predicates (test),number of unique predicates (train),number of unique predicates (validation)
+filter_unified.min_entity_1_max_predicate_100,122,"7,075",787,188,"8,496","1,324",34,212,166
+filter_unified.min_entity_1_max_predicate_50,122,"4,131",459,188,"5,111",790,34,212,156
+filter_unified.min_entity_1_max_predicate_25,122,"2,358",262,188,"3,079",465,34,212,144
+filter_unified.min_entity_1_max_predicate_10,122,"1,134",127,188,"1,587",233,34,210,94
+filter_unified.min_entity_2_max_predicate_100,122,"4,873",542,188,"5,386",887,34,195,139
+filter_unified.min_entity_2_max_predicate_50,122,"3,002",334,188,"3,457",575,34,193,139
+filter_unified.min_entity_2_max_predicate_25,122,"1,711",191,188,"2,112",331,34,195,113
+filter_unified.min_entity_2_max_predicate_10,122,858,96,188,"1,149",177,34,194,81
+filter_unified.min_entity_3_max_predicate_100,122,"3,659",407,188,"3,892",662,34,173,116
+filter_unified.min_entity_3_max_predicate_50,122,"2,336",260,188,"2,616",447,34,174,115
+filter_unified.min_entity_3_max_predicate_25,122,"1,390",155,188,"1,664",272,34,173,94
+filter_unified.min_entity_3_max_predicate_10,122,689,77,188,922,135,34,171,59
+filter_unified.min_entity_4_max_predicate_100,122,"2,995",333,188,"3,104",563,34,158,105
+filter_unified.min_entity_4_max_predicate_50,122,"1,989",222,188,"2,225",375,34,157,102
+filter_unified.min_entity_4_max_predicate_25,122,"1,221",136,188,"1,458",237,34,158,76
+filter_unified.min_entity_4_max_predicate_10,122,603,68,188,797,126,34,157,52
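Because the counts are stored as quoted strings with thousands separators (e.g. "7,075"), they need to be parsed back to integers when the CSV is read. A minimal sketch, assuming the file layout shown above:

```python
import pandas as pd

# thousands="," turns quoted values like "7,075" back into the integer 7075.
stats = pd.read_csv("data/stats.csv", thousands=",")

# Each row is one dataset config; columns hold per-split triple,
# entity, and predicate counts.
print(stats.set_index("data")["number of triples (train)"].head())
```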
t_rex.py
CHANGED
@@ -7,7 +7,7 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 _DESCRIPTION = """T-Rex dataset."""
 _NAME = "t_rex"
-_VERSION = "0.0.
+_VERSION = "0.0.7"
 _CITATION = """
 @inproceedings{elsahar2018t,
   title={T-rex: A large scale alignment of natural language with knowledge base triples},
@@ -23,7 +23,7 @@ MIN_ENTITY_FREQ = [1, 2, 3, 4]
 MAX_PREDICATE_FREQ = [100, 50, 25, 10]

 _TYPES = [f"filter_unified.min_entity_{a}_max_predicate_{b}" for a, b in product(MIN_ENTITY_FREQ, MAX_PREDICATE_FREQ)]
-_TYPES += ["raw", "filter", "filter_unified"]
+# _TYPES += ["raw", "filter", "filter_unified"]
 _NON_SPLITS = ["raw", "filter", "filter_unified"]
 _URLS = {i: {str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.jsonl']} if i in _NON_SPLITS else {
     str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.train.jsonl'],
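After this commit, only the filter_unified.min_entity_*_max_predicate_* configurations remain in _TYPES; the train-only "raw", "filter", and "filter_unified" variants are commented out. A usage sketch, assuming the loading script is hosted on the Hub as relbert/t_rex (the repo path is an assumption, not stated in the diff):

```python
from datasets import load_dataset

# One of the 16 configs generated by product(MIN_ENTITY_FREQ, MAX_PREDICATE_FREQ).
# Recent versions of datasets may also require trust_remote_code=True
# for script-based datasets like this one.
data = load_dataset("relbert/t_rex", "filter_unified.min_entity_1_max_predicate_100")

# With the non-split configs commented out of _TYPES, every remaining
# config exposes train/validation/test splits.
print(data["train"].num_rows, data["validation"].num_rows, data["test"].num_rows)
```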