Modalities: Text
Languages: English
Libraries: Datasets

import json
from itertools import product

import pandas as pd

# Entity- and predicate-frequency thresholds defining the 16 filtering configurations.
parameters_min_e_freq = [4, 8, 12, 16]
parameters_max_p_freq = [100, 50, 25, 10]


def read_jsonl(path):
    """Read a JSON-Lines file, skipping empty lines."""
    with open(path) as f:
        return [json.loads(line) for line in f if line.strip()]


# Count the examples in each split for every (min_e_freq, max_p_freq) combination.
stats = []
for min_e_freq, max_p_freq in product(parameters_min_e_freq, parameters_max_p_freq):
    prefix = f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}"
    train = read_jsonl(f"{prefix}.train.jsonl")
    validation = read_jsonl(f"{prefix}.validation.jsonl")
    test = read_jsonl(f"{prefix}.test.jsonl")
    stats.append({
        "data": f"filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}",
        "train": len(train),
        "validation": len(validation),
        "test": len(test)
    })

# Summarise the split sizes as a markdown table.
df = pd.DataFrame(stats)
df['total'] = df['train'] + df['validation'] + df['test']
print(df.to_markdown(index=False))
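
The script above only reports split sizes. Since the card lists the Datasets library, one way to actually load a configuration is the generic "json" builder of datasets; the snippet below is a minimal sketch, assuming the files sit under data/ as in the script and picking the min_entity 4 / max_predicate 100 configuration purely for illustration.

from datasets import load_dataset

# Pick one filtering configuration (4 / 100 chosen here only as an example).
min_e_freq, max_p_freq = 4, 100
prefix = f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}"

# Load the three JSON-Lines splits into a DatasetDict with the generic "json" builder.
dataset = load_dataset(
    "json",
    data_files={
        "train": f"{prefix}.train.jsonl",
        "validation": f"{prefix}.validation.jsonl",
        "test": f"{prefix}.test.jsonl",
    },
)
print({split: len(ds) for split, ds in dataset.items()})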