asahi417 committed
Commit a493627 (1 parent: 373bd8e)

fix readme
Files changed (34)
  1. .gitattributes +16 -0
  2. README.md +13 -12
  3. data/stats.data_size.csv +2 -2
  4. data/stats.entity_distribution.png +2 -2
  5. data/stats.predicate_distribution.png +2 -2
  6. data/stats.predicate_size.csv +4 -4
  7. data/t_rex.filter.min_entity_16_max_predicate_100.jsonl +0 -3
  8. data/t_rex.filter.min_entity_16_max_predicate_50.jsonl +0 -3
  9. data/t_rex.filter.min_entity_4_max_predicate_10.jsonl +0 -3
  10. data/t_rex.filter.min_entity_4_max_predicate_100.jsonl +0 -3
  11. data/t_rex.filter.min_entity_4_max_predicate_25.jsonl +0 -3
  12. data/t_rex.filter.min_entity_4_max_predicate_50.jsonl +0 -3
  13. data/t_rex.filter.min_entity_8_max_predicate_10.jsonl +0 -3
  14. data/t_rex.filter.min_entity_8_max_predicate_100.jsonl +0 -3
  15. data/t_rex.filter.min_entity_8_max_predicate_25.jsonl +0 -3
  16. data/t_rex.filter.min_entity_8_max_predicate_50.jsonl +0 -3
  17. data/{t_rex.filter.min_entity_12_max_predicate_10.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_10.jsonl} +2 -2
  18. data/{t_rex.filter.min_entity_16_max_predicate_10.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_100.jsonl} +2 -2
  19. data/{t_rex.filter.min_entity_12_max_predicate_25.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_25.jsonl} +2 -2
  20. data/{t_rex.filter.min_entity_16_max_predicate_25.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_50.jsonl} +2 -2
  21. data/t_rex.filter_unified.min_entity_16_max_predicate_10.jsonl +3 -0
  22. data/t_rex.filter_unified.min_entity_16_max_predicate_100.jsonl +3 -0
  23. data/t_rex.filter_unified.min_entity_16_max_predicate_25.jsonl +3 -0
  24. data/t_rex.filter_unified.min_entity_16_max_predicate_50.jsonl +3 -0
  25. data/t_rex.filter_unified.min_entity_4_max_predicate_10.jsonl +3 -0
  26. data/{t_rex.filter.min_entity_12_max_predicate_100.jsonl → t_rex.filter_unified.min_entity_4_max_predicate_100.jsonl} +2 -2
  27. data/t_rex.filter_unified.min_entity_4_max_predicate_25.jsonl +3 -0
  28. data/t_rex.filter_unified.min_entity_4_max_predicate_50.jsonl +3 -0
  29. data/t_rex.filter_unified.min_entity_8_max_predicate_10.jsonl +3 -0
  30. data/{t_rex.filter.min_entity_12_max_predicate_50.jsonl → t_rex.filter_unified.min_entity_8_max_predicate_100.jsonl} +2 -2
  31. data/t_rex.filter_unified.min_entity_8_max_predicate_25.jsonl +3 -0
  32. data/t_rex.filter_unified.min_entity_8_max_predicate_50.jsonl +3 -0
  33. filtering_purify.py +2 -2
  34. t_rex.py +6 -0
.gitattributes CHANGED

@@ -80,3 +80,19 @@ data/t_rex.filter.min_entity_16_max_predicate_100.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.filter.min_entity_4_max_predicate_25.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.filter.min_entity_8_max_predicate_25.jsonl filter=lfs diff=lfs merge=lfs -text
 data/t_rex.filter_unified.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_16_max_predicate_25.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_8_max_predicate_100.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_12_max_predicate_25.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_8_max_predicate_10.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_8_max_predicate_25.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_8_max_predicate_50.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_4_max_predicate_50.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_16_max_predicate_10.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_16_max_predicate_100.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_16_max_predicate_50.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_4_max_predicate_10.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_4_max_predicate_25.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_12_max_predicate_10.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_12_max_predicate_100.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_12_max_predicate_50.jsonl filter=lfs diff=lfs merge=lfs -text
+data/t_rex.filter_unified.min_entity_4_max_predicate_100.jsonl filter=lfs diff=lfs merge=lfs -text
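The sixteen tracking rules added above follow directly from the min-entity / max-predicate grid used elsewhere in this commit. As a cross-check (not part of the commit), here is a small Python sketch that regenerates the same patterns; the printed order differs from the order committed to .gitattributes:

    # Regenerate the 16 LFS tracking lines for the per-configuration files from the
    # frequency grid defined in t_rex.py (values copied from the hunk later in this diff).
    from itertools import product

    MIN_ENTITY_FREQ = [4, 8, 12, 16]
    MAX_PREDICATE_FREQ = [100, 50, 25, 10]

    for min_e, max_p in product(MIN_ENTITY_FREQ, MAX_PREDICATE_FREQ):
        print(f"data/t_rex.filter_unified.min_entity_{min_e}_max_predicate_{max_p}.jsonl "
              "filter=lfs diff=lfs merge=lfs -text")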
README.md CHANGED

@@ -21,7 +21,7 @@ We split the raw T-REX dataset into train/validation/test split by the ratio of
 ### Filtering to Remove Noise
 
 We apply filtering to keep triples with alpha-numeric subject and object, as well as triples with at least either of subject or object is a named-entity.
-After the filtering, we manually remove too vague and noisy predicate, and unify same predicates with different names.
+After the filtering, we manually remove too vague and noisy predicate, and unify same predicates with different names (see the annotation [here](https://huggingface.co/datasets/relbert/t_rex/raw/main/predicate_manual_check.csv)).
 
 | Dataset | Raw | Filter | Unification |
 |----------:|----------:|----------:|--------------:|
@@ -38,20 +38,21 @@ we choose top-`max predicate` triples based on the frequency of the subject and
 
 - number of triples in each configuration
 
-| min entity / max predicate | 10 | 25 | 50 | 100 |
-|-----------------------------:|-----:|-----:|------:|------:|
-| 4 | 4,501 | 9,245 | 15,196 | 23,936 |
-| 8 | 3,557 | 7,291 | 11,804 | 18,699 |
-| 12 | 3,132 | 6,346 | 10,155 | 16,115 |
-| 16 | 2,769 | 5,559 | 9,014 | 14,499 |
+| min entity / max predicate | 10 | 25 | 50 | 100 |
+|-----------------------------:|-----:|-----:|-----:|------:|
+| 4 | 1781 | 3961 | 6717 | 10768 |
+| 8 | 1522 | 3216 | 5286 | 8443 |
+| 12 | 1301 | 2720 | 4505 | 7191 |
+| 16 | 1120 | 2389 | 3994 | 6481 |
 
+- number of predicates in different min entity size
 
-| min entity | predicate |
+| min entity | 10 |
 |-------------:|-----:|
-| 4 | 516 |
-| 8 | 409 |
-| 12 | 366 |
-| 16 | 321 |
+| 4 | 193 |
+| 8 | 168 |
+| 12 | 146 |
+| 16 | 123 |
 
 - distribution of entities
 
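The README hunk above describes the noise filter: keep triples whose subject and object are alpha-numeric and where at least one of the two is a named entity. Below is a minimal illustrative sketch of that rule, not the repository's implementation; the field names ("subject", "object") and the capitalisation-based named-entity stand-in are assumptions:

    # Sketch of the noise filter described in the README (assumed field names;
    # the named-entity check here is a crude placeholder, not the real NER step).
    import json
    import re

    def is_alnum(phrase: str) -> bool:
        # alpha-numeric words separated by spaces
        return bool(re.fullmatch(r"[A-Za-z0-9 ]+", phrase))

    def keep_triple(triple: dict, is_named_entity) -> bool:
        if not (is_alnum(triple["subject"]) and is_alnum(triple["object"])):
            return False
        # at least one of subject/object must be a named entity
        return is_named_entity(triple["subject"]) or is_named_entity(triple["object"])

    with open("data/t_rex.raw.jsonl") as f:
        triples = [json.loads(line) for line in f if line.strip()]
    filtered = [t for t in triples if keep_triple(t, lambda s: s[:1].isupper())]
    print(f"{len(filtered)} / {len(triples)} triples kept")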
data/stats.data_size.csv CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6a75c050e067579359eef2dd61cfbcc4070654c70aea236153c2463a3191d582
-size 137
+oid sha256:b887e35489f814154dd4fcf5148ed40d43d033e790e38643861007aa6bd8b0c7
+size 131
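The three-line version / oid / size entries in this and the following diffs are Git LFS pointer files, not the data itself. A minimal sketch for reading such a pointer from a checkout where the LFS objects have not been pulled (the helper is illustrative, not part of the repository):

    # Parse a Git LFS pointer file (three "key value" lines) and report the sha256
    # and byte size of the object it points to.
    def read_lfs_pointer(path: str) -> dict:
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return {"sha256": fields["oid"].split(":", 1)[1], "size": int(fields["size"])}

    # e.g. {'sha256': 'b887e354...', 'size': 131} while data/stats.data_size.csv is still a pointer
    print(read_lfs_pointer("data/stats.data_size.csv"))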
data/stats.entity_distribution.png CHANGED

Git LFS Details (old)
  • SHA256: ece42f8d40e367320c6cacbd030edbe0eed91744f649e21e153eba24434288f1
  • Pointer size: 130 Bytes
  • Size of remote file: 65.2 kB

Git LFS Details (new)
  • SHA256: 8b1aedb4363cdc43b05e8c5434cccfd8a2e531386b16c4aa7f9818dbb2833858
  • Pointer size: 130 Bytes
  • Size of remote file: 68.8 kB

data/stats.predicate_distribution.png CHANGED

Git LFS Details (old)
  • SHA256: ad7aaff63ca3316191554066e879739cca34316ad0415437dcb491b367ba3f27
  • Pointer size: 130 Bytes
  • Size of remote file: 84.7 kB

Git LFS Details (new)
  • SHA256: 9a82b2a0fcf4ce1f34e976f4bd7e55b29ef1603a9b3441e94ba056aaafea92c2
  • Pointer size: 130 Bytes
  • Size of remote file: 85.2 kB
data/stats.predicate_size.csv CHANGED

@@ -1,5 +1,5 @@
 min entity,10
-4,516
-8,409
-12,366
-16,321
+4,193
+8,168
+12,146
+16,123
data/t_rex.filter.min_entity_16_max_predicate_100.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:83df4f92ad5366b1c1b30b1d82d27ba15cbf9fe8656ac25e9253267987774e8d
-size 19047802

data/t_rex.filter.min_entity_16_max_predicate_50.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4192b21dd6767564f60d9e4ad5762d92753fd53ba1d0fca3ff051ee5b6926b21
-size 11967293

data/t_rex.filter.min_entity_4_max_predicate_10.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:063063799b5aaa4c55a8748afad4c5fa76e5eaceda9ed0dc52d9709acb7fc829
-size 6096470

data/t_rex.filter.min_entity_4_max_predicate_100.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6ce68333aaf726e68cf9f2f3792f270e993674171a84cf5854b8c5e1760fa777
-size 31419865

data/t_rex.filter.min_entity_4_max_predicate_25.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a3028c980fec33c7fb5f4e17951bdc5e1daa86a83f2cf4b5bd6ab6611e1caa9b
-size 12353777

data/t_rex.filter.min_entity_4_max_predicate_50.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8fbc0df1edd1c3c317d7f45748deeb608391c64d3de11162664b996f540b5009
-size 20301813

data/t_rex.filter.min_entity_8_max_predicate_10.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:1f35c4c8a0a1fe68b8538aacc6fbfb60b61724b33a2eec63c3b1d0c621dd33b5
-size 4772164

data/t_rex.filter.min_entity_8_max_predicate_100.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:5a1bbdc98cabc4c761eaa1b11ee7fb8a6a401c93716c5f06538623bcc38c598e
-size 24795168

data/t_rex.filter.min_entity_8_max_predicate_25.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:3c6188aa66d44d67c949ab872f22c1039de3f0affff792025465423ac9262d3e
-size 9800900

data/t_rex.filter.min_entity_8_max_predicate_50.jsonl DELETED

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:62d57cf7448be05fe232100b11baddde6d1fbe48e5c3cb4e4b50abf125fd081d
-size 15780383
data/{t_rex.filter.min_entity_12_max_predicate_10.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_10.jsonl} RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6c82acb288dd26a2db2fde6e5cae77bf9dce42b2375aa1250e2bcf101b755360
-size 4206535
+oid sha256:bee332baf23f3b30d97ac34ff35d21daa0bfc44949fb81b5a53260146590852e
+size 1717182

data/{t_rex.filter.min_entity_16_max_predicate_10.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_100.jsonl} RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a2d0446d54f35f8dc616499a42f04a088ab84072ad057e0f32a240f12d192599
-size 3748775
+oid sha256:ab2e88b760860317b0033480f0abb15b800b1a8fa14df243b605add2c863e57a
+size 8723923

data/{t_rex.filter.min_entity_12_max_predicate_25.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_25.jsonl} RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83440ded067fb54b368af4b4af4ea1476e407c6131abf8e3885045b1aafe7398
-size 8370852
+oid sha256:a5b773e226a1f4d2b7affb0d017d9f6ac54b7df216d1fdd48a18e0f983163cf4
+size 3415381

data/{t_rex.filter.min_entity_16_max_predicate_25.jsonl → t_rex.filter_unified.min_entity_12_max_predicate_50.jsonl} RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:02f2dc1e885f53989a8fa7774bb899ce30d4cf03c1cf945cf9331c5d64a451f6
-size 7364459
+oid sha256:12973871cd44c9441e646dcdae12de370e273dfd8caa8b5710d0bb192f8b56e5
+size 5611692

data/t_rex.filter_unified.min_entity_16_max_predicate_10.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfcb02689f593cf273b44818fe20ad9a551c7343a9d8196b6f26c05671bc6858
+size 1456921

data/t_rex.filter_unified.min_entity_16_max_predicate_100.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a82011a3a3d874112a6ec09bc0cb014a762189538a4adafbcb64be50d1e791ca
+size 7808966

data/t_rex.filter_unified.min_entity_16_max_predicate_25.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5c42756cf46607a62183eb284be0eb1400ae02acc07d39e84b0aca118badac7c
+size 2985297

data/t_rex.filter_unified.min_entity_16_max_predicate_50.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:72033a65fef1890b808e45f9524d4c30dd99480255e155bcb8ad68eb6e847a36
+size 4926634

data/t_rex.filter_unified.min_entity_4_max_predicate_10.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:364307a693fdbdf09fc5db63416294539715d8339484b0077590f8d254ba1ed5
+size 2384750

data/{t_rex.filter.min_entity_12_max_predicate_100.jsonl → t_rex.filter_unified.min_entity_4_max_predicate_100.jsonl} RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:54c4e51e6d2f26596009faa44b642f55ab5c7675f1a87ec825441940dd745446
-size 21314377
+oid sha256:a50d482e305403b1a8ff54dbb1fe4fb00f718618191bced35a15c88038960408
+size 13630042

data/t_rex.filter_unified.min_entity_4_max_predicate_25.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c1e0035edda7cdf139f0939cbc04fd55e09eae905ade8971647b069225b823b
+size 5219065

data/t_rex.filter_unified.min_entity_4_max_predicate_50.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d65cf6728fc550108c6b59a313ab93a14461ca636ff08d5d2a4e3ed3b37ddaaf
+size 8669765

data/t_rex.filter_unified.min_entity_8_max_predicate_10.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b64f639231c042eb2b22132b24fd12487ecd705e604b16841bd562c7422733d
+size 2001091

data/{t_rex.filter.min_entity_12_max_predicate_50.jsonl → t_rex.filter_unified.min_entity_8_max_predicate_100.jsonl} RENAMED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1be72451c6aeed8dc3490703b4b675db9872fbc4b524f71cc8b4fddecaeb0bc3
-size 13536253
+oid sha256:bc844a1e884d23209bd26cf26ee801337e545014dfc532f2d8304d6e59d0b836
+size 10490218

data/t_rex.filter_unified.min_entity_8_max_predicate_25.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b4a117fa4dfc3e7fb26bcc09d48f62c1e3388002caef665fa7d4e101c842eee
+size 4120754

data/t_rex.filter_unified.min_entity_8_max_predicate_50.jsonl ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f15ad4d9ea3907ee2c8e8e40f5fe06f57aa3fcf77a6af62c633e4a8ff006f96b
+size 6698255
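Because every data file above is stored as an LFS object, a single configuration can be fetched without cloning the whole repository. A small sketch using huggingface_hub (the chosen filename is one of the files added in this commit; the expected record count comes from the README table):

    # Download a single per-configuration JSONL file from the dataset repo and count its records.
    from huggingface_hub import hf_hub_download

    path = hf_hub_download(
        repo_id="relbert/t_rex",
        repo_type="dataset",
        filename="data/t_rex.filter_unified.min_entity_4_max_predicate_10.jsonl",
    )
    with open(path) as f:
        n_records = sum(1 for line in f if line.strip())
    print(n_records)  # should match the README table (1781 for min entity 4 / max predicate 10)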
filtering_purify.py CHANGED

@@ -19,7 +19,7 @@ assert len(parameters_max_p_freq) == 4
 sns.set_theme(style="whitegrid")
 
 # load filtered data
-with open(f"data/t_rex.filter.jsonl") as f:
+with open(f"data/t_rex.filter_unified.jsonl") as f:
     data = Dataset.from_list([json.loads(i) for i in f.read().split('\n') if len(i) > 0])
 df_main = data.to_pandas()
 
@@ -99,7 +99,7 @@ if __name__ == '__main__':
         data_size_full.append(data_size)
         config.append([min_e_freq, max_p_freq])
         # save data
-        with open(f"data/t_rex.filter.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl", 'w') as f:
+        with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl", 'w') as f:
            f.write('\n'.join([json.dumps(i) for i in new_data]))
 
 # check statistics
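The two hunks above switch the script from the plain filtered dump to the unified dump, both as input and as the prefix of the per-configuration output files. For orientation, here is a rough, self-contained sketch of the kind of per-configuration thresholding the script performs around those hunks; the field names ("subject", "object", "predicate") and the exact ranking rule are assumptions, not the repository's code:

    # Sketch: read the unified data, apply (min entity, max predicate) thresholds,
    # and write one JSONL file per configuration, mirroring the file names above.
    import json
    from collections import Counter

    with open("data/t_rex.filter_unified.jsonl") as f:
        data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]

    entity_freq = Counter(t["subject"] for t in data) + Counter(t["object"] for t in data)

    for min_e_freq in [4, 8, 12, 16]:
        for max_p_freq in [100, 50, 25, 10]:
            # keep triples whose subject and object both appear at least min_e_freq times
            kept = [t for t in data
                    if entity_freq[t["subject"]] >= min_e_freq and entity_freq[t["object"]] >= min_e_freq]
            # per predicate, keep at most the max_p_freq triples with the most frequent entities
            new_data = []
            for p in {t["predicate"] for t in kept}:
                group = [t for t in kept if t["predicate"] == p]
                group.sort(key=lambda t: entity_freq[t["subject"]] + entity_freq[t["object"]], reverse=True)
                new_data += group[:max_p_freq]
            with open(f"data/t_rex.filter_unified.min_entity_{min_e_freq}_max_predicate_{max_p_freq}.jsonl", 'w') as f:
                f.write('\n'.join([json.dumps(i) for i in new_data]))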
t_rex.py CHANGED

@@ -1,4 +1,6 @@
 import json
+from itertools import product
+
 import datasets
 
 
@@ -17,7 +19,11 @@ _CITATION = """
 
 _HOME_PAGE = "https://github.com/asahi417/relbert"
 _URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
+MIN_ENTITY_FREQ = [4, 8, 12, 16]
+MAX_PREDICATE_FREQ = [100, 50, 25, 10]
+
 _TYPES = ["raw", "filter", "filter_unified"]
+# _TYPES += [f"filter_unified.min_entity_{a}_max_predicate_{b}" for a, b in product(MIN_ENTITY_FREQ, MAX_PREDICATE_FREQ)]
 _NON_SPLITS = ["raw", "filter", "filter_unified"]
 _URLS = {i: {str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.jsonl']} if i in _NON_SPLITS else {
     str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.train.jsonl'],
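For reference, the entries in _TYPES are the configuration names the loader exposes, so after this commit the unified data should be loadable roughly as below. This is a sketch, assuming a datasets version that still executes loader scripts; note the per-configuration names remain commented out in the hunk above:

    # Load one of the configurations declared in _TYPES via the datasets library.
    from datasets import load_dataset

    # "filter_unified" is a single-split ("train") configuration per _NON_SPLITS above.
    data = load_dataset("relbert/t_rex", "filter_unified", split="train")
    print(data)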