Commit: init
Changed files: process.py (+14 −1), t_rex.py (+4 −4)

process.py (CHANGED)
@@ -7,9 +7,10 @@ import json
|
|
7 |
import os
|
8 |
from glob import glob
|
9 |
from tqdm import tqdm
|
|
|
10 |
|
11 |
os.makedirs('data', exist_ok=True)
|
12 |
-
f_writer = open('data/
|
13 |
for i in tqdm(glob("*.json")):
|
14 |
with open(i) as f:
|
15 |
data = json.load(f)
|
@@ -22,3 +23,15 @@ for i in tqdm(glob("*.json")):
|
|
22 |
continue
|
23 |
out = {"predicate": p, "object": o, "subject": s, "title": _data["title"], "text": _data["text"]}
|
24 |
f_writer.write(json.dumps(out) + "\n")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
import os
|
8 |
from glob import glob
|
9 |
from tqdm import tqdm
|
10 |
+
from random import shuffle, seed
|
11 |
|
12 |
os.makedirs('data', exist_ok=True)
|
13 |
+
f_writer = open('data/t_rex.raw.jsonl', 'w')
|
14 |
for i in tqdm(glob("*.json")):
|
15 |
with open(i) as f:
|
16 |
data = json.load(f)
|
|
|
23 |
continue
|
24 |
out = {"predicate": p, "object": o, "subject": s, "title": _data["title"], "text": _data["text"]}
|
25 |
f_writer.write(json.dumps(out) + "\n")
|
26 |
+
f_writer.close()
|
27 |
+
|
28 |
+
seed(0)
|
29 |
+
with open('data/t_rex.raw.jsonl') as f:
|
30 |
+
data = [json.loads(i) for i in f.read().split('\n') if len(i) > 0]
|
31 |
+
shuffle(data)
|
32 |
+
train = data[:int(len(data) * 0.7)]
|
33 |
+
val = data[int(len(data) * 0.7):int(len(data) * 0.85)]
|
34 |
+
test = data[int(len(data) * 0.85):]
|
35 |
+
for i, j in zip([train, val, test], ['train', 'validation', 'test']):
|
36 |
+
with open(f'data/t_rex.raw.{j}.jsonl', 'w') as f:
|
37 |
+
f.write('\n'.join([json.dumps(l) for l in i]))
|
t_rex.py
CHANGED
@@ -17,11 +17,11 @@ _CITATION = """
|
|
17 |
|
18 |
_HOME_PAGE = "https://github.com/asahi417/relbert"
|
19 |
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
|
20 |
-
_TYPES = ["
|
21 |
_URLS = {i: {
|
22 |
-
str(datasets.Split.TRAIN): [f'{_URL}/{i}.train.jsonl'],
|
23 |
-
str(datasets.Split.VALIDATION): [f'{_URL}/{i}.validation.jsonl'],
|
24 |
-
str(datasets.Split.TEST): [f'{_URL}/{i}.test.jsonl']
|
25 |
} for i in _TYPES}
|
26 |
|
27 |
|
|
|
17 |
|
18 |
_HOME_PAGE = "https://github.com/asahi417/relbert"
|
19 |
_URL = f'https://huggingface.co/datasets/relbert/{_NAME}/resolve/main/data'
|
20 |
+
_TYPES = ["raw"]
|
21 |
_URLS = {i: {
|
22 |
+
str(datasets.Split.TRAIN): [f'{_URL}/t_rex.{i}.train.jsonl'],
|
23 |
+
str(datasets.Split.VALIDATION): [f'{_URL}/t_rex.{i}.validation.jsonl'],
|
24 |
+
str(datasets.Split.TEST): [f'{_URL}/t_rex.{i}.test.jsonl']
|
25 |
} for i in _TYPES}
|
26 |
|
27 |
|