Julian von der Goltz
committed on
Commit
•
20c86ba
1
Parent(s):
0ed5420
Add data origins and generation script
Browse files- origin/titels_pd.csv +3 -0
- origin/xml_pd.zip +3 -0
- src/create_nl_dataset.py +89 -0
- src/requirements.txt +6 -0
origin/titels_pd.csv
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:3a6dec2b669b3b2374e3fe53107d1f837f98e7233062418c425270185a6ecd9c
|
3 |
+
size 2679987
|
origin/xml_pd.zip
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c5f97a9189ed547bb54150fcb1343a9a77ae698ec99f1978ba0e6ed5a08761bf
|
3 |
+
size 665703428
|
src/create_nl_dataset.py
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import json
|
2 |
+
import math
|
3 |
+
import zipfile
|
4 |
+
|
5 |
+
import bs4
|
6 |
+
import dateutil.parser
|
7 |
+
import pandas as pd
|
8 |
+
from tqdm import tqdm
|
9 |
+
|
10 |
+
|
11 |
+
def yield_file_contents(zip_path, train_df, val_df):
    """Yield ``(item, split)`` pairs for every chapter section in a zip of XML files.

    Each ``item`` is a dict with keys ``'meta'`` (per-section metadata dict),
    ``'text'`` (the stripped section text) and ``'id'`` (unique per section).
    ``split`` is ``'train'`` or ``'validation'`` depending on which metadata
    frame contains the file's title id. Files whose title id appears in
    neither frame are skipped with a warning.

    Parameters
    ----------
    zip_path : str
        Path to a zip archive of XML documents.
    train_df, val_df : pandas.DataFrame
        Metadata frames; each must contain a ``'ti_id'`` column.
    """
    with zipfile.ZipFile(zip_path, 'r') as zip_file:
        for file_info in zip_file.infolist():
            with zip_file.open(file_info, 'r') as file:
                content = file.read()
            soup = bs4.BeautifulSoup(content, 'xml')

            # The title code identifies the work; fall back to the file name
            # (without the .xml suffix) when the element is missing.
            id_blk = soup.find('idno', type="titelcode")
            text_id = id_blk.text.strip() if id_blk is not None else file_info.filename.replace('.xml', '')
            # Drop the trailing part/volume suffix to obtain the title id
            # used in the metadata frames.
            ti_id = '_'.join(text_id.split('_')[:-1])

            train_row = train_df[train_df['ti_id'] == ti_id]
            val_row = val_df[val_df['ti_id'] == ti_id]
            if len(train_row) > 0:
                meta = train_row.iloc[0].to_dict()
                split = 'train'
            elif len(val_row) > 0:
                meta = val_row.iloc[0].to_dict()
                split = 'validation'
            else:
                # BUG FIX: the original fell through here and reused a stale
                # (or undefined) `meta`/`split` from the previous file.
                # Skip files without metadata instead.
                print(f'Did not find meta for {text_id}!')
                continue

            # JSON has no NaN; replace missing values with empty strings.
            for key, value in list(meta.items()):
                if isinstance(value, float) and math.isnan(value):
                    meta[key] = ''

            edition_blk = soup.find('edition')
            edition = edition_blk.text.strip() if edition_blk is not None else None

            lang_blk = soup.find('language')
            language = lang_blk.get('id').strip() if lang_blk is not None else None

            # The revision date lives under <revisionDesc><date>. Tolerate a
            # missing <revisionDesc>, a missing <date> child (BUG FIX: the
            # original left `date` undefined in that case), and unparseable
            # date strings.
            date = None
            rev_blk = soup.find('revisionDesc')
            if rev_blk is not None:
                date_blk = rev_blk.find('date')
                if date_blk is not None:
                    try:
                        date = dateutil.parser.parse(
                            date_blk.text.strip(),
                            yearfirst=True,
                            dayfirst=True,
                        ).isoformat()
                    except Exception:
                        date = None

            meta['revision_date'] = date
            meta['edition'] = edition
            meta['language'] = language

            for chap_idx, chapter in enumerate(soup.find_all('div', type='chapter')):
                for sec_idx, section in enumerate(chapter.find_all('div', type='section')):
                    # Yield a copy per section so consumers that keep items
                    # around do not all alias one mutated dict (the original
                    # reused and mutated the same `meta` object).
                    item_meta = dict(meta, chapter=chap_idx + 1, section=sec_idx + 1)
                    text = section.text.strip()
                    yield {'meta': item_meta, 'text': text, 'id': f"{text_id}_{chap_idx}_{sec_idx}"}, split
70 |
+
|
71 |
+
|
72 |
+
if __name__ == '__main__':
    # Hold out 5% of the titles for validation.
    train_fraction = 0.95
    metadata_path = '../origin/titels_pd.csv'
    meta_df = pd.read_csv(metadata_path, header=1, sep='|')

    # Deterministic shuffle before the split so reruns produce the same sets.
    meta_df = meta_df.sample(frac=1, random_state=0)

    num_train = round(train_fraction * len(meta_df))
    train_df = meta_df.iloc[:num_train]
    val_df = meta_df.iloc[num_train:]

    # Stream every section to the JSONL file matching its split.
    with open('../data/train.jsonl', 'w') as train_file, \
            open('../data/val.jsonl', 'w') as val_file:
        for item, split in tqdm(yield_file_contents('../origin/xml_pd.zip', train_df, val_df)):
            line = json.dumps(item) + '\n'
            if split == 'train':
                train_file.write(line)
            elif split == 'validation':
                val_file.write(line)
src/requirements.txt
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
beautifulsoup4
|
2 |
+
python-dateutil
|
3 |
+
pandas
|
4 |
+
datasets
|
5 |
+
lxml
|
6 |
+
tqdm
|