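"""Split the XML documents in ../origin/xml_pd.zip into per-section records,
join them with rows from ../origin/titels_pd.csv, and export train/validation
Parquet files under ../data/."""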
import json
import math
import os
import zipfile

import bs4
import datasets
import dateutil.parser
import pandas as pd
from tqdm import tqdm


def yield_file_contents(zip_path, train_df, val_df):
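    """Stream (record, split) pairs from the XML files inside ``zip_path``.

    Each record carries the joined CSV metadata plus one section's text;
    ``split`` is 'train' or 'validation' depending on which DataFrame
    contains the file's title ID.
    """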
    with zipfile.ZipFile(zip_path, 'r') as zip_file:
        for file_info in zip_file.infolist():
            with zip_file.open(file_info, 'r') as file:
                content = file.read()
            soup = bs4.BeautifulSoup(content, 'xml')

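            # Prefer the document's own 'titelcode' identifier; fall back to the
            # file name. Dropping the trailing '_'-segment yields the title-level
            # ID (ti_id) used to join against the metadata CSV.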
            id_blk = soup.find('idno', type="titelcode")
            text_id = id_blk.text.strip() if id_blk is not None else file_info.filename.replace('.xml', '')
            ti_id = '_'.join(text_id.split('_')[:-1])

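            # Decide the split by looking the title up in both metadata frames.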
            train_row = train_df[train_df['ti_id'] == ti_id]
            val_row = val_df[val_df['ti_id'] == ti_id]
            is_train = len(train_row) > 0
            is_val = len(val_row) > 0
            if is_train:
                meta = train_row.iloc[0].to_dict()
                split = 'train'
            elif is_val:
                meta = val_row.iloc[0].to_dict()
                split = 'validation'
            else:
                print(f'Did not find meta for {text_id}!')
                continue  # without metadata there is nothing to join, so skip the file

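            # pandas reads empty CSV fields as NaN floats; blank them so the
            # record serializes cleanly to JSON.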
            for key, value in list(meta.items()):
                if isinstance(value, float) and math.isnan(value):
                    meta[key] = ''

            edition_blk = soup.find('edition')
            edition = edition_blk.text.strip() if edition_blk is not None else None

            lang_blk = soup.find('language')
            # Default '' guards against a <language> element without an id attribute.
            language = lang_blk.get('id', '').strip() if lang_blk is not None else None
            
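            # Revision dates sit under revisionDesc/date in assorted formats;
            # parse leniently and fall back to None when parsing fails.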
            date_blk = soup.find('revisionDesc')
            if date_blk is not None:
                date_blk = date_blk.find('date')
            if date_blk is not None:
                try:
                    date = dateutil.parser.parse(
                        date_blk.text.strip(),
                        yearfirst=True,
                        dayfirst=True
                    ).isoformat()
                except Exception:
                    date = None
            else:
                date = None

            meta['revision_date'] = date
            meta['edition'] = edition
            meta['language'] = language
            
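            # Yield one record per section, copying the metadata dict so that
            # later chapter/section updates cannot mutate records already yielded.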
            for chap_idx, chapter in enumerate(soup.find_all('div', type='chapter')):
                for sec_idx, section in enumerate(chapter.find_all('div', type='section')):
                    section_meta = {**meta, 'chapter': chap_idx + 1, 'section': sec_idx + 1}
                    text = section.text.strip()
                    yield {'meta': section_meta, 'text': text, 'id': f"{text_id}_{chap_idx}_{sec_idx}"}, split


if __name__ == '__main__':
    train_fraction = 0.90
    metadata_path = '../origin/titels_pd.csv'
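    # The metadata CSV is pipe-separated, with its column header on the second line.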
    meta_df = pd.read_csv(metadata_path, header=1, sep='|')

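    # Shuffle deterministically so the train/validation split is reproducible.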
    meta_df = meta_df.sample(frac=1, random_state=0)

    num_train = round(train_fraction*len(meta_df))
    train_df = meta_df.iloc[:num_train]
    val_df = meta_df.iloc[num_train:]

    # Stream records to intermediate JSONL files, one per split.
    os.makedirs('tmp', exist_ok=True)
    with open('tmp/train.jsonl', 'w') as train_file:
        with open('tmp/val.jsonl', 'w') as val_file:
            for item, split in tqdm(yield_file_contents('../origin/xml_pd.zip', train_df, val_df)):
                if split == 'train':
                    train_file.write('{}\n'.format(json.dumps(item)))
                elif split == 'validation':
                    val_file.write('{}\n'.format(json.dumps(item)))

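    # Convert the JSONL intermediates to Parquet with the datasets library.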
    datasets.Dataset.from_json('tmp/train.jsonl', split='train').to_parquet('../data/train.parquet')
    datasets.Dataset.from_json('tmp/val.jsonl', split='validation').to_parquet('../data/validation.parquet')