Datasets:
Upload 2 files
Browse files
- convert_ksk.py +99 -0
- test.jsonl +0 -0
convert_ksk.py
ADDED
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Path to the shuffled KSK letters corpus in vertical (.vert) format.
TARGET = ".data/ksk-dopisy.vert.shuffled"

import os
import re
from typing import Dict

# Third-party: jsonlines for JSONL output, tqdm for a progress bar.
import jsonlines
from tqdm import tqdm
def process_vert_format(vert_content: str) -> Dict[str, str]:
    """Convert the KSK letters corpus from vertical (.vert) format to plain text.

    Splits *vert_content* into ``<doc>`` elements, extracts writer/addressee
    metadata from each opening ``<doc>`` tag, strips vertical-format markup
    from every ``<block>``, and returns a mapping from ``"<doc_id>_<block_index>"``
    to a string of the form ``"<metadata line>\\n<block text>"``.

    Args:
        vert_content: Full text of the corpus in vertical format.

    Returns:
        Dict mapping per-block document ids to "metadata\\ntext" strings.
        Blocks whose cleaned text is empty are skipped.

    Raises:
        ValueError: If a ``<doc>`` element lacks the expected metadata attributes.
    """
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)

    # Opening <doc> tag; the attribute order is assumed fixed in this corpus.
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)"\s+signatura="([^"]*)"\s+archiv="([^"]*)"\s+pispohl="([^"]*)"\s+pisvek="([^"]*)"\s+pisvzdel="([^"]*)"\s+pispobyt="([^"]*)"\s+pister="([^"]*)"\s+vztah="([^"]*)"\s+adrpohl="([^"]*)"\s+adrvek="([^"]*)"\s+adrvzdel="([^"]*)"\s+rok="([^"]*)"\s+forma="([^"]*)">'
    )
    block_pattern = re.compile(r'<block[^>]*>.*?</block>', re.DOTALL)
    note_pattern = re.compile(r'<note text="([^"]*)"/>\s*@')

    # Pattern to remove whitespace before punctuation.
    ws_before_punct = re.compile(r'\s+([.,!?:;])')

    processed_documents: Dict[str, str] = {}

    for doc in tqdm(doc_pattern.findall(vert_content)):
        metadata_match = metadata_pattern.search(doc)
        if metadata_match is None:
            raise ValueError("Metadata not found in document")

        # Only a subset of the 14 captured attributes is kept in the header.
        doc_id = metadata_match.group(1)
        pispohl = metadata_match.group(4)
        pispobyt = metadata_match.group(7)
        pister = metadata_match.group(8)
        adrpohl = metadata_match.group(10)
        rok = metadata_match.group(13)

        # Translate single-letter sex codes to Czech words; other values pass through.
        sex_names = {"F": "Žena", "M": "Muž"}
        pispohl = sex_names.get(pispohl, pispohl)
        adrpohl = sex_names.get(adrpohl, adrpohl)

        # NOTE(review): the 'pister' attribute is labelled "Počet dětí pisatele"
        # (writer's number of children) — confirm that is what 'pister' encodes.
        metadata_str = (f"Pohlaví pisatele: {pispohl}, "
                        f"Pobyt pisatele: {pispobyt}, "
                        f"Počet dětí pisatele: {pister}, "
                        f"Pohlaví adresáta: {adrpohl}, "
                        f"Rok: {rok}, ")

        for bid, block in enumerate(block_pattern.findall(doc)):
            # Replace a note tag followed by '@' with '@<note text>@' so the
            # note content survives the tag-stripping step below.
            block = note_pattern.sub(r'@\1@', block)

            # First tab-separated column of each vertical line is the token.
            tokens = [line.split("\t")[0].strip() for line in block.split("\n") if line.strip() != ""]
            doc_text = " ".join(tokens)

            # Drop any remaining <...> structural tags.
            doc_text = re.sub(r'<[^>]*>', '', doc_text)

            # Collapse runs of whitespace into single spaces and trim.
            doc_text = re.sub(r'\s+', ' ', doc_text).strip()

            # Remove whitespace before ., !, ?, :, ;
            doc_text = ws_before_punct.sub(r'\1', doc_text)

            # Some blocks are empty (e.g. 08A009N in the oral data) — skip them.
            if doc_text == "":
                continue

            processed_documents[f"{doc_id}_{bid}"] = metadata_str + "\n" + doc_text

    return processed_documents
# Read the raw vertical-format corpus from disk.
with open(TARGET, "r") as f:
    vert_content = f.read()

# Process the content into {per-block doc id: "metadata\ntext"} pairs.
processed_documents = process_vert_format(vert_content)

# Write all documents into a single JSONL split at .data/hf_dataset/cnc_ksk/test.jsonl.
OF = ".data/hf_dataset/cnc_ksk/test.jsonl"
os.makedirs(os.path.dirname(OF), exist_ok=True)
with jsonlines.open(OF, "w") as writer:
    for doc_id, doc in list(processed_documents.items()):
        writer.write({"text": doc, "id": doc_id})
test.jsonl
ADDED
The diff for this file is too large to render.
See raw diff
|
|