|
|
|
|
|
import gzip |
|
import os |
|
import re |
|
from typing import Dict |
|
from tqdm import tqdm |
|
|
|
# Decompressed ORTOFON v1 vertical ("vert") file, read as one big string.
FILE_PATH = ".data/ORTOFONv1/ortofon_v1_vert.gz"
# Explicit encoding: gzip.open in text mode otherwise decodes with the
# locale-preferred encoding, which mangles Czech text on non-UTF-8 systems.
with gzip.open(FILE_PATH, "rt", encoding="utf-8") as f:
    data = f.read()
|
|
|
def process_vert_format_ortofon(vert_content: str) -> Dict[str, str]:
    """Parse the ORTOFON v1 vertical (vert) format into plain-text documents.

    Args:
        vert_content: Full text of the decompressed ``*_vert`` file.

    Returns:
        Mapping from document id to a newline-separated transcript: the first
        line is a human-readable metadata summary, each following line is one
        speaker turn formatted as ``[mluvčí: <speaker>] <utterance>``.

    Raises:
        ValueError: If a ``<doc>`` element lacks the expected metadata header.
    """
    # Progress bar is optional: fall back to plain iteration when tqdm
    # is not installed, so the parser itself has no hard third-party dep.
    try:
        from tqdm import tqdm as _progress
    except ImportError:
        def _progress(iterable):
            return iterable

    # One <doc> element per recorded conversation.
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
    # Attribute order in ORTOFON doc headers is fixed, so one regex with
    # positional groups suffices.
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)" year="([^"]*)" month="([^"]*)" location="([^"]*)" situation="([^"]*)" speakers="([^"]*)" genders="([^"]*)" generations="([^"]*)" relationship="([^"]*)"[^>]*>')
    # <sp> wraps one speaker turn; the nickname attribute identifies the speaker.
    sp_pattern = re.compile(r'<sp[^>]*nickname="([^"]*)"[^>]*>(.*?)</sp>', re.DOTALL)
    # <pw> wraps the token lines of a turn (one token per line, tab-separated columns).
    pw_pattern = re.compile(r'<pw>\n(.*?)</pw>\n', re.DOTALL)
    # Nicknames carry numeric suffixes such as "_1"; strip them.
    remove_speaker_suffix = re.compile(r'_[0-9]+$')
    # Detokenization: no space before sentence-final punctuation.
    ws_before_punct = re.compile(r'\s+([.!?])')

    processed_documents: Dict[str, str] = {}
    for doc in _progress(doc_pattern.findall(vert_content)):
        metadata_match = metadata_pattern.search(doc)
        if not metadata_match:
            raise ValueError("Metadata not found in document")
        doc_id = metadata_match.group(1)
        metadata_str = (f"Lokalita: {metadata_match.group(4)}, "
                        f"Situace: {metadata_match.group(5)}, "
                        f"Počet mluvčích: {metadata_match.group(6)}, "
                        f"Pohlaví: {metadata_match.group(7)}, "
                        f"Generace: {metadata_match.group(8)}, "
                        f"Vztah: {metadata_match.group(9)}")

        # Metadata summary is the first line of the output document.
        processed_document = [metadata_str]

        for nickname, sp_content in sp_pattern.findall(doc):
            speaker_id = remove_speaker_suffix.sub('', nickname)
            # "Y" marks non-speech sounds in the annotation; use a readable label.
            if speaker_id == "Y":
                speaker_id = "Zvuk"

            # Fall back to the raw turn content when it has no <pw> segments.
            segs = pw_pattern.findall(sp_content) or [sp_content]

            # First tab-separated column of every non-blank line is the word form.
            # FIX: filter on line.strip() (not bare `line != ""`) so whitespace-only
            # lines cannot yield empty tokens and double spaces in the output;
            # this matches the ORAL2013 parser's behavior.
            tokens = [line.split("\t")[0].strip()
                      for seg in segs
                      for line in seg.split("\n")
                      if line.strip() != ""]
            speaker_text = " ".join(tokens)

            # Turns consisting solely of ellipsis or the "@" marker carry no content.
            if speaker_text.strip() in ("...", "@"):
                continue

            speaker_text = ws_before_punct.sub(r'\1', speaker_text)
            processed_document.append(f"[mluvčí: {speaker_id}] {speaker_text}")

        processed_documents[doc_id] = '\n'.join(processed_document)

    return processed_documents
|
|
|
|
|
# Convert the ORTOFON corpus, then drop the raw text to keep peak memory low.
ortofon_data = process_vert_format_ortofon(data)
del data

# Decompressed ORAL2013 vertical file; explicit UTF-8 avoids the
# locale-dependent default decoding of gzip.open in text mode.
FILE_PATH = ".data/ORAL2013/oral2013_vert.gz"
with gzip.open(FILE_PATH, "rt", encoding="utf-8") as f:
    data = f.read()
|
|
|
def process_vert_format_oral(vert_content: str) -> Dict[str, str]:
    """Parse the ORAL2013 vertical (vert) format into plain-text documents.

    Args:
        vert_content: Full text of the decompressed ``*_vert`` file.

    Returns:
        Mapping from document id to a newline-separated transcript: the first
        line is a human-readable metadata summary, each following line is one
        speaker turn formatted as ``[mluvčí: <speaker>] <utterance>``.

    Raises:
        ValueError: If a ``<doc>`` element lacks the expected metadata header.
    """
    # Progress bar is optional: fall back to plain iteration when tqdm
    # is not installed, so the parser itself has no hard third-party dep.
    try:
        from tqdm import tqdm as _progress
    except ImportError:
        def _progress(iterable):
            return iterable

    # One <doc> element per recorded conversation.
    doc_pattern = re.compile(r'<doc[^>]*>.*?</doc>', re.DOTALL)
    # Attribute order in ORAL2013 doc headers is fixed, so one regex with
    # positional groups suffices.
    metadata_pattern = re.compile(
        r'<doc id="([^"]*)" temp="([^"]*)" pocet="([^"]*)" vztah="([^"]*)" situace="([^"]*)" promluva="([^"]*)"[^>]*>'
    )
    # <sp> wraps one speaker turn; the num attribute identifies the speaker.
    sp_pattern = re.compile(r'<sp[^>]*num="([^"]*)"[^>]*>(.*?)</sp>', re.DOTALL)
    # <seg> wraps the timed token lines of a turn (one token per line).
    seg_pattern = re.compile(r'<seg start="[^"]*" end="[^"]*">(.*?)</seg>\n', re.DOTALL)
    # Detokenization: no space before sentence-final punctuation.
    ws_before_punct = re.compile(r'\s+([.!?])')

    processed_documents: Dict[str, str] = {}
    for doc in _progress(doc_pattern.findall(vert_content)):
        metadata_match = metadata_pattern.search(doc)
        if not metadata_match:
            raise ValueError("Metadata not found in document")
        doc_id = metadata_match.group(1)
        metadata_str = (f"Situace: {metadata_match.group(5)}, "
                        f"Počet mluvčích: {metadata_match.group(3)}, "
                        f"Vztah: {metadata_match.group(4)}")

        # Metadata summary is the first line of the output document.
        processed_document = [metadata_str]

        for num, sp_content in sp_pattern.findall(doc):
            # "Y" marks non-speech sounds in the annotation; use a readable label.
            speaker_id = "Zvuk" if num == "Y" else num

            # Remove transcription artefacts before tokenization
            # (order matters: "...:" must be removed before "...").
            for artefact, replacement in (("---", ""), ("...:", ""),
                                          ("...", ""), ("?.", "?")):
                sp_content = sp_content.replace(artefact, replacement)

            # Fall back to the raw turn content when it has no <seg> segments.
            segs = seg_pattern.findall(sp_content) or [sp_content]

            # First tab-separated column of every non-blank line is the word form.
            tokens = [line.split("\t")[0].strip()
                      for seg in segs
                      for line in seg.split("\n")
                      if line.strip() != ""]
            speaker_text = ws_before_punct.sub(r'\1', " ".join(tokens))

            # Turns that end up empty after cleanup carry no content.
            if speaker_text.strip() == "":
                continue

            processed_document.append(f"[mluvčí: {speaker_id}] {speaker_text}")

        processed_documents[doc_id] = '\n'.join(processed_document)

    return processed_documents
|
|
|
oral_data = process_vert_format_oral(data)

# Merge both corpora into one id -> text mapping. Per dict.update semantics,
# an ORAL id that collides with an ORTOFON id would overwrite it.
ortofon_data.update(oral_data)

# Write one JSON object per line (JSON Lines). The stdlib json module replaces
# the third-party `jsonlines` dependency previously imported here;
# ensure_ascii=False keeps the Czech text human-readable in the output file.
import json

FILE_PATH = ".data/hf_dataset/ortofon_oral/test.jsonl"
os.makedirs(os.path.dirname(FILE_PATH), exist_ok=True)
with open(FILE_PATH, "w", encoding="utf-8") as out_file:
    for doc_id, doc in ortofon_data.items():
        out_file.write(json.dumps({"text": doc, "id": doc_id},
                                  ensure_ascii=False) + "\n")