import json
import os
from typing import Dict, Iterable, List

import datasets

from mediqa_oe.data.process_data import attach_transcript_section


ACI_BENCH_URL = "https://github.com/wyim/aci-bench/archive/refs/heads/main.zip"
PRIMOCK_URL = "https://github.com/babylonhealth/primock57/archive/refs/heads/main.zip"

_DESCRIPTION = """\
SIMORD loader that merges transcripts from ACI-Bench and PriMock57 into the SIMORD
order annotations using mediqa_oe.data.process_data.attach_transcript_section.
"""

_CITATION = r"""@article{corbeil2025empowering,
  title={Empowering Healthcare Practitioners with Language Models: Structuring Speech Transcripts in Two Real-World Clinical Applications},
  author={Corbeil, Jean-Philippe and Ben Abacha, Asma and Michalopoulos, George and Swazinna, Patrick and Del-Agua, Miguel and Tremblay, Julien and Jeeson Daniel, Aju and Bader, Corey and Cho, Yoon-Chan and Krishnan, Parvathi and Bodenstab, Nathan and Lin, Tony and Teng, Wen and Beaulieu, Francois and Vozila, Paul},
  journal={arXiv preprint arXiv:2507.05517},
  year={2025}
}"""

_LICENSE = "CDLA-2.0-permissive"
_HOMEPAGE = "https://huggingface.co/datasets/microsoft/SIMORD"

DATA_URLS = {
    "train": "https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/train.json",
    "dev": "https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/dev.json",
    "test": "https://huggingface.co/datasets/microsoft/SIMORD/resolve/main/data/test.json",
}
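
# Usage sketch (illustrative, not part of the loader): once this script is saved
# locally, e.g. as simord_merge.py (a placeholder name), the merged dataset can be
# loaded through the standard `datasets` API. The exposed split names are "train",
# "test1" (dev annotations), and "test2" (test annotations):
#
#     import datasets
#     ds = datasets.load_dataset("path/to/simord_merge.py", "default",
#                                trust_remote_code=True)
#     print(ds["train"][0]["id"])

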
def _walk_json_files(directory: str) -> List[str]:
    """Recursively collect all .json files under `directory`, sorted for determinism."""
    out = []
    for d, _, files in os.walk(directory):
        for fn in files:
            if fn.lower().endswith(".json"):
                out.append(os.path.join(d, fn))
    out.sort()
    return out


def _read_json_records(path: str) -> Iterable[dict]:
    """Yield records from a JSON file shaped as a list, a {"data": [...]} wrapper,
    or a single object."""
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, dict) and "data" in data and isinstance(data["data"], list):
        for r in data["data"]:
            yield r
    elif isinstance(data, list):
        for r in data:
            yield r
    else:
        # Fall back to treating the whole file as a single record.
        yield data


def _normalize_id_from_aci(file_field: str, basename: str) -> str:
    """Build a transcript id from a record's "file" field and its source file's
    basename: the first two dash-separated tokens of `file_field` are joined with
    an underscore, e.g. a hypothetical file_field "ABC-123-x" with basename
    "train" yields "acibench_ABC_123_train"."""
    file_id = "_".join((file_field or "").split("-")[0:2])
    return f"acibench_{file_id}_{basename}"


def _build_aci_transcript_dict(root: str) -> Dict[str, dict]:
    """
    Mirror of read_aci_bench_data + walk_aci_bench_directory:
    looks in .../aci-bench-main/data/challenge_data_json and .../src_experiment_data_json
    and builds {transcript_id: {"transcript": <src_text>}}.
    """
    tdict: Dict[str, dict] = {}

    # Locate the extracted archive directory; its name starts with "aci-bench".
    base = None
    for name in os.listdir(root):
        if name.startswith("aci-bench"):
            base = os.path.join(root, name)
            break
    if not base:
        return tdict

    for sub in ("data/challenge_data_json", "data/src_experiment_data_json"):
        p = os.path.join(base, sub)
        if not os.path.isdir(p):
            continue
        for fp in _walk_json_files(p):
            basename = os.path.splitext(os.path.basename(fp))[0]
            for rec in _read_json_records(fp):
                src = rec.get("src")
                file_field = rec.get("file", "")
                tid = _normalize_id_from_aci(file_field, basename)
                if src:
                    tdict[tid] = {"transcript": src}
    return tdict


def _read_text(path: str) -> str:
    with open(path, "r", encoding="utf-8") as f:
        return f.read()


def _normalize_primock_id(stem: str) -> str:
    """Map a PriMock57 transcript filename stem to a SIMORD transcript id,
    e.g. "day1_consultation01" -> "primock57_1_1"."""
    s = stem
    s = s.replace("day", "primock57_")
    s = s.replace("consultation0", "")
    s = s.replace("consultation", "")
    return s


def _build_primock_transcript_dict(primock_path: str) -> Dict[str, dict]:
    """Convert PriMock57 TextGrid transcripts to text with the repo's own script,
    then build {transcript_id: {"transcript": <text>}}."""
    script_path = os.path.join(primock_path, "primock57-main", "scripts", "textgrid_to_transcript.py")
    if not os.path.exists(script_path):
        print(f"Script {script_path} does not exist. Skipping Primock data.")
        # Return an empty dict so the caller's .update() still works.
        return {}

    # Shell out to the PriMock57 conversion script to produce plain-text transcripts.
    transcript_path = os.path.join(primock_path, "primock57-main", "transcripts")
    primock_transcript_path = os.path.join(primock_path, "primock_transcript")
    os.system(f"python {script_path} --transcript_path {transcript_path} --output_path {primock_transcript_path}")
    print(f"Primock data saved to {primock_transcript_path}")

    transcript_data: Dict[str, dict] = {}
    for dirpath, _, files in os.walk(primock_transcript_path):
        for filename in files:
            if not filename.endswith(".txt"):
                continue
            file_path = os.path.join(dirpath, filename)
            primock_id = _normalize_primock_id(filename.replace(".txt", ""))
            with open(file_path, "r", encoding="utf-8") as f:
                lines = [line.strip() for line in f if line.strip()]
            # Normalize speaker prefixes to the [doctor]/[patient] tags used by ACI-Bench.
            transcript_lines = [
                line.replace("Doctor:", "[doctor]").replace("Patient:", "[patient]")
                for line in lines
            ]
            transcript_data[primock_id] = {"transcript": "\n".join(transcript_lines)}

    print(f"Found {len(transcript_data)} transcripts in Primock data")
    return transcript_data
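
# Both transcript builders produce the same shape, keyed by transcript id, e.g.
# (hypothetical values):
#     {"acibench_D2N001_train": {"transcript": "[doctor] hi there ..."},
#      "primock57_1_1": {"transcript": "[doctor] hello ..."}}

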
def _load_annotations(path: str) -> List[dict]:
    """Load annotation records from a JSON list file or a JSONL/NDJSON file."""
    if path.lower().endswith((".jsonl", ".ndjson")):
        out = []
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    out.append(json.loads(line))
        return out
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, list):
        return data
    raise ValueError(f"{path} must be a JSON list (or JSONL).")


class SimordMergeConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)


class SimordMerge(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SimordMergeConfig(
            name="default",
            description="SIMORD with transcripts merged via attach_transcript_section from mediqa-oe.",
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self) -> datasets.DatasetInfo:
        """Declare the schema: an example id, a sequence of transcript turns, and
        the expected orders with provenance turn ids."""
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "transcript": datasets.Sequence(
                    {
                        "turn_id": datasets.Value("int32"),
                        "speaker": datasets.Value("string"),
                        "transcript": datasets.Value("string"),
                    }
                ),
                "expected_orders": datasets.Sequence(
                    {
                        "order_type": datasets.Value("string"),
                        "description": datasets.Value("string"),
                        "reason": datasets.Value("string"),
                        "provenance": datasets.Sequence(datasets.Value("int32")),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the SIMORD annotation files, keyed by split name.
        data_dir = dl_manager.download_and_extract(DATA_URLS)

        # Maps annotation file -> (exposed split name, key into data_dir).
        file_map = {
            "train.json": ("train", "train"),
            "dev.json": ("test1", "dev"),
            "test.json": ("test2", "test"),
        }

        aci_root = dl_manager.download_and_extract(ACI_BENCH_URL)
        primock_root = dl_manager.download_and_extract(PRIMOCK_URL)

        # Merge both transcript sources into one {transcript_id: {...}} lookup.
        self._transcript_dict = {}
        self._transcript_dict.update(_build_aci_transcript_dict(aci_root))
        self._transcript_dict.update(_build_primock_transcript_dict(primock_root))

        splits = []
        missing = []
        for fname, (exposed, split_name) in file_map.items():
            p = data_dir[split_name]
            if os.path.isfile(p):
                splits.append(
                    datasets.SplitGenerator(
                        name=exposed,
                        gen_kwargs={"ann_path": p},
                    )
                )
            else:
                missing.append(fname)

        if not splits:
            raise FileNotFoundError(
                f"No split files found under {data_dir}. "
                f"Expected one of: {', '.join(file_map.keys())}. Missing: {missing}"
            )

        return splits

    def _generate_examples(self, ann_path: str):
        section = _load_annotations(ann_path)
        # attach_transcript_section mutates the records in place, filling each
        # record's "transcript" field from the merged transcript lookup.
        attach_transcript_section(section, self._transcript_dict)
        for idx, rec in enumerate(section):
            rid = str(rec.get("id", idx))
            turns = rec.get("transcript") or []
            yield idx, {
                "id": rid,
                "transcript": turns,
                "expected_orders": rec.get("expected_orders") or [],
            }
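

if __name__ == "__main__":
    # Minimal smoke test (a sketch; assumes network access to the URLs above and
    # that the mediqa_oe package is importable). Builds every split and prints
    # basic counts.
    ds = datasets.load_dataset(__file__, "default", trust_remote_code=True)
    for split_name, split in ds.items():
        print(f"{split_name}: {len(split)} examples")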