Create simord_hf_loader.py
simord_hf_loader.py  ADDED  (+268 -0)
@@ -0,0 +1,268 @@
# simord_hf_loader.py
# HF Datasets loader that uses mediqa-oe.data.process_data.attach_transcript_section
# to merge transcripts into local annotations: train, test1, test2.
#
# Dataset license: CDLA-2.0-permissive (for SIMORD). Respect upstream source licenses.
#
# Requirements:
#     pip install datasets nltk requests
#     pip install -e /path/to/mediqa-oe   # or add repo to PYTHONPATH
#
# Usage:
#     from datasets import load_dataset
#     ds = load_dataset(
#         "path/to/simord_hf_loader.py",
#         data_dir="path/to/annotations_dir",  # with train.json, test1.json, test2.json (lists of dicts)
#     )
#     print(ds)
#
# The loader will auto-download ACI-Bench and PriMock57 from GitHub to create transcript_dict.

import json
import os
import re
from typing import Dict, Iterable, List, Tuple

import datasets
import nltk

# Ensure the sentence tokenizer needed by the mediqa-oe code is available
try:
    nltk.data.find("tokenizers/punkt_tab")
except LookupError:
    nltk.download("punkt_tab")

# ---- Import the exact merge function from the mediqa-oe repo ----
# (Relies on a local mediqa-oe install being importable)
from mediqa_oe.data.process_data import attach_transcript_section  # noqa: E402

# ---- Upstream sources (same URLs as the original SIMORD preparation script) ----
ACI_BENCH_URL = "https://github.com/wyim/aci-bench/archive/refs/heads/main.zip"
PRIMOCK_URL = "https://github.com/babylonhealth/primock57/archive/refs/heads/main.zip"

_DESCRIPTION = """\
SIMORD loader that merges transcripts from ACI-Bench and PriMock57 into local annotations
using mediqa-oe.data.process_data.attach_transcript_section.
"""
_CITATION = r"""@article{corbeil2025empowering,
  title={Empowering Healthcare Practitioners with Language Models: Structuring Speech Transcripts in Two Real-World Clinical Applications},
  author={Corbeil, Jean-Philippe and Ben Abacha, Asma and Michalopoulos, George and Swazinna, Patrick and Del-Agua, Miguel and Tremblay, Julien and Jeeson Daniel, Aju and Bader, Corey and Cho, Yoon-Chan and Krishnan, Parvathi and Bodenstab, Nathan and Lin, Tony and Teng, Wen and Beaulieu, Francois and Vozila, Paul},
  journal={arXiv preprint arXiv:2507.05517},
  year={2025}
}"""
_LICENSE = "CDLA-2.0-permissive"
_HOMEPAGE = "https://huggingface.co/datasets/<your-org>/SIMORD"

# ----------------------- helpers to read upstream transcripts -----------------------

def _walk_json_files(directory: str) -> List[str]:
    out = []
    for d, _, files in os.walk(directory):
        for fn in files:
            if fn.lower().endswith(".json"):
                out.append(os.path.join(d, fn))
    out.sort()
    return out

def _read_json_records(path: str) -> Iterable[dict]:
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, dict) and "data" in data and isinstance(data["data"], list):
        for r in data["data"]:
            yield r
    elif isinstance(data, list):
        for r in data:
            yield r
    else:
        # single dict record
        yield data

def _normalize_id_from_aci(file_field: str, basename: str) -> str:
    # Mirrors the original preparation script:
    #   file_id = "_".join(d.get("file", "").split("-")[0:2])
    #   transcript_id = "acibench_" + file_id + "_" + basename
    file_id = "_".join((file_field or "").split("-")[0:2])
    return f"acibench_{file_id}_{basename}"

def _build_aci_transcript_dict(root: str) -> Dict[str, dict]:
    """
    Mirror of read_aci_bench_data + walk_aci_bench_directory:
    looks in .../aci-bench-main/data/challenge_data_json and .../src_experiment_data_json
    and builds {transcript_id: {"transcript": <src_text>}}
    """
    tdict: Dict[str, dict] = {}
    base = None
    # Find the 'aci-bench-main' folder inside root
    for name in os.listdir(root):
        if name.startswith("aci-bench"):
            base = os.path.join(root, name)
            break
    if not base:
        return tdict

    for sub in ("data/challenge_data_json", "data/src_experiment_data_json"):
        p = os.path.join(base, sub)
        if not os.path.isdir(p):
            continue
        for fp in _walk_json_files(p):
            basename = os.path.splitext(os.path.basename(fp))[0]
            for rec in _read_json_records(fp):
                src = rec.get("src")
                file_field = rec.get("file", "")
                tid = _normalize_id_from_aci(file_field, basename)
                if src:
                    tdict[tid] = {"transcript": src}
    return tdict

def _read_text(path: str) -> str:
    with open(path, "r", encoding="utf-8") as f:
        return f.read()

def _normalize_primock_id(stem: str) -> str:
    # Replicates the replacements used in the original preparation script:
    #   primock_id = filename.replace("day", "primock57_")
    #   primock_id = primock_id.replace("consultation0", "")
    #   primock_id = primock_id.replace("consultation", "")
    s = stem
    s = s.replace("day", "primock57_")
    s = s.replace("consultation0", "")
    s = s.replace("consultation", "")
    return s

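# Illustrative only: a hypothetical PriMock57 stem such as "day1_consultation01" would map
# through _normalize_primock_id above to "primock57_1_1"; the exact stem layout depends on
# how the upstream transcript .txt files were generated.
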
def _build_primock_transcript_dict(root: str) -> Dict[str, dict]:
    """
    Mirror of read_primock_data post-conversion:
    reads *.txt in primock57-main/transcripts and applies the same tag normalization.
    """
    tdict: Dict[str, dict] = {}
    base = None
    for name in os.listdir(root):
        if name.startswith("primock57"):
            base = os.path.join(root, name)
            break
    if not base:
        return tdict

    tx_dir = os.path.join(base, "transcripts")
    if not os.path.isdir(tx_dir):
        # If transcripts haven't been generated yet by the upstream script, we skip.
        return tdict

    for fn in os.listdir(tx_dir):
        if not fn.lower().endswith(".txt"):
            continue
        fp = os.path.join(tx_dir, fn)
        lines = [ln.strip() for ln in _read_text(fp).splitlines() if ln.strip()]
        norm = []
        for line in lines:
            line = line.replace("Doctor:", "[doctor]").replace("Patient:", "[patient]")
            norm.append(line)
        stem = os.path.splitext(fn)[0]
        primock_id = _normalize_primock_id(stem)
        tdict[primock_id] = {"transcript": "\n".join(norm)}
    return tdict

def _load_annotations(path: str) -> List[dict]:
    # Expect a JSON array (list of dicts). If JSONL, we also handle it gracefully.
    if path.lower().endswith((".jsonl", ".ndjson")):
        out = []
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                    out.append(json.loads(line))
        return out
    with open(path, "r", encoding="utf-8") as f:
        data = json.load(f)
    if isinstance(data, list):
        return data
    raise ValueError(f"{path} must be a JSON list (or JSONL).")

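# Note (illustrative): each annotation record is expected to be a dict carrying at least an
# "id"; pairing records with entries in transcript_dict (whose keys look like "acibench_*"
# or "primock57_*") is handled by attach_transcript_section from the mediqa-oe repo.
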
# ----------------------- HF builder -----------------------

class SimordMergeConfig(datasets.BuilderConfig):
    def __init__(self, **kwargs):
        super().__init__(version=datasets.Version("1.0.0"), **kwargs)

class SimordMerge(datasets.GeneratorBasedBuilder):
    BUILDER_CONFIGS = [
        SimordMergeConfig(
            name="default",
            description="SIMORD with transcripts merged via attach_transcript_section from mediqa-oe.",
        )
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self) -> datasets.DatasetInfo:
        # We expose a compact schema:
        #   - id
        #   - transcript: sequence of {turn_id, speaker, transcript}
        #   - raw: JSON string dump of the full (possibly augmented) annotation record
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "transcript": datasets.Sequence(
                    {
                        "turn_id": datasets.Value("int32"),
                        "speaker": datasets.Value("string"),
                        "transcript": datasets.Value("string"),
                    }
                ),
                "raw": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        data_dir = self.config.data_dir or os.getcwd()

        # Local annotations (lists of dicts with at least an "id")
        train_path = os.path.join(data_dir, "train.json")
        test1_path = os.path.join(data_dir, "test1.json")
        test2_path = os.path.join(data_dir, "test2.json")

        for p in (train_path, test1_path, test2_path):
            if not os.path.isfile(p):
                raise FileNotFoundError(f"Expected annotation file: {p}")

        # Download & extract the upstream sources (same URLs as the original preparation script)
        aci_dir = dl_manager.download_and_extract(ACI_BENCH_URL)
        primock_dir = dl_manager.download_and_extract(PRIMOCK_URL)

        # Build transcript_dict exactly as the original preparation script would
        transcript_dict: Dict[str, dict] = {}
        transcript_dict.update(_build_aci_transcript_dict(aci_dir))
        transcript_dict.update(_build_primock_transcript_dict(primock_dir))

        # Stash for use in _generate_examples
        self._transcript_dict = transcript_dict

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"ann_path": train_path}),
            datasets.SplitGenerator(name="test1", gen_kwargs={"ann_path": test1_path}),
            datasets.SplitGenerator(name="test2", gen_kwargs={"ann_path": test2_path}),
        ]

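    # Note on _split_generators above: the first run needs network access to fetch the two
    # GitHub archives; subsequent runs reuse the `datasets` download cache.
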
    def _generate_examples(self, ann_path: str):
        # Load the annotation list and attach transcripts via the mediqa-oe merge function
        section = _load_annotations(ann_path)

        # attach_transcript_section mutates in place, adding parsed turns to d["transcript"]
        attach_transcript_section(section, self._transcript_dict)

        for idx, rec in enumerate(section):
            rid = str(rec.get("id", idx))
            turns = rec.get("transcript") or []  # now a list of {turn_id, speaker, transcript}
            # Store the full augmented record in "raw" for maximum fidelity
            raw_str = json.dumps(rec, ensure_ascii=False)
            yield idx, {
                "id": rid,
                "transcript": turns,
                "raw": raw_str,
            }
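
# ---------------------------------------------------------------------------
# Optional local smoke test (illustrative sketch, not part of the loader API).
# Assumes the annotation files live in "./annotations"; adjust the path as
# needed. Depending on the installed `datasets` version, script-based loaders
# may also require passing trust_remote_code=True to load_dataset.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(__file__, data_dir="./annotations")
    print(ds)  # expected splits: train, test1, test2
    ex = ds["train"][0]
    rec = json.loads(ex["raw"])  # full augmented annotation record
    print(ex["id"], len(rec.get("transcript") or []), "turns")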