Update simord_hf_loader.py
Browse files- simord_hf_loader.py +28 -34
simord_hf_loader.py
CHANGED
|
@@ -222,47 +222,41 @@ class SimordMerge(datasets.GeneratorBasedBuilder):
|
|
| 222 |
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
| 223 |
data_dir = self.config.data_dir or os.getcwd()
|
| 224 |
|
| 225 |
-
#
|
| 226 |
-
|
| 227 |
-
|
| 228 |
-
|
| 229 |
-
|
| 230 |
-
|
| 231 |
-
|
| 232 |
-
|
| 233 |
-
|
| 234 |
-
# Download & extract upstream sources (same URLs your script uses)
|
| 235 |
aci_dir = dl_manager.download_and_extract(ACI_BENCH_URL)
|
| 236 |
primock_dir = dl_manager.download_and_extract(PRIMOCK_URL)
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
| 242 |
-
|
| 243 |
-
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
|
| 253 |
-
|
| 254 |
section = _load_annotations(ann_path)
|
| 255 |
-
|
| 256 |
-
# attach_transcript_section mutates in place, adding parsed turns to d["transcript"]
|
| 257 |
attach_transcript_section(section, self._transcript_dict)
|
| 258 |
-
|
| 259 |
for idx, rec in enumerate(section):
|
| 260 |
rid = str(rec.get("id", idx))
|
| 261 |
-
turns = rec.get("transcript") or []
|
| 262 |
-
# Store the full augmented record in "raw" for maximum fidelity
|
| 263 |
-
raw_str = json.dumps(rec, ensure_ascii=False)
|
| 264 |
yield idx, {
|
| 265 |
"id": rid,
|
|
|
|
| 266 |
"transcript": turns,
|
| 267 |
-
"raw":
|
| 268 |
}
|
|
|
|
| 222 |
def _split_generators(self, dl_manager: datasets.DownloadManager):
|
| 223 |
data_dir = self.config.data_dir or os.getcwd()
|
| 224 |
|
| 225 |
+
# Map from local file name → desired HF split name
|
| 226 |
+
file_map = {
|
| 227 |
+
"train.json": ("train", datasets.Split.TRAIN),
|
| 228 |
+
"dev.json": ("test1", "test1"), # expose dev as "test1"
|
| 229 |
+
"test.json": ("test2", "test2"), # expose test as "test2"
|
| 230 |
+
}
|
| 231 |
+
|
| 232 |
+
# Download upstream corpora once
|
|
|
|
|
|
|
| 233 |
aci_dir = dl_manager.download_and_extract(ACI_BENCH_URL)
|
| 234 |
primock_dir = dl_manager.download_and_extract(PRIMOCK_URL)
|
| 235 |
+
self._transcript_dict = {}
|
| 236 |
+
self._transcript_dict.update(_build_aci_transcript_dict(aci_dir))
|
| 237 |
+
self._transcript_dict.update(_build_primock_transcript_dict(primock_dir))
|
| 238 |
+
|
| 239 |
+
splits = []
|
| 240 |
+
for fname, (exposed_name, hf_split) in file_map.items():
|
| 241 |
+
path = os.path.join(data_dir, fname)
|
| 242 |
+
if os.path.isfile(path):
|
| 243 |
+
splits.append(
|
| 244 |
+
datasets.SplitGenerator(
|
| 245 |
+
name=hf_split,
|
| 246 |
+
gen_kwargs={"ann_path": path, "exposed_name": exposed_name},
|
| 247 |
+
)
|
| 248 |
+
)
|
| 249 |
+
return splits
|
| 250 |
+
|
| 251 |
+
def _generate_examples(self, ann_path: str, exposed_name: str):
    """Yield ``(key, example)`` pairs for one annotation file.

    Loads the annotation records from *ann_path*, attaches parsed
    transcript turns in place via ``attach_transcript_section`` using the
    transcript lookup built in ``_split_generators``, then emits each
    record together with the exposed split name and a JSON dump of the
    full augmented record (kept in ``"raw"`` for maximum fidelity).
    """
    records = _load_annotations(ann_path)
    # Mutates each record in place, adding parsed turns under "transcript".
    attach_transcript_section(records, self._transcript_dict)

    for key, record in enumerate(records):
        example = {
            "id": str(record.get("id", key)),
            "split": exposed_name,  # optional field to carry which split it came from
            "transcript": record.get("transcript") or [],
            "raw": json.dumps(record, ensure_ascii=False),
        }
        yield key, example
|