jpcorb20 committed
Commit 6a8f90f · verified · 1 Parent(s): beba983

Update simord_hf_loader.py

Files changed (1):
  1. simord_hf_loader.py +28 -34
simord_hf_loader.py CHANGED
@@ -222,47 +222,41 @@ class SimordMerge(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         data_dir = self.config.data_dir or os.getcwd()
 
-        # Local annotations (lists of dicts with at least an "id")
-        train_path = os.path.join(data_dir, "train.json")
-        test1_path = os.path.join(data_dir, "test1.json")
-        test2_path = os.path.join(data_dir, "test2.json")
-
-        for p in (train_path, test1_path, test2_path):
-            if not os.path.isfile(p):
-                raise FileNotFoundError(f"Expected annotation file: {p}")
-
-        # Download & extract upstream sources (same URLs your script uses)
+        # Map from local file name to desired HF split name
+        file_map = {
+            "train.json": ("train", datasets.Split.TRAIN),
+            "dev.json": ("test1", "test1"),    # expose dev as "test1"
+            "test.json": ("test2", "test2"),   # expose test as "test2"
+        }
+
+        # Download upstream corpora once
         aci_dir = dl_manager.download_and_extract(ACI_BENCH_URL)
         primock_dir = dl_manager.download_and_extract(PRIMOCK_URL)
-
-        # Build transcript_dict exactly like your script would
-        transcript_dict: Dict[str, dict] = {}
-        transcript_dict.update(_build_aci_transcript_dict(aci_dir))
-        transcript_dict.update(_build_primock_transcript_dict(primock_dir))
-
-        # Stash for use in _generate_examples
-        self._transcript_dict = transcript_dict
-
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"ann_path": train_path}),
-            datasets.SplitGenerator(name="test1", gen_kwargs={"ann_path": test1_path}),
-            datasets.SplitGenerator(name="test2", gen_kwargs={"ann_path": test2_path}),
-        ]
-
-    def _generate_examples(self, ann_path: str):
-        # Load the annotation list and call your function to attach transcripts
+        self._transcript_dict = {}
+        self._transcript_dict.update(_build_aci_transcript_dict(aci_dir))
+        self._transcript_dict.update(_build_primock_transcript_dict(primock_dir))
+
+        splits = []
+        for fname, (exposed_name, hf_split) in file_map.items():
+            path = os.path.join(data_dir, fname)
+            if os.path.isfile(path):
+                splits.append(
+                    datasets.SplitGenerator(
+                        name=hf_split,
+                        gen_kwargs={"ann_path": path, "exposed_name": exposed_name},
+                    )
+                )
+        return splits
+
+    def _generate_examples(self, ann_path: str, exposed_name: str):
         section = _load_annotations(ann_path)
-
-        # attach_transcript_section mutates in place, adding parsed turns to d["transcript"]
         attach_transcript_section(section, self._transcript_dict)
-
         for idx, rec in enumerate(section):
             rid = str(rec.get("id", idx))
-            turns = rec.get("transcript") or []  # now a list of {turn_id, speaker, transcript}
-            # Store the full augmented record in "raw" for maximum fidelity
-            raw_str = json.dumps(rec, ensure_ascii=False)
+            turns = rec.get("transcript") or []
             yield idx, {
                 "id": rid,
+                "split": exposed_name,  # optional field to carry which split it came from
                 "transcript": turns,
-                "raw": raw_str,
+                "raw": json.dumps(rec, ensure_ascii=False),
            }
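
A minimal usage sketch of the loader after this change (the repo id and data_dir below are placeholders, not taken from the commit): annotation files are resolved against data_dir, missing files are skipped rather than raising FileNotFoundError, and dev.json/test.json surface as the "test1"/"test2" splits.

import datasets

ds = datasets.load_dataset(
    "jpcorb20/simord",                 # hypothetical repo id hosting simord_hf_loader.py
    data_dir="/path/to/annotations",   # hypothetical; expected to contain train.json, dev.json, test.json
    trust_remote_code=True,            # script-based loaders require opting in on recent `datasets`
)

# Each record carries the exposed split name alongside the parsed turns.
for name, split in ds.items():
    rec = split[0]
    print(name, rec["id"], rec["split"], len(rec["transcript"]))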