Matej Klemen committed on
Commit b3d2e59
1 Parent(s): c8f8673

Add paragraph-level aggregation

Files changed (2)
  1. README.md +6 -1
  2. solar3.py +106 -2
README.md CHANGED
````diff
@@ -41,7 +41,12 @@ document available at https://www.clarin.si/repository/xmlui/bitstream/handle/11
 
 By default the dataset is provided at **sentence-level** (125867 instances): each instance contains a source (the original) and a target (the corrected) sentence. Note that either the source or the target sentence in an instance may be missing - this usually happens when a source sentence is marked as redundant or when a new sentence is added by the teacher. Additionally, a source or a target sentence may appear in multiple instances - for example, this happens when one sentence gets divided into multiple sentences.
 
-There is also an option to aggregate the instances at the **document-level** by explicitly providing the correct config: `datasets.load_dataset("cjvt/solar3", "document_level")`.
+There is also an option to aggregate the instances at the **document-level** or **paragraph-level**
+by explicitly providing the correct config:
+```
+datasets.load_dataset("cjvt/solar3", "paragraph_level")
+datasets.load_dataset("cjvt/solar3", "document_level")
+```
 
 ### Supported Tasks and Leaderboards
````
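For context, a minimal usage sketch of the two configs; the `split` name and the printed fields are assumptions based on the loader's output schema, not something this commit states:

```python
# Hedged usage sketch: assumes the dataset exposes a "train" split and that
# instances carry "src_tokens" and "corrections" fields, per the loader below.
import datasets

pars = datasets.load_dataset("cjvt/solar3", "paragraph_level", split="train")
example = pars[0]
print(example["src_tokens"][:10])   # first tokens of the source paragraph
print(example["corrections"][:2])   # correction spans with paragraph-level indices
```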
 
solar3.py CHANGED
```diff
@@ -126,6 +126,7 @@ def read_data(data_path):
             data[id_sent] = {
                 "id_doc": id_text,
                 "doc_title": text_title,
+                "idx_par": idx_par,
                 "id_token": ids, "form": forms, "lemma": lemmas, "ana": msds_jos, "msd": msds_ud, "ne_tag": nes, "space_after": spaces_after,
                 "is_manually_validated": is_manually_validated
             }
```
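The new `idx_par` field records which paragraph each sentence came from. A toy illustration of the bookkeeping (the loop structure is illustrative, not the actual `read_data` internals):

```python
# Illustrative only: each sentence stores the index of its enclosing paragraph,
# which later lets aggregate_pars group sentences back into paragraphs.
document_paragraphs = [["s1", "s2"], ["s3"]]  # toy document with two paragraphs

data = {}
for idx_par, paragraph in enumerate(document_paragraphs):
    for id_sent in paragraph:
        data[id_sent] = {"idx_par": idx_par}

print(data)  # {'s1': {'idx_par': 0}, 's2': {'idx_par': 0}, 's3': {'idx_par': 1}}
```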
```diff
@@ -142,6 +143,8 @@ class Solar3(datasets.GeneratorBasedBuilder):
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(name="sentence_level", version=VERSION,
                                description="Annotations at sentence-level."),
+        datasets.BuilderConfig(name="paragraph_level", version=VERSION,
+                               description="Annotations at paragraph-level."),
         datasets.BuilderConfig(name="document_level", version=VERSION,
                                description="Annotations at document-level."),
     ]
```
```diff
@@ -225,10 +228,14 @@ class Solar3(datasets.GeneratorBasedBuilder):
 
         if len(involved_src_sents) > 0:
             src_sent_data = deepcopy(source_data[involved_src_sents[0]])
+            if not isinstance(src_sent_data["idx_par"], list):
+                src_sent_data["idx_par"] = [src_sent_data["idx_par"]]
 
             for src_sent_id in involved_src_sents[1:]:
                 curr_sent_data = source_data[src_sent_id]
+
                 src_sent_data["id_token"].extend(curr_sent_data["id_token"])
+                src_sent_data["idx_par"].append(curr_sent_data["idx_par"])
                 src_sent_data["form"].extend(curr_sent_data["form"])
                 src_sent_data["lemma"].extend(curr_sent_data["lemma"])
                 src_sent_data["ana"].extend(curr_sent_data["ana"])
```
```diff
@@ -244,10 +251,14 @@ class Solar3(datasets.GeneratorBasedBuilder):
 
         if len(involved_tgt_sents) > 0:
             tgt_sent_data = deepcopy(target_data[involved_tgt_sents[0]])
+            if not isinstance(tgt_sent_data["idx_par"], list):
+                tgt_sent_data["idx_par"] = [tgt_sent_data["idx_par"]]
 
             for tgt_sent_id in involved_tgt_sents[1:]:
                 curr_sent_data = target_data[tgt_sent_id]
+
                 tgt_sent_data["id_token"].extend(curr_sent_data["id_token"])
+                tgt_sent_data["idx_par"].append(curr_sent_data["idx_par"])
                 tgt_sent_data["form"].extend(curr_sent_data["form"])
                 tgt_sent_data["lemma"].extend(curr_sent_data["lemma"])
                 tgt_sent_data["ana"].extend(curr_sent_data["ana"])
```
```diff
@@ -288,6 +299,7 @@ class Solar3(datasets.GeneratorBasedBuilder):
                 "id_doc": id_doc[:-1],  # doc ID without the "s" or "t" info
                 "doc_title": doc_title,
                 "is_manually_validated": is_manually_validated,
+                "idx_src_par": src_sent_data.get("idx_par", []),
                 "id_src_tokens": src_sent_data.get("id_token", []),
                 "src_tokens": src_sent_data.get("form", []),
                 "src_ling_annotations": {
```
```diff
@@ -297,6 +309,7 @@ class Solar3(datasets.GeneratorBasedBuilder):
                     "ne_tag": src_sent_data.get("ne_tag", []),
                     "space_after": src_sent_data.get("space_after", [])
                 },
+                "idx_tgt_par": tgt_sent_data.get("idx_par", []),
                 "id_tgt_tokens": tgt_sent_data.get("id_token", []),
                 "tgt_tokens": tgt_sent_data.get("form", []),
                 "tgt_ling_annotations": {
```
```diff
@@ -309,6 +322,94 @@ class Solar3(datasets.GeneratorBasedBuilder):
                 "corrections": corr_data
             }
 
+    @staticmethod
+    def aggregate_pars(sent_level_data):
+        # TODO: the code is a copy-paste of the document aggregation, with an additional groupby - could use a refactor
+        uniq_idx_par = 0
+        for idx_doc, (curr_id, curr_group) in enumerate(groupby(sent_level_data, key=lambda tup: tup[1]["id_doc"])):
+            curr_instances = list(map(lambda tup: tup[1], curr_group))  # remove the redundant index info from datasets
+
+            # Some sentences have no `idx_src_par` because they are added by the teacher (not present in the source)
+            for idx_par, curr_par_group in groupby(
+                    curr_instances,
+                    key=lambda _inst: _inst["idx_src_par"][0] if len(_inst["idx_src_par"]) > 0 else
+                    _inst["idx_tgt_par"][0]
+            ):
+                src_tokens, tgt_tokens, mapped_corrections = [], [], []
+                src_ling_anns = {"lemma": [], "ana": [], "msd": [], "ne_tag": [], "space_after": []}
+                tgt_ling_anns = {"lemma": [], "ana": [], "msd": [], "ne_tag": [], "space_after": []}
+                seen_src_tokens, seen_tgt_tokens = {}, {}
+                src_base, tgt_base = 0, 0
+                prev_src_base, prev_tgt_base = 0, 0
+
+                doc_title, is_validated = None, None
+                for curr_inst in curr_par_group:
+                    doc_title, is_validated = curr_inst["doc_title"], curr_inst["is_manually_validated"]
+
+                    id_src_toks, id_tgt_toks = curr_inst["id_src_tokens"], curr_inst["id_tgt_tokens"]
+                    curr_src_toks, curr_tgt_toks = curr_inst["src_tokens"], curr_inst["tgt_tokens"]
+                    curr_src_anns, curr_tgt_anns = curr_inst["src_ling_annotations"], curr_inst["tgt_ling_annotations"]
+                    curr_corrs = curr_inst["corrections"]
+
+                    num_added_src, num_added_tgt = 0, 0
+                    for idx_position, (id_tok, tok) in enumerate(zip(id_src_toks, curr_src_toks)):
+                        if id_tok not in seen_src_tokens:
+                            src_tokens.append(tok)
+                            src_ling_anns["lemma"].append(curr_src_anns["lemma"][idx_position])
+                            src_ling_anns["ana"].append(curr_src_anns["ana"][idx_position])
+                            src_ling_anns["msd"].append(curr_src_anns["msd"][idx_position])
+                            src_ling_anns["ne_tag"].append(curr_src_anns["ne_tag"][idx_position])
+                            src_ling_anns["space_after"].append(curr_src_anns["space_after"][idx_position])
+
+                            seen_src_tokens[id_tok] = tok
+                            num_added_src += 1
+
+                    for idx_position, (id_tok, tok) in enumerate(zip(id_tgt_toks, curr_tgt_toks)):
+                        if id_tok not in seen_tgt_tokens:
+                            tgt_tokens.append(tok)
+                            tgt_ling_anns["lemma"].append(curr_tgt_anns["lemma"][idx_position])
+                            tgt_ling_anns["ana"].append(curr_tgt_anns["ana"][idx_position])
+                            tgt_ling_anns["msd"].append(curr_tgt_anns["msd"][idx_position])
+                            tgt_ling_anns["ne_tag"].append(curr_tgt_anns["ne_tag"][idx_position])
+                            tgt_ling_anns["space_after"].append(curr_tgt_anns["space_after"][idx_position])
+
+                            seen_tgt_tokens[id_tok] = tok
+                            num_added_tgt += 1
+
+                    if num_added_src == 0:
+                        src_base, prev_src_base = prev_src_base, src_base
+
+                    if num_added_tgt == 0:
+                        tgt_base, prev_tgt_base = prev_tgt_base, tgt_base
+
+                    for corr in curr_corrs:
+                        mapped_corrections.append({
+                            "idx_src": list(map(lambda _i: src_base + _i, corr["idx_src"])),
+                            "idx_tgt": list(map(lambda _i: tgt_base + _i, corr["idx_tgt"])),
+                            "corr_types": corr["corr_types"]
+                        })
+
+                    src_base += num_added_src
+                    tgt_base += num_added_tgt
+
+                    if num_added_src == 0:
+                        src_base, prev_src_base = prev_src_base, src_base
+
+                    if num_added_tgt == 0:
+                        tgt_base, prev_tgt_base = prev_tgt_base, tgt_base
+
+                yield uniq_idx_par, {
+                    "id_doc": curr_id,
+                    "doc_title": doc_title,
+                    "is_manually_validated": is_validated,
+                    "src_tokens": src_tokens,
+                    "src_ling_annotations": src_ling_anns,
+                    "tgt_tokens": tgt_tokens,
+                    "tgt_ling_annotations": tgt_ling_anns,
+                    "corrections": mapped_corrections
+                }
+                uniq_idx_par += 1
+
     @staticmethod
     def aggregate_docs(sent_level_data):
         # NOTE: assuming here that `sent_level_data` is pre-sorted by id_doc, which is done in the raw data
```
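The core idea of `aggregate_pars`: group sentence-level instances by paragraph index with `itertools.groupby` and shift each instance's correction indices by the number of tokens accumulated so far. A condensed sketch on toy data (the real method additionally deduplicates shared tokens and carries the linguistic annotations along):

```python
from itertools import groupby

# Toy sentence-level instances, pre-sorted by paragraph index - groupby only
# merges consecutive items, mirroring the ordering assumption in the loader.
instances = [
    {"idx_par": 0, "src_tokens": ["A", "b"], "corrections": [{"idx_src": [1]}]},
    {"idx_par": 0, "src_tokens": ["c", "."], "corrections": [{"idx_src": [0]}]},
    {"idx_par": 1, "src_tokens": ["D", "."], "corrections": []},
]

for idx_par, group in groupby(instances, key=lambda inst: inst["idx_par"]):
    tokens, corrections, base = [], [], 0
    for inst in group:
        # Remap sentence-local correction indices to paragraph-level indices
        corrections.extend(
            {"idx_src": [base + i for i in corr["idx_src"]]}
            for corr in inst["corrections"]
        )
        tokens.extend(inst["src_tokens"])
        base += len(inst["src_tokens"])
    print(idx_par, tokens, corrections)
# 0 ['A', 'b', 'c', '.'] [{'idx_src': [1]}, {'idx_src': [2]}]
# 1 ['D', '.'] []
```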
```diff
@@ -397,9 +498,12 @@ class Solar3(datasets.GeneratorBasedBuilder):
         sent_level_data = list(Solar3.generate_sentences(source_path, target_path, links_path))
 
         if self.config.name == "sentence_level":
-            # Remove IDs that are only useful for aggregating the document-level data
+            # Remove IDs and indices that are only useful for aggregating the document-level data
             for i, instance in sent_level_data:
-                yield i, {_k: _v for _k, _v in instance.items() if _k not in {"id_src_tokens", "id_tgt_tokens"}}
+                yield i, {_k: _v for _k, _v in instance.items() if _k not in {"id_src_tokens", "id_tgt_tokens",
+                                                                              "idx_src_par", "idx_tgt_par"}}
+        elif self.config.name == "paragraph_level":
+            yield from list(Solar3.aggregate_pars(sent_level_data))
         else:
             yield from list(Solar3.aggregate_docs(sent_level_data))
```
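At sentence level, the aggregation-only bookkeeping fields are stripped before yielding. The same filtering on a toy instance:

```python
# Key filtering on a toy instance: fields used only for aggregation are dropped.
instance = {"src_tokens": ["a"], "id_src_tokens": ["t1"], "idx_src_par": [0]}
internal = {"id_src_tokens", "id_tgt_tokens", "idx_src_par", "idx_tgt_par"}
print({k: v for k, v in instance.items() if k not in internal})
# {'src_tokens': ['a']}
```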