Update DEFT2021.py
DEFT2021.py (+70, -54)
@@ -585,57 +585,73 @@ class DEFT2021(datasets.GeneratorBasedBuilder):
(The 54 removed lines, old lines 588-641, carried the previous body of the NER branch; their content is not preserved in this extract.)
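The added lines below rebuild the NER example generator: they read distribution-corpus.txt to recover the official 'train 2021' and 'test 2021' document lists, shuffle the training documents under a fixed seed (random.seed(4)) and split them 73/27 into train and validation with np.split, then parse every BRAT-annotated case report under DEFT-cas-cliniques/, convert each document to Prodigy and then HuggingFace token/NER-tag format, split it into sentences, and finally yield only the sentences whose document_id belongs to the requested split. The "Inside NER" and "T1" through "T7" prints are debug traces introduced by this commit.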
 
         elif self.config.name.find("ner") != -1:
 
+            print("Inside NER")
+
+            all_res = []
+
+            key = 0
+
+            with open(os.path.join(data_dir, 'distribution-corpus.txt')) as f_dist:
+
+                print("T1")
+
+                distribution = [line.strip() for line in f_dist.readlines()]
+
+                random.seed(4)
+                train = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'train 2021']
+                random.shuffle(train)
+                random.shuffle(train)
+                random.shuffle(train)
+                train, validation = np.split(train, [int(len(train)*0.73)])
+                test = [raw.split('\t')[0] for raw in distribution if len(raw.split('\t')) == 4 and raw.split('\t')[3] == 'test 2021']
+
+                print("T2")
+                ann_path = Path(data_dir) / "DEFT-cas-cliniques"
+
+                for guid, txt_file in enumerate(sorted(ann_path.glob("*.txt"))):
+
+                    print("T3")
+
+                    brat_example = self.parse_brat_file(txt_file, parse_notes=True)
+
+                    source_example = self._to_source_example(brat_example)
+
+                    print("T4")
+
+                    prod_format = self.convert_to_prodigy(source_example, _LABELS_BASE)
+
+                    print("T5")
+
+                    hf_format = self.convert_to_hf_format(prod_format)
+
+                    print("T6")
+
+                    hf_split = self.split_sentences(hf_format)
+
+                    print("T7")
+
+                    for h in hf_split:
+
+                        all_res.append({
+                            "id": str(key),
+                            "document_id": h['document_id'],
+                            "tokens": h['tokens'],
+                            "ner_tags": h['ner_tags'],
+                        })
+
+                        key += 1
+
+                if split == "train":
+                    allowed_ids = list(train)
+                elif split == "validation":
+                    allowed_ids = list(validation)
+                elif split == "test":
+                    allowed_ids = list(test)
+
+                print("train", len(train))
+                print("validation", len(validation))
+                print("test", len(test))
+
+                for r in all_res:
+                    if r["document_id"]+'.txt' in allowed_ids:
+                        yield r["id"], r
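For context, a minimal usage sketch, not part of the commit: loading this NER configuration with the datasets library, assuming a local copy of the script and of the manually obtained DEFT 2021 corpus. The config name "ner", the paths, and the data_dir wiring are assumptions inferred from the branch above, not confirmed by the diff.

    from datasets import load_dataset

    # Hypothetical invocation: any config whose name contains "ner" reaches
    # the branch added in this commit. Paths and config name are assumed.
    dataset = load_dataset(
        "path/to/DEFT2021.py",        # local copy of this loading script
        name="ner",                   # assumed config name containing "ner"
        data_dir="path/to/deft2021",  # must contain distribution-corpus.txt and DEFT-cas-cliniques/
    )

    # Each example carries the fields yielded above.
    sample = dataset["train"][0]
    print(sample["document_id"])
    print(list(zip(sample["tokens"], sample["ner_tags"])))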