# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""ELRC-Medical-V2: European parallel corpus for healthcare machine translation."""

import csv
import os

import datasets

logger = datasets.logging.get_logger(__name__)

_CITATION = r"""
@inproceedings{losch-etal-2018-european,
    title = "European Language Resource Coordination: Collecting Language Resources for Public Sector Multilingual Information Management",
    author = {L{\"o}sch, Andrea and Mapelli, Val{\'e}rie and Piperidis, Stelios and Vasi{\c{l}}jevs, Andrejs and Smal, Lilli and Declerck, Thierry and Schnur, Eileen and Choukri, Khalid and van Genabith, Josef},
    booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
    month = may,
    year = "2018",
    address = "Miyazaki, Japan",
    publisher = "European Language Resources Association (ELRA)",
    url = "https://aclanthology.org/L18-1213",
}
"""

_LANGUAGE_PAIRS = [
    "en-" + lang
    for lang in [
        "bg", "cs", "da", "de", "el", "es", "et", "fi", "fr", "ga", "hr", "hu",
        "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv",
    ]
]

_LICENSE = "This work is licensed under a Creative Commons Attribution 4.0 International (CC BY 4.0) License."

_URL = "https://huggingface.co/datasets/qanastek/ELRC-Medical-V2/resolve/main/ELRC-Medical-V2.zip"

_DESCRIPTION = (
    "ELRC-Medical-V2: European parallel corpus for healthcare machine translation, "
    "pairing English with 23 other European languages."
)


class ELRC_Medical_V2(datasets.GeneratorBasedBuilder):
    """ELRC-Medical-V2 dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=name, version=datasets.Version("2.0.0"), description="The ELRC-Medical-V2 corpus")
        for name in _LANGUAGE_PAIRS
    ]

    DEFAULT_CONFIG_NAME = "en-fr"

    def _info(self):
        src, target = self.config.name.split("-")
        pair = (src, target)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {"translation": datasets.features.Translation(languages=pair)}
            ),
            supervised_keys=(src, target),
            homepage="https://github.com/qanastek/ELRC-Medical-V2/",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the archive, which contains one CSV file per language pair.
        data_dir = dl_manager.download_and_extract(_URL)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, self.config.name + ".csv"),
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        logger.info("⏳ Generating examples from = %s", filepath)

        # Each CSV row is: doc_id, lang (e.g. "en-fr"), source_text, target_text.
        key_ = 0

        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(csv.reader(f, delimiter=",")):
                # Skip the header row.
                if id_ == 0:
                    continue

                # Get the language pair of the current row.
                src, target = str(row[1]).split("-")

                yield key_, {
                    "translation": {
                        src: str(row[2]).strip(),
                        target: str(row[3]).strip(),
                    },
                }

                key_ += 1
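

# Minimal usage sketch (an assumption, not part of the original script): if this file is
# hosted as the loading script of the qanastek/ELRC-Medical-V2 dataset on the Hugging Face
# Hub, a language-pair configuration can be built and inspected as below. The "en-fr"
# config name comes from _LANGUAGE_PAIRS above; any other listed pair works the same way.
if __name__ == "__main__":
    from datasets import load_dataset

    # Download, extract and build the English-French configuration.
    corpus = load_dataset("qanastek/ELRC-Medical-V2", "en-fr", split="train")

    # Each example is a single {"translation": {"en": ..., "fr": ...}} record.
    print(corpus[0]["translation"])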