# (removed: web-scrape artifacts — page header, file size, commit hashes, and a line-number column that were not part of the original source file)
# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
"""ELRC-Medical-V2 : European parallel corpus for healthcare machine translation"""
import os
import csv
import datasets
from tqdm import tqdm
logger = datasets.logging.get_logger(__name__)
_CITATION = """
@inproceedings{losch-etal-2018-european,
title = "European Language Resource Coordination: Collecting Language Resources for Public Sector Multilingual Information Management",
author = {L{\"o}sch, Andrea and
Mapelli, Val{\'e}rie and
Piperidis, Stelios and
Vasi{\c{l}}jevs, Andrejs and
Smal, Lilli and
Declerck, Thierry and
Schnur, Eileen and
Choukri, Khalid and
van Genabith, Josef},
booktitle = "Proceedings of the Eleventh International Conference on Language Resources and Evaluation ({LREC} 2018)",
month = may,
year = "2018",
address = "Miyazaki, Japan",
publisher = "European Language Resources Association (ELRA)",
url = "https://aclanthology.org/L18-1213",
}
"""
_LANGUAGE_PAIRS = ["en-" + lang for lang in ["bg", "cs", "da", "de", "el", "es", "et", "fi", "fr", "ga", "hr", "hu", "it", "lt", "lv", "mt", "nl", "pl", "pt", "ro", "sk", "sl", "sv"]]
_LICENSE = """
This work is licensed under a <a rel="license" href="https://elrc-share.eu/static/metashare/licences/CC-BY-4.0.pdf">Attribution 4.0 International (CC BY 4.0) License</a>.
"""
# _URLS = {
# lang : "https://huggingface.co/datasets/qanastek/ELRC-Medical-V2/raw/main/csv/" + lang + ".csv" for lang in _LANGUAGE_PAIRS
# }
_URL = "https://raw.githubusercontent.com/qanastek/ELRC-Medical-V2/main/csv_corpus/"
# _URL = "https://raw.githubusercontent.com/qanastek/ELRC-Medical-V2/main/csv_corpus/"
_DESCRIPTION = "No description"
class ELRC_Medical_V2(datasets.GeneratorBasedBuilder):
    """ELRC-Medical-V2: English-to-X parallel medical-domain corpora.

    One BuilderConfig per language pair in ``_LANGUAGE_PAIRS``; each config
    maps to a single remote CSV (``<pair>.csv``) with columns
    doc_id, lang, source_text, target_text.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=name,
            version=datasets.Version("2.0.0"),
            description="The ELRC-Medical-V2 corpora",
        )
        for name in _LANGUAGE_PAIRS
    ]
    DEFAULT_CONFIG_NAME = "en-fr"

    def _info(self):
        """Return the dataset metadata: feature schema, citation, license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "doc_id": datasets.Value("int32"),
                "lang": datasets.Value("string"),
                "source_text": datasets.Value("large_string"),
                "target_text": datasets.Value("large_string"),
            }),
            supervised_keys=None,
            homepage="https://github.com/qanastek/ELRC-Medical-V2/",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download this config's CSV and declare the single train split.

        Bug fix: the original called ``dl_manager.download(_URL)`` (the bare
        directory URL) and then appended ``<config>.csv`` to the *local*
        download path, which never points at the downloaded file. Build the
        full remote URL first, then download it.
        """
        csv_path = dl_manager.download(_URL + self.config.name + ".csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": csv_path,
                    "split": "train",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from the CSV at *filepath*.

        Row 0 is the header and is skipped; keys start at 1. ``split`` is
        unused but kept for the gen_kwargs contract.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            for id_, row in enumerate(csv.reader(f, delimiter=",")):
                if id_ == 0:  # header row
                    continue
                # csv.reader already yields str; only doc_id needs conversion.
                yield id_, {
                    "doc_id": int(row[0]),
                    "lang": row[1],
                    "source_text": row[2],
                    "target_text": row[3],
                }