#!/usr/bin/python3 # -*- coding: utf-8 -*- from glob import glob import json import os from pathlib import Path import datasets _URLS = { "amazon_reviews_multi": "data/amazon_reviews_multi.jsonl", "autshumato": "data/autshumato.jsonl", "bible_para": "data/bible_para.jsonl", "bsd_ja_en": "data/bsd_ja_en.jsonl", "bucc2018": "data/bucc2018.jsonl", "cmu_hinglish_dog": "data/cmu_hinglish_dog.jsonl", "ecb": "data/ecb.jsonl", "emea": "data/emea.jsonl", "europa_eac_tm": "data/europa_eac_tm.jsonl", "europa_ecdc_tm": "data/europa_ecdc_tm.jsonl", "hind_encorp": "data/hind_encorp.jsonl", "hrenwac_para": "data/hrenwac_para.jsonl", "id_panl_bppt": "data/id_panl_bppt.jsonl", "iwslt2017": "data/iwslt2017.jsonl", "kde4": "data/kde4.jsonl", "menyo20k_mt": "data/menyo20k_mt.jsonl", "mike0307": "data/mike0307.jsonl", "multi_para_crawl": "data/multi_para_crawl.jsonl", "nbnn": "data/nbnn.jsonl", "nordic_langid": "data/nordic_langid.jsonl", "open_subtitles": "data/open_subtitles.jsonl", # "para_crawl": "data/para_crawl.jsonl", "para_pat": "data/para_pat.jsonl", "php": "data/php.jsonl", "scandi_langid": "data/scandi_langid.jsonl", "stsb_multi_mt": "data/stsb_multi_mt.jsonl", "tatoeba": "data/tatoeba.jsonl", "xnli": "data/xnli.jsonl", } _CITATION = """\ @dataset{language_identification, author = {Xing Tian}, title = {language_identification}, month = aug, year = 2024, publisher = {Xing Tian}, version = {1.0}, } """ LANGUAGE_MAP = { "ar": "arabic", "bg": "bulgarian", "bn": "bengali", "bs": "bosnian", "cs": "czech", "da": "danish", "de": "german", "el": "modern greek", "en": "english", "eo": "esperanto", "es": "spanish", "et": "estonian", "fi": "finnish", "fo": "faroese", "fr": "french", "ga": "irish", "gl": "galician", "hi": "hindi", "hi_en": "hindi english", "hr": "croatian", "hu": "hungarian", "hy": "armenian", "id": "indonesian", "is": "icelandic", "it": "italian", "ja": "japanese", "ko": "korean", "lt": "lithuanian", "lv": "latvian", "mr": "marathi", "mt": "maltese", "nl": "dutch", "no": "norwegian", "no-b": "norwegian (bokmål)", "no-n": "norwegian (nynorsk)", "pl": "polish", "pt": "portuguese", "ro": "romanian", "ru": "russian", "sk": "slovak", "sl": "slovenian", "sw": "swahili", "sv": "swedish", "th": "thai", "tl": "tagalog", "tn": "sepedi", "tr": "turkish", "ts": "dzonga", "uk": "ukrainian", "ur": "urdu", "vi": "vietnamese", "yo": "yoruba", "zh": "chinese", "zh-cn": "simplified chinese", "zh-tw": "traditional chinese", "zu": "zulu, south africa", } class LanguageIdentification(datasets.GeneratorBasedBuilder): VERSION = datasets.Version("1.0.0") BUILDER_CONFIGS = [ datasets.BuilderConfig(name="amazon_reviews_multi", version=VERSION, description="amazon_reviews_multi"), datasets.BuilderConfig(name="autshumato", version=VERSION, description="autshumato"), datasets.BuilderConfig(name="bible_para", version=VERSION, description="bible_para"), datasets.BuilderConfig(name="bsd_ja_en", version=VERSION, description="bsd_ja_en"), datasets.BuilderConfig(name="bucc2018", version=VERSION, description="bucc2018"), datasets.BuilderConfig(name="cmu_hinglish_dog", version=VERSION, description="cmu_hinglish_dog"), datasets.BuilderConfig(name="ecb", version=VERSION, description="ecb"), datasets.BuilderConfig(name="emea", version=VERSION, description="emea"), datasets.BuilderConfig(name="europa_eac_tm", version=VERSION, description="europa_eac_tm"), datasets.BuilderConfig(name="europa_ecdc_tm", version=VERSION, description="europa_ecdc_tm"), datasets.BuilderConfig(name="hind_encorp", version=VERSION, description="hind_encorp"), datasets.BuilderConfig(name="hrenwac_para", version=VERSION, description="hrenwac_para"), datasets.BuilderConfig(name="id_panl_bppt", version=VERSION, description="id_panl_bppt"), datasets.BuilderConfig(name="iwslt2017", version=VERSION, description="iwslt2017"), datasets.BuilderConfig(name="kde4", version=VERSION, description="kde4"), datasets.BuilderConfig(name="menyo20k_mt", version=VERSION, description="menyo20k_mt"), datasets.BuilderConfig(name="mike0307", version=VERSION, description="mike0307"), datasets.BuilderConfig(name="multi_para_crawl", version=VERSION, description="multi_para_crawl"), datasets.BuilderConfig(name="nbnn", version=VERSION, description="nbnn"), datasets.BuilderConfig(name="nordic_langid", version=VERSION, description="nordic_langid"), datasets.BuilderConfig(name="open_subtitles", version=VERSION, description="open_subtitles"), # datasets.BuilderConfig(name="para_crawl", version=VERSION, description="para_crawl"), datasets.BuilderConfig(name="para_pat", version=VERSION, description="para_pat"), datasets.BuilderConfig(name="php", version=VERSION, description="php"), datasets.BuilderConfig(name="scandi_langid", version=VERSION, description="scandi_langid"), datasets.BuilderConfig(name="stsb_multi_mt", version=VERSION, description="stsb_multi_mt"), datasets.BuilderConfig(name="tatoeba", version=VERSION, description="tatoeba"), datasets.BuilderConfig(name="xnli", version=VERSION, description="xnli"), ] def _info(self): features = datasets.Features( { "text": datasets.Value("string"), "language": datasets.Value("string"), "data_source": datasets.Value("string"), } ) return datasets.DatasetInfo( features=features, supervised_keys=None, homepage="", license="", citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" url = _URLS[self.config.name] dl_path = dl_manager.download(url) archive_path = dl_path return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"archive_path": archive_path, "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"archive_path": archive_path, "split": "validation"}, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"archive_path": archive_path, "split": "test"}, ), ] def _generate_examples(self, archive_path, split): archive_path = Path(archive_path) idx = 0 with open(archive_path, "r", encoding="utf-8") as f: for row in f: sample = json.loads(row) if sample["split"] != split: continue language = sample["language"] if language not in LANGUAGE_MAP.keys(): raise AssertionError(language) yield idx, { "text": sample["text"], "language": language, "data_source": sample["data_source"], } idx += 1 if __name__ == '__main__': pass