#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path

import datasets

# One JSONL file per source corpus, stored under data/ in this repository.
_URLS = {
    "amazon_reviews_multi": "data/amazon_reviews_multi.jsonl",
    "autshumato": "data/autshumato.jsonl",
    "bsd_ja_en": "data/bsd_ja_en.jsonl",
    "bucc2018": "data/bucc2018.jsonl",
    "cmu_hinglish_dog": "data/cmu_hinglish_dog.jsonl",
    "europa_eac_tm": "data/europa_eac_tm.jsonl",
    "iwslt2017": "data/iwslt2017.jsonl",
    "mike0307": "data/mike0307.jsonl",
    "nbnn": "data/nbnn.jsonl",
    "nordic_langid": "data/nordic_langid.jsonl",
    "scandi_langid": "data/scandi_langid.jsonl",
    "stsb_multi_mt": "data/stsb_multi_mt.jsonl",
    "tatoeba": "data/tatoeba.jsonl",
    "xnli": "data/xnli.jsonl",
}

_CITATION = """\
@dataset{language_identification,
  author    = {Xing Tian},
  title     = {language_identification},
  month     = aug,
  year      = 2024,
  publisher = {Xing Tian},
  version   = {1.0},
}
"""

# Language codes that may appear in the data, mapped to human-readable names.
LANGUAGE_MAP = {
    "ar": "arabic",
    "bg": "bulgarian",
    "da": "danish",
    "de": "german",
    "el": "modern greek",
    "en": "english",
    "eo": "esperanto",
    "es": "spanish",
    "fo": "faroese",
    "fr": "french",
    "gl": "galician",
    "hi": "hindi",
    "hi_en": "hindi (english)",
    "is": "icelandic",
    "it": "italian",
    "ja": "japanese",
    "ko": "korean",
    "nl": "dutch",
    "no": "norwegian",
    "no-b": "norwegian (bokmål)",
    "no-n": "norwegian (nynorsk)",
    "mr": "marathi",
    "pl": "polish",
    "pt": "portuguese",
    "ro": "romanian",
    "ru": "russian",
    "sw": "swahili",
    "sv": "swedish",
    "th": "thai",
    "tn": "tswana",
    "tr": "turkish",
    "ts": "tsonga",
    "ur": "urdu",
    "vi": "vietnamese",
    "zh": "chinese",
    "zh-cn": "simplified chinese",
    "zh-tw": "traditional chinese",
    "zu": "zulu (south africa)",
}


class LanguageIdentification(datasets.GeneratorBasedBuilder):
    """Language-identification corpus: one builder config per source dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="amazon_reviews_multi", version=VERSION, description="amazon_reviews_multi"),
        datasets.BuilderConfig(name="autshumato", version=VERSION, description="autshumato"),
        datasets.BuilderConfig(name="bsd_ja_en", version=VERSION, description="bsd_ja_en"),
        datasets.BuilderConfig(name="bucc2018", version=VERSION, description="bucc2018"),
        datasets.BuilderConfig(name="cmu_hinglish_dog", version=VERSION, description="cmu_hinglish_dog"),
        datasets.BuilderConfig(name="europa_eac_tm", version=VERSION, description="europa_eac_tm"),
        datasets.BuilderConfig(name="iwslt2017", version=VERSION, description="iwslt2017"),
        datasets.BuilderConfig(name="mike0307", version=VERSION, description="mike0307"),
        datasets.BuilderConfig(name="nbnn", version=VERSION, description="nbnn"),
        datasets.BuilderConfig(name="nordic_langid", version=VERSION, description="nordic_langid"),
        datasets.BuilderConfig(name="scandi_langid", version=VERSION, description="scandi_langid"),
        datasets.BuilderConfig(name="stsb_multi_mt", version=VERSION, description="stsb_multi_mt"),
        datasets.BuilderConfig(name="tatoeba", version=VERSION, description="tatoeba"),
        datasets.BuilderConfig(name="xnli", version=VERSION, description="xnli"),
    ]

    def _info(self):
        # Every config shares the same flat schema.
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "language": datasets.Value("string"),
                "data_source": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _URLS[self.config.name]
        archive_path = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, archive_path, split):
        """Yields examples whose "split" field matches the requested split."""
        archive_path = Path(archive_path)
        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)
                # All splits live in the same JSONL file; keep only rows for this split.
                if sample["split"] != split:
                    continue

                language = sample["language"]
                if language not in LANGUAGE_MAP:
                    raise AssertionError(f"unexpected language code: {language}")

                yield idx, {
                    "text": sample["text"],
                    "language": language,
                    "data_source": sample["data_source"],
                }
                idx += 1


if __name__ == '__main__':
    pass
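
# Minimal usage sketch (not part of the loading script itself; the file name
# "language_identification.py" and a local layout with the data/*.jsonl files next to
# it are assumptions). Any of the configs above can be loaded through the standard
# `datasets` API; recent versions of `datasets` may additionally require
# `trust_remote_code=True` when loading a script-based dataset:
#
#     from datasets import load_dataset
#
#     dataset_dict = load_dataset("language_identification.py", name="xnli")
#     print(dataset_dict["train"][0])
#     # e.g. {"text": "...", "language": "en", "data_source": "xnli"}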