#!/usr/bin/python3
# -*- coding: utf-8 -*-
from glob import glob
import json
import os
from pathlib import Path
import datasets
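
# Each builder config below maps to a single JSONL file under data/. Every line
# of such a file is one JSON object with (at least) "text", "language", "split"
# and "data_source" fields, which _generate_examples reads back out further down.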
_URLS = {
"amazon_reviews_multi": "data/amazon_reviews_multi.jsonl",
"autshumato": "data/autshumato.jsonl",
"bible_para": "data/bible_para.jsonl",
"bsd_ja_en": "data/bsd_ja_en.jsonl",
"bucc2018": "data/bucc2018.jsonl",
"cmu_hinglish_dog": "data/cmu_hinglish_dog.jsonl",
"ecb": "data/ecb.jsonl",
"emea": "data/emea.jsonl",
"europa_eac_tm": "data/europa_eac_tm.jsonl",
"europa_ecdc_tm": "data/europa_ecdc_tm.jsonl",
"hind_encorp": "data/hind_encorp.jsonl",
"hrenwac_para": "data/hrenwac_para.jsonl",
"id_panl_bppt": "data/id_panl_bppt.jsonl",
"iwslt2017": "data/iwslt2017.jsonl",
"kde4": "data/kde4.jsonl",
"menyo20k_mt": "data/menyo20k_mt.jsonl",
"mike0307": "data/mike0307.jsonl",
"multi_para_crawl": "data/multi_para_crawl.jsonl",
"nbnn": "data/nbnn.jsonl",
"nordic_langid": "data/nordic_langid.jsonl",
"open_subtitles": "data/open_subtitles.jsonl",
"para_crawl_en_bg": "data/para_crawl_en_bg.jsonl",
"para_crawl_en_cs": "data/para_crawl_en_cs.jsonl",
"para_crawl_en_da": "data/para_crawl_en_da.jsonl",
"para_crawl_en_de": "data/para_crawl_en_de.jsonl",
"para_crawl_en_el": "data/para_crawl_en_el.jsonl",
"para_crawl_en_es": "data/para_crawl_en_es.jsonl",
"para_crawl_en_et": "data/para_crawl_en_et.jsonl",
"para_crawl_en_fi": "data/para_crawl_en_fi.jsonl",
"para_crawl_en_fr": "data/para_crawl_en_fr.jsonl",
"para_crawl_en_ga": "data/para_crawl_en_ga.jsonl",
"para_crawl_en_hr": "data/para_crawl_en_hr.jsonl",
"para_crawl_en_hu": "data/para_crawl_en_hu.jsonl",
"para_crawl_en_it": "data/para_crawl_en_it.jsonl",
"para_crawl_en_lt": "data/para_crawl_en_lt.jsonl",
"para_crawl_en_lv": "data/para_crawl_en_lv.jsonl",
"para_crawl_en_mt": "data/para_crawl_en_mt.jsonl",
"para_crawl_en_nl": "data/para_crawl_en_nl.jsonl",
"para_crawl_en_pl": "data/para_crawl_en_pl.jsonl",
"para_crawl_en_pt": "data/para_crawl_en_pt.jsonl",
"para_crawl_en_ro": "data/para_crawl_en_ro.jsonl",
"para_crawl_en_sk": "data/para_crawl_en_sk.jsonl",
"para_crawl_en_sl": "data/para_crawl_en_sl.jsonl",
"para_crawl_en_sv": "data/para_crawl_en_sv.jsonl",
"para_pat_cs_en": "data/para_pat_cs_en.jsonl",
"para_pat_de_en": "data/para_pat_de_en.jsonl",
"para_pat_de_fr": "data/para_pat_de_fr.jsonl",
"para_pat_el_en": "data/para_pat_el_en.jsonl",
"para_pat_en_es": "data/para_pat_en_es.jsonl",
"para_pat_en_hu": "data/para_pat_en_hu.jsonl",
"para_pat_en_ja": "data/para_pat_en_ja.jsonl",
"para_pat_en_ko": "data/para_pat_en_ko.jsonl",
"para_pat_en_pt": "data/para_pat_en_pt.jsonl",
"para_pat_en_ro": "data/para_pat_en_ro.jsonl",
"para_pat_en_ru": "data/para_pat_en_ru.jsonl",
"para_pat_en_sk": "data/para_pat_en_sk.jsonl",
"para_pat_en_uk": "data/para_pat_en_uk.jsonl",
"para_pat_en_zh": "data/para_pat_en_zh.jsonl",
"para_pat_es_fr": "data/para_pat_es_fr.jsonl",
"para_pat_fr_ja": "data/para_pat_fr_ja.jsonl",
"para_pat_fr_ko": "data/para_pat_fr_ko.jsonl",
"para_pat_fr_ru": "data/para_pat_fr_ru.jsonl",
"php": "data/php.jsonl",
"qed_amara": "data/qed_amara.jsonl",
"ro_sts_parallel": "data/ro_sts_parallel.jsonl",
"scandi_langid": "data/scandi_langid.jsonl",
"spc": "data/spc.jsonl",
"stsb_multi_mt": "data/stsb_multi_mt.jsonl",
"tatoeba": "data/tatoeba.jsonl",
"xnli": "data/xnli.jsonl",
}
_CITATION = """\
@dataset{language_identification,
author = {Xing Tian},
title = {language_identification},
month = aug,
year = 2024,
publisher = {Xing Tian},
version = {1.0},
}
"""
LANGUAGE_MAP = {
"af": "boolean (afrikaans)",
"ar": "arabic",
"bg": "bulgarian",
"bn": "bengali",
"bs": "bosnian",
"cs": "czech",
"da": "danish",
"de": "german",
"el": "modern greek",
"en": "english",
"eo": "esperanto",
"es": "spanish",
"et": "estonian",
"fi": "finnish",
"fo": "faroese",
"fr": "french",
"ga": "irish",
"gl": "galician",
"gu": "gujarati",
"he": "hebrew",
"hi": "hindi",
"hi_en": "hindi english",
"hr": "croatian",
"hu": "hungarian",
"hy": "armenian",
"id": "indonesian",
"is": "icelandic",
"it": "italian",
"ja": "japanese",
"ko": "korean",
"kk": "kazakh",
"lt": "lithuanian",
"lv": "latvian",
"mr": "marathi",
"mt": "maltese",
"nl": "dutch",
"no": "norwegian",
"no-b": "norwegian (bokmål)",
"no-n": "norwegian (nynorsk)",
"pl": "polish",
"pt": "portuguese",
"ro": "romanian",
"ru": "russian",
"sk": "slovak",
"sl": "slovenian",
"sw": "swahili",
"sv": "swedish",
"th": "thai",
"tl": "tagalog",
"tn": "serpeti",
"tr": "turkish",
"ts": "dzonga",
"uk": "ukrainian",
"ur": "urdu",
"vi": "vietnamese",
"yo": "yoruba",
"zh": "chinese",
"zh-cn": "simplified chinese",
"zh-tw": "traditional chinese",
"zu": "zulu, south africa",
}
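

# A minimal sketch (not part of the original loader) of how LANGUAGE_MAP can be
# used to turn a language code from a sample into a display name. The helper
# name below is an assumption, added for illustration only.
def language_name(code: str) -> str:
    """Return the human-readable name for a language code, or the code itself."""
    return LANGUAGE_MAP.get(code, code)
# e.g. language_name("no-b") -> "norwegian (bokmål)"; language_name("xx") -> "xx"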


class LanguageIdentification(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="amazon_reviews_multi", version=VERSION, description="amazon_reviews_multi"),
        datasets.BuilderConfig(name="autshumato", version=VERSION, description="autshumato"),
        datasets.BuilderConfig(name="bible_para", version=VERSION, description="bible_para"),
        datasets.BuilderConfig(name="bsd_ja_en", version=VERSION, description="bsd_ja_en"),
        datasets.BuilderConfig(name="bucc2018", version=VERSION, description="bucc2018"),
        datasets.BuilderConfig(name="cmu_hinglish_dog", version=VERSION, description="cmu_hinglish_dog"),
        datasets.BuilderConfig(name="ecb", version=VERSION, description="ecb"),
        datasets.BuilderConfig(name="emea", version=VERSION, description="emea"),
        datasets.BuilderConfig(name="europa_eac_tm", version=VERSION, description="europa_eac_tm"),
        datasets.BuilderConfig(name="europa_ecdc_tm", version=VERSION, description="europa_ecdc_tm"),
        datasets.BuilderConfig(name="hind_encorp", version=VERSION, description="hind_encorp"),
        datasets.BuilderConfig(name="hrenwac_para", version=VERSION, description="hrenwac_para"),
        datasets.BuilderConfig(name="id_panl_bppt", version=VERSION, description="id_panl_bppt"),
        datasets.BuilderConfig(name="iwslt2017", version=VERSION, description="iwslt2017"),
        datasets.BuilderConfig(name="kde4", version=VERSION, description="kde4"),
        datasets.BuilderConfig(name="menyo20k_mt", version=VERSION, description="menyo20k_mt"),
        datasets.BuilderConfig(name="mike0307", version=VERSION, description="mike0307"),
        datasets.BuilderConfig(name="multi_para_crawl", version=VERSION, description="multi_para_crawl"),
        datasets.BuilderConfig(name="nbnn", version=VERSION, description="nbnn"),
        datasets.BuilderConfig(name="nordic_langid", version=VERSION, description="nordic_langid"),
        datasets.BuilderConfig(name="open_subtitles", version=VERSION, description="open_subtitles"),
        datasets.BuilderConfig(name="para_crawl_en_bg", version=VERSION, description="para_crawl_en_bg"),
        datasets.BuilderConfig(name="para_crawl_en_cs", version=VERSION, description="para_crawl_en_cs"),
        datasets.BuilderConfig(name="para_crawl_en_da", version=VERSION, description="para_crawl_en_da"),
        datasets.BuilderConfig(name="para_crawl_en_de", version=VERSION, description="para_crawl_en_de"),
        datasets.BuilderConfig(name="para_crawl_en_el", version=VERSION, description="para_crawl_en_el"),
        datasets.BuilderConfig(name="para_crawl_en_es", version=VERSION, description="para_crawl_en_es"),
        datasets.BuilderConfig(name="para_crawl_en_et", version=VERSION, description="para_crawl_en_et"),
        datasets.BuilderConfig(name="para_crawl_en_fi", version=VERSION, description="para_crawl_en_fi"),
        datasets.BuilderConfig(name="para_crawl_en_fr", version=VERSION, description="para_crawl_en_fr"),
        datasets.BuilderConfig(name="para_crawl_en_ga", version=VERSION, description="para_crawl_en_ga"),
        datasets.BuilderConfig(name="para_crawl_en_hr", version=VERSION, description="para_crawl_en_hr"),
        datasets.BuilderConfig(name="para_crawl_en_hu", version=VERSION, description="para_crawl_en_hu"),
        datasets.BuilderConfig(name="para_crawl_en_it", version=VERSION, description="para_crawl_en_it"),
        datasets.BuilderConfig(name="para_crawl_en_lt", version=VERSION, description="para_crawl_en_lt"),
        datasets.BuilderConfig(name="para_crawl_en_lv", version=VERSION, description="para_crawl_en_lv"),
        datasets.BuilderConfig(name="para_crawl_en_mt", version=VERSION, description="para_crawl_en_mt"),
        datasets.BuilderConfig(name="para_crawl_en_nl", version=VERSION, description="para_crawl_en_nl"),
        datasets.BuilderConfig(name="para_crawl_en_pl", version=VERSION, description="para_crawl_en_pl"),
        datasets.BuilderConfig(name="para_crawl_en_pt", version=VERSION, description="para_crawl_en_pt"),
        datasets.BuilderConfig(name="para_crawl_en_ro", version=VERSION, description="para_crawl_en_ro"),
        datasets.BuilderConfig(name="para_crawl_en_sk", version=VERSION, description="para_crawl_en_sk"),
        datasets.BuilderConfig(name="para_crawl_en_sl", version=VERSION, description="para_crawl_en_sl"),
        datasets.BuilderConfig(name="para_crawl_en_sv", version=VERSION, description="para_crawl_en_sv"),
        datasets.BuilderConfig(name="para_pat_cs_en", version=VERSION, description="para_pat_cs_en"),
        datasets.BuilderConfig(name="para_pat_de_en", version=VERSION, description="para_pat_de_en"),
        datasets.BuilderConfig(name="para_pat_de_fr", version=VERSION, description="para_pat_de_fr"),
        datasets.BuilderConfig(name="para_pat_el_en", version=VERSION, description="para_pat_el_en"),
        datasets.BuilderConfig(name="para_pat_en_es", version=VERSION, description="para_pat_en_es"),
        datasets.BuilderConfig(name="para_pat_en_hu", version=VERSION, description="para_pat_en_hu"),
        datasets.BuilderConfig(name="para_pat_en_ja", version=VERSION, description="para_pat_en_ja"),
        datasets.BuilderConfig(name="para_pat_en_ko", version=VERSION, description="para_pat_en_ko"),
        datasets.BuilderConfig(name="para_pat_en_pt", version=VERSION, description="para_pat_en_pt"),
        datasets.BuilderConfig(name="para_pat_en_ro", version=VERSION, description="para_pat_en_ro"),
        datasets.BuilderConfig(name="para_pat_en_ru", version=VERSION, description="para_pat_en_ru"),
        datasets.BuilderConfig(name="para_pat_en_sk", version=VERSION, description="para_pat_en_sk"),
        datasets.BuilderConfig(name="para_pat_en_uk", version=VERSION, description="para_pat_en_uk"),
        datasets.BuilderConfig(name="para_pat_en_zh", version=VERSION, description="para_pat_en_zh"),
        datasets.BuilderConfig(name="para_pat_es_fr", version=VERSION, description="para_pat_es_fr"),
        datasets.BuilderConfig(name="para_pat_fr_ja", version=VERSION, description="para_pat_fr_ja"),
        datasets.BuilderConfig(name="para_pat_fr_ko", version=VERSION, description="para_pat_fr_ko"),
        datasets.BuilderConfig(name="para_pat_fr_ru", version=VERSION, description="para_pat_fr_ru"),
        datasets.BuilderConfig(name="php", version=VERSION, description="php"),
        datasets.BuilderConfig(name="qed_amara", version=VERSION, description="qed_amara"),
        datasets.BuilderConfig(name="ro_sts_parallel", version=VERSION, description="ro_sts_parallel"),
        datasets.BuilderConfig(name="scandi_langid", version=VERSION, description="scandi_langid"),
        datasets.BuilderConfig(name="spc", version=VERSION, description="spc"),
        datasets.BuilderConfig(name="stsb_multi_mt", version=VERSION, description="stsb_multi_mt"),
        datasets.BuilderConfig(name="tatoeba", version=VERSION, description="tatoeba"),
        datasets.BuilderConfig(name="xnli", version=VERSION, description="xnli"),
    ]
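
    # Note: every config name above must match a key in _URLS, because
    # _split_generators resolves the data file via _URLS[self.config.name].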

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "language": datasets.Value("string"),
                "data_source": datasets.Value("string"),
            }
        )

        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _URLS[self.config.name]
        dl_path = dl_manager.download(url)
        archive_path = dl_path

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"},
            ),
        ]
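
    # All three SplitGenerators point at the same downloaded JSONL file; the
    # actual train/validation/test partitioning happens in _generate_examples,
    # which filters rows on their embedded "split" field.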

    def _generate_examples(self, archive_path, split):
        archive_path = Path(archive_path)

        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)

                # Each JSONL row carries its own split tag; keep only rows
                # belonging to the requested split.
                if sample["split"] != split:
                    continue

                language = sample["language"]
                # Fail loudly on language codes missing from LANGUAGE_MAP.
                if language not in LANGUAGE_MAP:
                    raise AssertionError(language)

                yield idx, {
                    "text": sample["text"],
                    "language": language,
                    "data_source": sample["data_source"],
                }
                idx += 1


if __name__ == '__main__':
    pass
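

# Example of loading one config with the `datasets` library. This is a usage
# sketch, not part of the loader: the repository id below is a placeholder
# (assumption), and `trust_remote_code=True` may be required by recent versions
# of `datasets` because this dataset is defined by a loading script.
#
#     from datasets import load_dataset
#
#     dataset = load_dataset(
#         "<namespace>/language_identification",  # hypothetical repo id
#         "amazon_reviews_multi",
#         split="train",
#         trust_remote_code=True,
#     )
#     for sample in dataset:
#         print(sample["text"], sample["language"], sample["data_source"])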