#!/usr/bin/python3
# -*- coding: utf-8 -*-
import json
from pathlib import Path

import datasets
# Relative paths to the JSONL data files shipped with this dataset repository.
_URLS = {
    "amazon_reviews_multi": "data/amazon_reviews_multi.jsonl",
    "mike0307": "data/mike0307.jsonl",
    "nbnn": "data/nbnn.jsonl",
    "nordic_langid": "data/nordic_langid.jsonl",
    "scandi_langid": "data/scandi_langid.jsonl",
    "stsb_multi_mt": "data/stsb_multi_mt.jsonl",
    "xnli": "data/xnli.jsonl",
}
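
# Each JSONL file holds one JSON object per line. `_generate_examples` below
# reads the fields "text", "language", "data_source" and "split", so a
# representative line (values illustrative only) looks like:
#
#   {"text": "Das ist gut.", "language": "de", "data_source": "stsb_multi_mt", "split": "train"}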
_CITATION = """\
@dataset{language_identification,
author = {Xing Tian},
title = {language_identification},
month = aug,
year = 2024,
publisher = {Xing Tian},
version = {1.0},
}
"""

# ISO 639-1 codes (plus Norwegian and Chinese variants) mapped to readable
# names; used in `_generate_examples` to validate incoming language codes.
LANGUAGE_MAP = {
    "ar": "arabic",
    "bg": "bulgarian",
    "da": "danish",
    "de": "german",
    "el": "modern greek",
    "en": "english",
    "eo": "esperanto",
    "es": "spanish",
    "fo": "faroese",
    "fr": "french",
    "gl": "galician",
    "hi": "hindi",
    "is": "icelandic",
    "it": "italian",
    "ja": "japanese",
    "mr": "marathi",
    "nl": "dutch",
    "no": "norwegian",
    "no-b": "norwegian (bokmål)",
    "no-n": "norwegian (nynorsk)",
    "pl": "polish",
    "pt": "portuguese",
    "ru": "russian",
    "sv": "swedish",
    "sw": "swahili",
    "th": "thai",
    "tr": "turkish",
    "ur": "urdu",
    "vi": "vietnamese",
    "zh-cn": "simplified chinese",
    "zh-tw": "traditional chinese",
}


class LanguageIdentification(datasets.GeneratorBasedBuilder):
    """Language-identification dataset with one configuration per source corpus."""

    VERSION = datasets.Version("1.0.0")

    # One builder config per entry in _URLS.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="amazon_reviews_multi", version=VERSION, description="amazon_reviews_multi"),
        datasets.BuilderConfig(name="mike0307", version=VERSION, description="mike0307"),
        datasets.BuilderConfig(name="nbnn", version=VERSION, description="nbnn"),
        datasets.BuilderConfig(name="nordic_langid", version=VERSION, description="nordic_langid"),
        datasets.BuilderConfig(name="scandi_langid", version=VERSION, description="scandi_langid"),
        datasets.BuilderConfig(name="stsb_multi_mt", version=VERSION, description="stsb_multi_mt"),
        datasets.BuilderConfig(name="xnli", version=VERSION, description="xnli"),
    ]

    def _info(self):
        features = datasets.Features(
            {
                "text": datasets.Value("string"),
                "language": datasets.Value("string"),
                "data_source": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/validation/test splits."""
        url = _URLS[self.config.name]
        # All three splits live in a single JSONL file; the split filtering
        # happens in `_generate_examples`.
        archive_path = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, archive_path, split):
        """Yields (idx, example) pairs for the rows belonging to `split`."""
        archive_path = Path(archive_path)

        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                sample = json.loads(row)
                # Skip rows that belong to a different split.
                if sample["split"] != split:
                    continue

                language = sample["language"]
                if language not in LANGUAGE_MAP:
                    raise AssertionError(f"unexpected language code: {language}")

                yield idx, {
                    "text": sample["text"],
                    "language": language,
                    "data_source": sample["data_source"],
                }
                idx += 1


if __name__ == "__main__":
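    # A minimal smoke test, assuming this script sits next to the data/
    # directory and that the installed `datasets` version still supports
    # script-based loaders (recent releases require trust_remote_code=True).
    dataset = datasets.load_dataset(
        "language_identification.py",
        name="xnli",
        split="train",
        trust_remote_code=True,
    )
    for i, example in enumerate(dataset):
        print(example)
        if i >= 2:
            break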