# hinglish-dump.py
#
# About: This is a dataset script for diwank/hinglish-dump
# Docs: https://huggingface.co/docs/datasets/dataset_script.html

"""Raw merged dump of Hinglish (hi-EN) datasets."""

import datasets

_DESCRIPTION = """\
Raw merged dump of Hinglish (hi-EN) datasets.
"""

_HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump"

_LICENSE = "MIT"

# Relative paths of the source files for each subset, resolved against the
# dataset repo on the Hub below.
_FILES = {
    "crowd_transliteration": [
        "crowd_transliteration/crowd_transliterations.hi-en.txt",
    ],
    "fire2013": [
        "fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt",
        "fire2013/HindiEnglish_FIRE2013_Test_GT.txt",
    ],
    "hindi_romanized_dump": [
        "hindi_romanized_dump/hi_rom.txt",
    ],
    "hindi_xlit": [
        "hindi_xlit/HiEn_ann1_test.json",
        "hindi_xlit/HiEn_ann1_train.json",
        "hindi_xlit/HiEn_ann1_valid.json",
    ],
    "hinge": [
        "hinge/eval_human.csv",
        "hinge/eval_human.pkl",
        "hinge/train_human.csv",
        "hinge/train_human.pkl",
        "hinge/train_synthetic.csv",
        "hinge/eval_synthetic.csv",
    ],
    "hinglish_norm": [
        "hinglish_norm/hinglishNorm_trainSet.json",
    ],
    "news2018": [
        "news2018/NEWS2018_M-EnHi_tst.xml",
        "news2018/NEWS2018_M-EnHi_trn.xml",
        "news2018/NEWS2018_M-EnHi_dev.xml",
    ],
}

_URLS = {
    subset: [f"{_HOMEPAGE}/resolve/main/data/{path}" for path in paths]
    for subset, paths in _FILES.items()
}

config_names = list(_URLS.keys())
version = datasets.Version("1.0.0")


def _split_for_filename(filename):
    """Guess the split a file belongs to from keywords in its name.

    Heuristic: the raw dump does not declare splits explicitly, so filename
    keywords (test/tst, dev/valid/eval) are used; everything else is train.
    """
    name = filename.lower()
    if "tst" in name or "test" in name:
        return "test"
    if "dev" in name or "valid" in name or "eval" in name:
        return "validation"
    return "train"


class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
    """Raw merged dump of Hinglish (hi-EN) datasets."""

    VERSION = version
    CONFIGS = config_names

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=subset, version=version, description=f"Config for {subset}"
        )
        for subset in config_names
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Minimal schema for a raw dump (assumption): each example is one
            # line of one source file, tagged with the file it came from.
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # dl_manager is a datasets.download.DownloadManager; it accepts any
        # nested list/dict of URLs and returns the same structure with each
        # URL replaced by the path to a locally cached copy. Splits are
        # assigned on the URLs before downloading, because the cached local
        # paths carry hashed names rather than the original filenames.
        urls = _URLS[self.config.name]
        split_urls = {"train": [], "validation": [], "test": []}
        for url in urls:
            filename = url.rsplit("/", 1)[-1]
            split_urls[_split_for_filename(filename)].append(url)

        data_dir = dl_manager.download_and_extract(split_urls)

        # Pair each local path with its source URL so the original filename
        # stays available to the example generator.
        files = {
            split: list(zip(split_urls[split], paths))
            for split, paths in data_dir.items()
        }

        split_names = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        return [
            datasets.SplitGenerator(
                name=split_names[split],
                gen_kwargs={"files": files[split], "split": split},
            )
            for split in files
            if files[split]  # skip splits with no files for this config
        ]

    def _generate_examples(self, files, split):
        # Raw-dump reader (placeholder assumption): every non-empty line of
        # every text file becomes one example. Binary pickle files (.pkl)
        # are skipped rather than decoded.
        for url, path in files:
            filename = url.rsplit("/", 1)[-1]
            if filename.endswith(".pkl"):
                continue
            with open(path, encoding="utf-8", errors="ignore") as f:
                for line_no, line in enumerate(f):
                    line = line.strip()
                    if line:
                        yield f"{filename}:{line_no}", {
                            "file": filename,
                            "text": line,
                        }
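
# ---------------------------------------------------------------------------
# Usage sketch (assumption: this script lives alongside the data files in the
# diwank/hinglish-dump repo on the Hub, so `load_dataset` can resolve both).
# Each key of _URLS is a config name; splits are inferred from filename
# keywords, so a given config may not expose all three splits.
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("diwank/hinglish-dump", "fire2013", split="test")
    print(ds[0])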