"""Raw merged dump of Hinglish (hi-EN) datasets.""" |
|
|
|
|
|
import csv |
|
import os |
|
|
|
import datasets |
|
|
|
_DESCRIPTION = """\ |
|
Raw merged dump of Hinglish (hi-EN) datasets. |
|
""" |
|
|
|
_HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump" |
|
_LICENSE = "MIT" |
|
|
|
_URLS = {
    "crowd_transliteration": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "crowd_transliteration/crowd_transliterations.hi-en.txt",
    ])),
    "fire2013": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt",
        "fire2013/HindiEnglish_FIRE2013_Test_GT.txt",
    ])),
    "hindi_romanized_dump": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "hindi_romanized_dump/hi_rom.txt",
    ])),
    "hindi_xlit": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "hindi_xlit/HiEn_ann1_test.json",
        "hindi_xlit/HiEn_ann1_train.json",
        "hindi_xlit/HiEn_ann1_valid.json",
    ])),
    "hinge": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "hinge/eval_human.csv",
        "hinge/eval_human.pkl",
        "hinge/train_human.csv",
        "hinge/train_human.pkl",
        "hinge/train_synthetic.csv",
        "hinge/eval_synthetic.csv",
    ])),
    "hinglish_norm": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "hinglish_norm/hinglishNorm_trainSet.json",
    ])),
    "news2018": list(map(lambda x: f"{_HOMEPAGE}/resolve/main/data/{x}", [
        "news2018/NEWS2018_M-EnHi_tst.xml",
        "news2018/NEWS2018_M-EnHi_trn.xml",
        "news2018/NEWS2018_M-EnHi_dev.xml",
    ])),
}

config_names = _URLS.keys()
version = datasets.Version("1.0.0")
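# Illustrative usage (an assumption, not part of the original script): each key of
# _URLS becomes a builder config, so a single subset can be loaded by name, e.g.
#
#   import datasets
#   ds = datasets.load_dataset("diwank/hinglish-dump", "fire2013")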
class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
    """Raw merged dump of Hinglish (hi-EN) datasets."""

    VERSION = version
    CONFIGS = config_names

    # One builder config per subset listed in _URLS.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=subset, version=version, description=f"Config for {subset}")
        for subset in config_names
    ]

    def _info(self):
        # No per-record schema is declared: the subsets are raw files in
        # heterogeneous formats (txt, csv, pkl, json, xml).
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # Download every file listed in _URLS; `data_dir` maps each subset name to
        # the list of local paths of its downloaded files. Everything is exposed
        # under a single split generator (datasets.Split.ALL).
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.ALL,
            ),
        ]
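    # NOTE: a GeneratorBasedBuilder also needs a `_generate_examples` method. The
    # version below is only a minimal sketch (an assumption, not the original
    # implementation): it yields one record per downloaded file of the selected
    # config. Wiring it up would additionally require forwarding the paths from
    # `_split_generators` (e.g. gen_kwargs={"filepaths": data_dir[self.config.name]})
    # and declaring a matching schema in `_info`, such as
    # datasets.Features({"filepath": datasets.Value("string")}).
    def _generate_examples(self, filepaths=()):
        for idx, path in enumerate(filepaths):
            yield idx, {"filepath": path}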