# hinglish-dump.py
## About: This is a dataset script for diwank/hinglish-dump
## Docs: https://huggingface.co/docs/datasets/dataset_script.html
"""Raw merged dump of Hinglish (hi-EN) datasets."""
import datasets
_DESCRIPTION = """\
Raw merged dump of Hinglish (hi-EN) datasets.
"""
_HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump"
_LICENSE = "MIT"
def _data_urls(paths):
    # Resolve repo-relative paths under data/ to absolute download URLs.
    return [f"{_HOMEPAGE}/resolve/main/data/{path}" for path in paths]

_URLS = {
    "crowd_transliteration": _data_urls([
        "crowd_transliteration/crowd_transliterations.hi-en.txt",
    ]),
    "fire2013": _data_urls([
        "fire2013/HindiEnglish_FIRE2013_AnnotatedDev.txt",
        "fire2013/HindiEnglish_FIRE2013_Test_GT.txt",
    ]),
    "hindi_romanized_dump": _data_urls([
        "hindi_romanized_dump/hi_rom.txt",
    ]),
    "hindi_xlit": _data_urls([
        "hindi_xlit/HiEn_ann1_test.json",
        "hindi_xlit/HiEn_ann1_train.json",
        "hindi_xlit/HiEn_ann1_valid.json",
    ]),
    "hinge": _data_urls([
        "hinge/eval_human.csv",
        "hinge/eval_human.pkl",
        "hinge/train_human.csv",
        "hinge/train_human.pkl",
        "hinge/train_synthetic.csv",
        "hinge/eval_synthetic.csv",
    ]),
    "hinglish_norm": _data_urls([
        "hinglish_norm/hinglishNorm_trainSet.json",
    ]),
    "news2018": _data_urls([
        "news2018/NEWS2018_M-EnHi_tst.xml",
        "news2018/NEWS2018_M-EnHi_trn.xml",
        "news2018/NEWS2018_M-EnHi_dev.xml",
    ]),
}
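# For example, the single "hinglish_norm" entry above resolves to:
#   https://huggingface.co/datasets/diwank/hinglish-dump/resolve/main/data/hinglish_norm/hinglishNorm_trainSet.json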
config_names = list(_URLS.keys())
version = datasets.Version("1.0.0")

class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
    """Raw merged dump of Hinglish (hi-EN) datasets."""

    VERSION = version
    CONFIGS = config_names

    # One BuilderConfig per subset so each can be loaded by name.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=subset, version=version, description=f"Config for {subset}")
        for subset in config_names
    ]
    def _info(self):
        # No common schema is declared: the subsets of this raw dump have
        # heterogeneous formats (txt, csv, json, xml, pkl).
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )
    def _split_generators(self, dl_manager):
        # dl_manager is a datasets.DownloadManager. It accepts any nested
        # list/dict of URLs and returns the same structure with each URL
        # replaced by the path to a locally cached copy. Archives are
        # extracted by default, and the path to the extraction folder is
        # returned instead of the archive itself.
        # Download only the files belonging to the selected config, and pass
        # the resulting local paths through to _generate_examples (the
        # original passed filepath=None and discarded the downloaded paths).
        data_files = dl_manager.download_and_extract(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": data_files, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepaths": data_files, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepaths": data_files, "split": "test"},
            ),
        ]
    def _generate_examples(self, filepaths, split):
        # Must yield (key, example) tuples; per-subset parsing is left
        # unimplemented in this raw dump, so yield nothing. The unreachable
        # `yield` keeps this a valid (empty) generator, whereas the original
        # `return None` was a plain function that `datasets` would reject.
        return
        yield
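# A minimal sketch of what a per-subset parser could look like (not part of
# the original script; it assumes the file holds tab-separated
# "devanagari<TAB>roman" pairs, as in crowd_transliterations.hi-en.txt, and
# that matching features are declared in _info):
#
#     def _generate_examples(self, filepaths, split):
#         key = 0
#         for path in filepaths:
#             with open(path, encoding="utf-8") as f:
#                 for line in f:
#                     parts = line.rstrip("\n").split("\t")
#                     if len(parts) == 2:
#                         yield key, {"hi": parts[0], "hi_en": parts[1]}
#                         key += 1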