# hinglish-dump.py
## About: This is the dataset loading script for diwank/hinglish-dump
## Docs: https://huggingface.co/docs/datasets/dataset_script.html
"""Raw merged dump of Hinglish (hi-EN) datasets."""

import pandas as pd

import datasets
_DESCRIPTION = """\
Raw merged dump of Hinglish (hi-EN) datasets.
"""
_HOMEPAGE = "https://huggingface.co/datasets/diwank/hinglish-dump"
_LICENSE = "MIT"
_URLS = {
subset: f"{_HOMEPAGE}/resolve/main/data/{subset}/data.h5"
for subset in "crowd_transliteration hindi_romanized_dump hindi_xlit hinge hinglish_norm news2018".split() }

# Every subset shares the same flat, string-valued schema.
_FEATURE_NAMES = [
    "target_hinglish",
    "source_hindi",
    "parallel_english",
    "annotations",
    "raw_input",
    "alternates",
]

config_names = list(_URLS.keys())
version = datasets.Version("1.0.0")


class HinglishDumpDataset(datasets.GeneratorBasedBuilder):
    """Raw merged dump of Hinglish (hi-EN) datasets."""

    VERSION = version
    CONFIGS = config_names

    # One BuilderConfig per subset; no default is set, so callers must
    # pick a config (subset) explicitly.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=subset, version=version, description=f"Config for {subset}")
        for subset in config_names
    ]
    DEFAULT_CONFIG_NAME = None

    def _info(self):
        # Declare every feature as a plain string column.
        features = datasets.Features({
            feature: datasets.Value("string")
            for feature in _FEATURE_NAMES
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        # dl_manager is a datasets.DownloadManager: it accepts any nested
        # list/dict of URLs and returns the same structure with each URL
        # replaced by the path to a locally cached copy. Archives are
        # extracted by default, and the path to the extracted folder is
        # returned instead of the archive itself.
        url = _URLS[self.config.name]
        filepath = self.data_dir = dl_manager.download_and_extract(url)

        # The HDF5 file holds one dataframe per split; the "eval" key is
        # exposed as the standard VALIDATION split.
        return [
            datasets.SplitGenerator(
                name=getattr(datasets.Split, "VALIDATION" if split == "eval" else split.upper()),
                gen_kwargs=dict(filepath=filepath, split=split),
            )
            for split in ["train", "eval", "test"]
        ]

    def _generate_examples(self, filepath, split):
        # Each split is stored as a pandas dataframe under its split name
        # in the HDF5 file; emit one (index, row-dict) pair per example.
        df = pd.read_hdf(filepath, key=split)

        for i, row in enumerate(df.to_dict("records")):
            yield i, row
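

# Usage sketch (an illustration, not part of the loading script): once this
# script is published with the repo, a subset can be loaded by config name.
# A config name is required because DEFAULT_CONFIG_NAME is None.
#
#   from datasets import load_dataset
#
#   ds = load_dataset("diwank/hinglish-dump", "hinge")
#   print(ds["train"][0]["target_hinglish"])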