# silicone-merged.py
## About: This is a dataset script for diwank/silicone-merged
## Docs: https://huggingface.co/docs/datasets/dataset_script.html
"""Merged and simplified dialog act datasets from the silicone collection."""

import os

import pandas as pd

import datasets

_DESCRIPTION = """\
Merged and simplified dialog act datasets from the silicone collection.
"""

_HOMEPAGE = "https://huggingface.co/datasets/diwank/silicone-merged"

_LICENSE = "MIT"

# Per-config download URLs. Both configs share the validation/test splits;
# "balanced" swaps in a class-balanced training file.
_URLS = {
    "default": dict(
        train="https://huggingface.co/datasets/diwank/silicone-merged/resolve/main/train.h5",
        validation="https://huggingface.co/datasets/diwank/silicone-merged/resolve/main/validation.h5",
        test="https://huggingface.co/datasets/diwank/silicone-merged/resolve/main/test.h5",
    ),
    "balanced": dict(
        train="https://huggingface.co/datasets/diwank/silicone-merged/resolve/main/balanced.h5",
        validation="https://huggingface.co/datasets/diwank/silicone-merged/resolve/main/validation.h5",
        test="https://huggingface.co/datasets/diwank/silicone-merged/resolve/main/test.h5",
    ),
}


class SiliconeMergedDataset(datasets.GeneratorBasedBuilder):
    """Merged and simplified dialog act datasets from the silicone collection."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="default", version=VERSION, description="Default config"),
        datasets.BuilderConfig(name="balanced", version=VERSION, description="Balanced dataset config"),
    ]

    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the dataset metadata: feature schema, supervised keys, and links.

        Each example is a pair of utterances (``text_a``, ``text_b``) with an
        integer dialog-act label ``labels``.
        """
        features = datasets.Features(
            {
                "text_a": datasets.Value("string"),
                "text_b": datasets.Value("string"),
                "labels": datasets.Value("int64"),
            }
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # The supervised task maps the response utterance to its label.
            supervised_keys=("text_b", "labels"),
            homepage=_HOMEPAGE,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the per-split HDF5 files and declare the dataset splits.

        ``dl_manager`` is a ``datasets.DownloadManager``: it accepts any nested
        list/dict of URLs and returns the same structure with each URL replaced
        by a path to a locally cached file (archives are extracted by default).
        """
        urls = _URLS[self.config.name]
        filepaths = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                # "train"/"test"/"validation" map to datasets.Split.TRAIN etc.
                name=getattr(datasets.Split, split.upper()),
                # These kwargs will be passed to _generate_examples
                gen_kwargs=dict(
                    filepath=filepaths[split],
                    split=split,
                ),
            )
            for split in ["train", "test", "validation"]
        ]

    def _generate_examples(self, filepath, split):
        """Yield ``(key, example)`` pairs from the split's HDF5 file.

        The file is read with pandas from the "data" key; each DataFrame row
        becomes one example dict whose keys match the feature schema.
        """
        df = pd.read_hdf(filepath, "data")

        # NOTE: renamed the loop variable from `tuple` (which shadowed the
        # builtin) to `row`.
        for key, row in enumerate(df.itertuples(index=False)):
            yield key, row._asdict()