import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {spam-text-messages-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset consists of images of spam text messages paired with the corresponding
message text. It is useful for training models for spam detection, optical character
recognition and content moderation.
"""

_NAME = 'spam-text-messages-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SpamTextMessagesDataset(datasets.GeneratorBasedBuilder):
    """Small sample of image-text pairs"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image': datasets.Image(),
                'text': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the image archive and the CSV file with text annotations,
        # then iterate over the archive lazily instead of extracting it.
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, images, annotations):
        annotations_df = pd.read_csv(annotations, engine='python')

        for idx, (image_path, image) in enumerate(images):
            # Match the archive member path against the 'image' column of the
            # CSV (case-insensitively) to find the annotated text for this image.
            yield idx, {
                "image": {
                    "path": image_path,
                    "bytes": image.read()
                },
                'text': annotations_df.loc[
                    annotations_df['image'].str.lower() == image_path.lower()
                ]['text'].values[0]
            }
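
# Minimal usage sketch (assumes this script is hosted as
# TrainingDataPro/spam-text-messages-dataset on the Hugging Face Hub;
# adjust the repository path if it differs):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("TrainingDataPro/spam-text-messages-dataset", split="train")
#     sample = dataset[0]
#     sample["image"]  # decoded PIL image
#     sample["text"]   # associated text annotation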