import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {spam-text-messages-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
Image-text pairs of spam text messages: each example contains an image of a
spam message and the corresponding message text. The dataset can be used to
train and evaluate models for spam detection, text classification and
text recognition on message images.
"""
_NAME = 'spam-text-messages-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SpamTextMessagesDataset(datasets.GeneratorBasedBuilder):
    """Small sample of image-text pairs"""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image': datasets.Image(),
                'text': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the image archive and the CSV annotations, then iterate
        # over the archive without extracting it to disk.
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images,
                    "annotations": annotations,
                },
            ),
        ]

    def _generate_examples(self, images, annotations):
        # CSV mapping each image file in the archive to its message text.
        annotations_df = pd.read_csv(annotations, engine="python")

        for idx, (image_path, image) in enumerate(images):
            # Case-insensitive match of the archive path against the
            # "image" column of the annotations.
            text = annotations_df.loc[
                annotations_df["image"].str.lower() == image_path.lower(),
                "text"].values[0]

            yield idx, {
                "image": {
                    "path": image_path,
                    "bytes": image.read()
                },
                "text": text,
            }
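

# Usage sketch (illustrative only; this block is not executed when the
# `datasets` library imports the script as a module): assuming the script and
# the data files are published at the repository given in _HOMEPAGE, the
# single "train" split defined above can be loaded through the standard
# `datasets` API.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset(f"TrainingDataPro/{_NAME}", split="train")
    example = dataset[0]
    print(example["text"])        # message text of the first example
    print(example["image"].size)  # decoded as a PIL image by datasets.Image()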