vkashko commited on
Commit
6dcdbe0
1 Parent(s): 2db7dba

feat: script

Browse files
Files changed (1) hide show
  1. spam-text-messages-dataset.py +67 -0
spam-text-messages-dataset.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import pandas as pd
3
+
4
+ _CITATION = """\
5
+ @InProceedings{huggingface:dataset,
6
+ title = {spam-text-messages-dataset},
7
+ author = {TrainingDataPro},
8
+ year = {2023}
9
+ }
10
+ """
11
+
12
+ _DESCRIPTION = """\
13
+ The dataset consisting of garbage cans of various capacities and types.
14
+ Best to train a neural network to monitor the timely removal of garbage and
15
+ organize the logistics of vehicles for garbage collection. Dataset is useful
16
+ for the recommendation systems, optimization and automization the work of
17
+ community services, smart city.
18
+ """
19
+ _NAME = 'spam-text-messages-dataset'
20
+
21
+ _HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
22
+
23
+ _LICENSE = ""
24
+
25
+ _DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
26
+
27
+
28
class SpamTextMessagesDataset(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro spam-text-messages dataset.

    Each example pairs a screenshot image of a text message with the
    message's transcribed text, joined by (case-insensitive) image path.
    """

    def _info(self):
        """Return dataset metadata: an ``image`` feature plus a ``text`` string."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image': datasets.Image(),
                'text': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            # Was previously omitted even though the constant exists.
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and the CSV annotations.

        Returns a single TRAIN split; the archive is streamed via
        ``iter_archive`` rather than extracted.
        """
        images = dl_manager.download(f"{_DATA}images.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, images, annotations):
        """Yield ``(index, example)`` pairs.

        Args:
            images: iterable of ``(path, file_obj)`` from ``iter_archive``.
            annotations: local path to the semicolon-separated CSV with at
                least ``image`` and ``text`` columns.
        """
        annotations_df = pd.read_csv(annotations, sep=';')
        # Hoist the lowercasing out of the loop: the original recomputed
        # .str.lower() over the whole column once per image.
        lowered_paths = annotations_df['image'].str.lower()

        for idx, (image_path, image) in enumerate(images):
            texts = annotations_df.loc[lowered_paths == image_path.lower(), 'text']
            if texts.empty:
                # An image with no annotation row previously crashed with
                # IndexError on .values[0]; skip it instead.
                continue
            yield idx, {
                "image": {
                    "path": image_path,
                    "bytes": image.read()
                },
                'text': texts.values[0]
            }