keremberke committed on
Commit
ccfc48a
1 Parent(s): 5f7149f

dataset uploaded by roboflow2huggingface package

README.dataset.txt ADDED
@@ -0,0 +1,27 @@
+ # undefined > 640x640 M-H-L
+ https://public.roboflow.ai/object-detection/undefined
+
+ Provided by undefined
+ License: CC BY 4.0
+
+ # Smoke Detection Dataset
+
+ This computer vision smoke detection dataset contains images of synthesized smoke in both indoor and outdoor settings. Check out the source link below for more information on this dataset.
+
+ Source:
+
+ Smoke100k dataset
+ https://bigmms.github.io/cheng_gcce19_smoke100k/
+
+
+ ## Use Cases
+ - Identifying smoke indoors
+ - Identifying smoke outdoors (but **not** with aerial imagery)
+ - Identifying smoke-like objects (e.g., mist/steam from humidifiers)
+
+
+
+ ## Testing
+
+ You can test this model by using the [Roboflow Inference Widget](https://blog.roboflow.com/testing-a-computer-vision-model-in-10-seconds-or-less/) found above. The widget calls the model inference API, which returns color-coded bounding boxes around the objects the model was trained to detect, along with a label and confidence score for each prediction. It also displays the raw JSON output returned by the API.
+
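+ A minimal sketch of calling a hosted inference endpoint directly with Python `requests` (the endpoint pattern, model ID, API key, and response fields below are illustrative assumptions, not values confirmed by this repository):
+
+ ```python
+ import base64
+
+ import requests
+
+ API_KEY = "YOUR_ROBOFLOW_API_KEY"  # assumption: your own Roboflow API key
+ MODEL_ID = "smoke100-uwe4t/4"      # assumption: the hosted model id and version
+
+ # Send the image as a base64-encoded request body.
+ with open("example.jpg", "rb") as f:
+     image_b64 = base64.b64encode(f.read()).decode("utf-8")
+
+ response = requests.post(
+     f"https://detect.roboflow.com/{MODEL_ID}",
+     params={"api_key": API_KEY},
+     data=image_b64,
+     headers={"Content-Type": "application/x-www-form-urlencoded"},
+ )
+ response.raise_for_status()
+
+ # The JSON output is expected to contain one entry per detection with a box,
+ # class label, and confidence score (field names are assumptions).
+ for prediction in response.json().get("predictions", []):
+     print(prediction.get("class"), prediction.get("confidence"))
+ ```
+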
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ task_categories:
+ - object-detection
+ tags:
+ - roboflow
+ ---
+
+ ### Roboflow Dataset Page
+ https://universe.roboflow.com/smoke-detection/smoke100-uwe4t/dataset/4
+
+ ### Dataset Labels
+
+ ```
+ ['smoke']
+ ```
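+
+ A minimal usage sketch with the `datasets` library (assumptions: a `datasets` version that still runs dataset loading scripts, and network access to fetch the archives):
+
+ ```python
+ from datasets import load_dataset
+
+ # Downloads the train/validation/test zips and runs smoke-object-detection.py
+ # to parse the COCO annotations into examples.
+ ds = load_dataset("keremberke/smoke-object-detection")
+
+ example = ds["train"][0]
+ print(example["width"], example["height"])
+ print(example["objects"]["bbox"])      # list of [x, y, width, height] boxes
+ print(example["objects"]["category"])  # class indices into ['smoke']
+ ```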
+
+ ### Citation
+
+ ```
+ @misc{ smoke100-uwe4t_dataset,
+     title = { Smoke100 Dataset },
+     type = { Open Source Dataset },
+     author = { Smoke Detection },
+     howpublished = { \url{ https://universe.roboflow.com/smoke-detection/smoke100-uwe4t } },
+     url = { https://universe.roboflow.com/smoke-detection/smoke100-uwe4t },
+     journal = { Roboflow Universe },
+     publisher = { Roboflow },
+     year = { 2022 },
+     month = { dec },
+     note = { visited on 2023-01-02 },
+ }
+ ```
+
+ ### License
+ CC BY 4.0
+
+ ### Dataset Summary
+ This dataset was exported via roboflow.ai on March 17, 2022 at 3:42 PM GMT.
+
+ It includes 21578 images.
+ Smoke instances are annotated in COCO format.
+
+ The following pre-processing was applied to each image:
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
+ * Resize to 640x640 (Stretch)
+
+ No image augmentation techniques were applied.
+
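+ The pre-processing steps listed above can be approximated with Pillow; this is a hedged sketch and not Roboflow's exact pipeline:
+
+ ```python
+ from PIL import Image, ImageOps
+
+ # Auto-orientation: apply the EXIF orientation tag to the pixel data, then drop it.
+ img = ImageOps.exif_transpose(Image.open("example.jpg"))
+
+ # Resize to 640x640 with "Stretch" semantics: the aspect ratio is not preserved.
+ img = img.resize((640, 640))
+ img.save("example_640.jpg")
+ ```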
README.roboflow.txt ADDED
@@ -0,0 +1,16 @@
+
+ Smoke100 - v4 640x640 M-H-L
+ ==============================
+
+ This dataset was exported via roboflow.ai on March 17, 2022 at 3:42 PM GMT.
+
+ It includes 21578 images.
+ Smoke instances are annotated in COCO format.
+
+ The following pre-processing was applied to each image:
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
+ * Resize to 640x640 (Stretch)
+
+ No image augmentation techniques were applied.
+
+
data/test.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f28d7b41394ffdc05a22bbc01e5062aa3308388740840e0bf82a735d746d6ffc
+ size 85342865
data/train.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6eba0b1b3fe397ff2f8e30713c1c63da1431f5bad2a60a6b12e78c4fc3469074
+ size 603290033
data/valid.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71d3e20b89b605b8572158af4e7cad5c26b473560e4453c6470ec37aab3d112d
+ size 172611095
smoke-object-detection.py ADDED
@@ -0,0 +1,121 @@
+ import collections
+ import json
+ import os
+
+ import datasets
+
+
+ _HOMEPAGE = "https://universe.roboflow.com/smoke-detection/smoke100-uwe4t/dataset/4"
+ _LICENSE = "CC BY 4.0"
+ _CITATION = """\
+ @misc{ smoke100-uwe4t_dataset,
+     title = { Smoke100 Dataset },
+     type = { Open Source Dataset },
+     author = { Smoke Detection },
+     howpublished = { \\url{ https://universe.roboflow.com/smoke-detection/smoke100-uwe4t } },
+     url = { https://universe.roboflow.com/smoke-detection/smoke100-uwe4t },
+     journal = { Roboflow Universe },
+     publisher = { Roboflow },
+     year = { 2022 },
+     month = { dec },
+     note = { visited on 2023-01-02 },
+ }
+ """
+ _URLS = {
+     "train": "https://huggingface.co/datasets/keremberke/smoke-object-detection/resolve/main/data/train.zip",
+     "validation": "https://huggingface.co/datasets/keremberke/smoke-object-detection/resolve/main/data/valid.zip",
+     "test": "https://huggingface.co/datasets/keremberke/smoke-object-detection/resolve/main/data/test.zip",
+ }
+
+ _CATEGORIES = ['smoke']
+ _ANNOTATION_FILENAME = "_annotations.coco.json"
+
+
+ class SMOKEOBJECTDETECTION(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "image_id": datasets.Value("int64"),
+                 "image": datasets.Image(),
+                 "width": datasets.Value("int32"),
+                 "height": datasets.Value("int32"),
+                 "objects": datasets.Sequence(
+                     {
+                         "id": datasets.Value("int64"),
+                         "area": datasets.Value("int64"),
+                         "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                         "category": datasets.ClassLabel(names=_CATEGORIES),
+                     }
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             features=features,
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download and extract the train/validation/test archives listed in _URLS.
+         data_files = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "folder_dir": data_files["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "folder_dir": data_files["validation"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "folder_dir": data_files["test"],
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, folder_dir):
+         def process_annot(annot, category_id_to_category):
+             # Keep only the fields declared in _info() and map the COCO category id to its name.
+             return {
+                 "id": annot["id"],
+                 "area": annot["area"],
+                 "bbox": annot["bbox"],
+                 "category": category_id_to_category[annot["category_id"]],
+             }
+
+         image_id_to_image = {}
+         idx = 0
+
+         # Parse the COCO annotation file once, then index annotations by image id.
+         annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
+         with open(annotation_filepath, "r") as f:
+             annotations = json.load(f)
+         category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
+         image_id_to_annotations = collections.defaultdict(list)
+         for annot in annotations["annotations"]:
+             image_id_to_annotations[annot["image_id"]].append(annot)
+         # Keyed by file name so images on disk can be matched to their metadata.
+         image_id_to_image = {annot["file_name"]: annot for annot in annotations["images"]}
+
+         for filename in os.listdir(folder_dir):
+             filepath = os.path.join(folder_dir, filename)
+             if filename in image_id_to_image:
+                 image = image_id_to_image[filename]
+                 objects = [
+                     process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
+                 ]
+                 with open(filepath, "rb") as f:
+                     image_bytes = f.read()
+                 yield idx, {
+                     "image_id": image["id"],
+                     "image": {"path": filepath, "bytes": image_bytes},
+                     "width": image["width"],
+                     "height": image["height"],
+                     "objects": objects,
+                 }
+                 idx += 1
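+
+
+ # For reference only (not used by the builder above): a tiny standalone sketch of how
+ # _generate_examples groups a COCO-style annotation file, i.e. mapping category ids to
+ # names and bucketing annotations per image id. The literal values are illustrative
+ # assumptions, not taken from the real _annotations.coco.json.
+ def _demo_coco_grouping():
+     coco = {
+         "categories": [{"id": 0, "name": "smoke"}],
+         "images": [{"id": 0, "file_name": "example.jpg", "width": 640, "height": 640}],
+         "annotations": [
+             {"id": 0, "image_id": 0, "category_id": 0, "bbox": [10.0, 20.0, 100.0, 80.0], "area": 8000},
+         ],
+     }
+     id_to_name = {c["id"]: c["name"] for c in coco["categories"]}
+     per_image = collections.defaultdict(list)
+     for annot in coco["annotations"]:
+         per_image[annot["image_id"]].append(annot)
+     for image in coco["images"]:
+         objects = [
+             {"id": a["id"], "area": a["area"], "bbox": a["bbox"], "category": id_to_name[a["category_id"]]}
+             for a in per_image[image["id"]]
+         ]
+         print(image["file_name"], objects)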