keremberke commited on
Commit
57573d5
1 Parent(s): 9d3fc99

dataset uploaded by roboflow2huggingface package

Browse files
README.dataset.txt ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Construction Site Safety > original_raw-images
2
+ https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety
3
+
4
+ Provided by a Roboflow user
5
+ License: CC BY 4.0
6
+
7
+ videos from:
8
+ * https://www.youtube.com/watch?v=Dhxf5mm7g1g
9
+ * https://www.youtube.com/watch?v=rYv9JZ2XBW4
10
+
11
+ images cloned from:
12
+ * https://universe.roboflow.com/roboflow-universe-projects/personal-protective-equipment-combined-model/
13
+ * https://universe.roboflow.com/roboflow-universe-projects/people-and-ladders/
14
+ * https://universe.roboflow.com/roboflow-universe-projects/safety-vests/
15
+ * https://universe.roboflow.com/mohamed-sabek-6zmr6/excavators-cwlh0
16
+ * https://universe.roboflow.com/popular-benchmarks/mit-indoor-scene-recognition/ - null images
17
+ * https://universe.roboflow.com/mohamed-traore-2ekkp/people-detection-general
18
+ * https://universe.roboflow.com/labeler-projects/construction-madness/
README.md ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ task_categories:
3
+ - object-detection
4
+ tags:
5
+ - roboflow
6
+ ---
7
+
8
+ ### Roboflow Dataset Page
9
+ https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety
10
+
11
+ ### Citation
12
+ ```
13
+ @misc{ construction-site-safety_dataset,
14
+ title = { Construction Site Safety Dataset },
15
+ type = { Open Source Dataset },
16
+ author = { Roboflow Universe Projects },
17
+ howpublished = { \url{ https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety } },
18
+ url = { https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety },
19
+ journal = { Roboflow Universe },
20
+ publisher = { Roboflow },
21
+ year = { 2022 },
22
+ month = { dec },
23
+ note = { visited on 2022-12-29 },
24
+ }
25
+ ```
26
+
27
+ ### License
28
+ CC BY 4.0
29
+
30
+ ### Dataset Summary
31
+ This dataset was exported via roboflow.com on December 29, 2022 at 11:22 AM GMT
32
+
33
+ Roboflow is an end-to-end computer vision platform that helps you
34
+ * collaborate with your team on computer vision projects
35
+ * collect & organize images
36
+ * understand unstructured image data
37
+ * annotate, and create datasets
38
+ * export, train, and deploy computer vision models
39
+ * use active learning to improve your dataset over time
40
+
41
+ It includes 398 images.
42
+ Construction site safety objects are annotated in COCO format.
43
+
44
+ The following pre-processing was applied to each image:
45
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
46
+
47
+ No image augmentation techniques were applied.
48
+
49
+
50
+
README.roboflow.txt ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ Construction Site Safety - v1 original_raw-images
3
+ ==============================
4
+
5
+ This dataset was exported via roboflow.com on December 29, 2022 at 11:22 AM GMT
6
+
7
+ Roboflow is an end-to-end computer vision platform that helps you
8
+ * collaborate with your team on computer vision projects
9
+ * collect & organize images
10
+ * understand unstructured image data
11
+ * annotate, and create datasets
12
+ * export, train, and deploy computer vision models
13
+ * use active learning to improve your dataset over time
14
+
15
+ It includes 398 images.
16
+ Construction site safety objects are annotated in COCO format.
17
+
18
+ The following pre-processing was applied to each image:
19
+ * Auto-orientation of pixel data (with EXIF-orientation stripping)
20
+
21
+ No image augmentation techniques were applied.
22
+
23
+
construction-safety-object-detection.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import json
3
+ import os
4
+
5
+ import datasets
6
+
7
+
8
# Project page on Roboflow Universe; surfaced as DatasetInfo.homepage.
_HOMEPAGE = "https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety"
# License string surfaced in DatasetInfo.license and the dataset card.
_LICENSE = "CC BY 4.0"
# BibTeX citation for the dataset. "\\url" is an escaped backslash, so the
# rendered string contains the intended BibTeX command "\url{...}".
_CITATION = """\
@misc{ construction-site-safety_dataset,
title = { Construction Site Safety Dataset },
type = { Open Source Dataset },
author = { Roboflow Universe Projects },
howpublished = { \\url{ https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety } },
url = { https://universe.roboflow.com/roboflow-universe-projects/construction-site-safety },
journal = { Roboflow Universe },
publisher = { Roboflow },
year = { 2022 },
month = { dec },
note = { visited on 2022-12-29 },
}
"""
# Per-split zip archives hosted on the Hugging Face Hub; keys are the
# split names used by _split_generators, values are resolvable URLs.
_URLS = {
    "train": "https://huggingface.co/datasets/keremberke/construction-safety-object-detection/resolve/main/data/train.zip",
    "validation": "https://huggingface.co/datasets/keremberke/construction-safety-object-detection/resolve/main/data/valid.zip",
    "test": "https://huggingface.co/datasets/keremberke/construction-safety-object-detection/resolve/main/data/test.zip",
}

# Class-label names for the ClassLabel feature; order defines the integer
# label ids that `datasets` assigns.
_CATEGORIES = ['SUV', 'truck and trailer', 'wheel loader', 'machinery', 'Person', 'trailer', 'bus', 'semi', 'Safety Cone', 'vehicle', 'sedan', 'fire hydrant', 'mini-van', 'NO-Safety Vest', 'van', 'NO-Hardhat', 'Excavator', 'NO-Mask', 'dump truck', 'truck', 'Mask', 'Ladder', 'Safety Vest', 'Hardhat', 'Gloves']
# COCO-format annotation file expected inside each extracted split folder.
_ANNOTATION_FILENAME = "_annotations.coco.json"
32
+
33
+
34
class CONSTRUCTIONSAFETYOBJECTDETECTION(datasets.GeneratorBasedBuilder):
    """Construction Site Safety object-detection dataset.

    Loads COCO-annotated images from per-split zip archives (see ``_URLS``)
    and yields one example per image with its bounding-box annotations.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Return the ``DatasetInfo`` describing features, homepage, citation and license."""
        features = datasets.Features(
            {
                "image_id": datasets.Value("int64"),
                "image": datasets.Image(),
                "width": datasets.Value("int32"),
                "height": datasets.Value("int32"),
                # One entry per COCO annotation attached to the image.
                "objects": datasets.Sequence(
                    {
                        "id": datasets.Value("int64"),
                        "area": datasets.Value("int64"),
                        # COCO bbox convention: [x, y, width, height].
                        "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
                        "category": datasets.ClassLabel(names=_CATEGORIES),
                    }
                ),
            }
        )
        return datasets.DatasetInfo(
            features=features,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        """Download and extract each split archive and wire it to a ``SplitGenerator``."""
        data_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"folder_dir": data_files[url_key]},
            )
            for split_name, url_key in (
                (datasets.Split.TRAIN, "train"),
                (datasets.Split.VALIDATION, "validation"),
                (datasets.Split.TEST, "test"),
            )
        ]

    def _generate_examples(self, folder_dir):
        """Yield ``(idx, example)`` pairs for every annotated image in *folder_dir*.

        *folder_dir* is an extracted split directory containing the images and
        one ``_annotations.coco.json`` file. Images present on disk but absent
        from the annotation file are skipped.
        """

        def process_annot(annot, category_id_to_category):
            # Keep only the fields declared in _info(); map the numeric COCO
            # category id to its string name for the ClassLabel feature.
            return {
                "id": annot["id"],
                "area": annot["area"],
                "bbox": annot["bbox"],
                "category": category_id_to_category[annot["category_id"]],
            }

        annotation_filepath = os.path.join(folder_dir, _ANNOTATION_FILENAME)
        # Explicit encoding: JSON files are UTF-8 regardless of platform locale.
        with open(annotation_filepath, "r", encoding="utf-8") as f:
            annotations = json.load(f)
        category_id_to_category = {category["id"]: category["name"] for category in annotations["categories"]}
        image_id_to_annotations = collections.defaultdict(list)
        for annot in annotations["annotations"]:
            image_id_to_annotations[annot["image_id"]].append(annot)
        # Keyed by file name (not numeric image id) so directory entries can
        # be matched directly against the COCO image records.
        filename_to_image = {image["file_name"]: image for image in annotations["images"]}

        idx = 0
        for filename in os.listdir(folder_dir):
            filepath = os.path.join(folder_dir, filename)
            if filename in filename_to_image:
                image = filename_to_image[filename]
                objects = [
                    process_annot(annot, category_id_to_category) for annot in image_id_to_annotations[image["id"]]
                ]
                with open(filepath, "rb") as f:
                    image_bytes = f.read()
                yield idx, {
                    "image_id": image["id"],
                    "image": {"path": filepath, "bytes": image_bytes},
                    "width": image["width"],
                    "height": image["height"],
                    "objects": objects,
                }
                idx += 1
data/test.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d580d8d6df74dd5f2cb1bfb781adc809a49c5b39cea9a6955b4ab77a1e8694c
3
+ size 1796485
data/train.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e06ac9d010880131edc2d460bf2669f6609d02626b0348d07a0c31f8f3e8f0f0
3
+ size 22259666
data/valid.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:545f0da25bb34805b2b198aedadc702793fb102d791157e9d1cead170be26594
3
+ size 3338812