Tuteldove commited on
Commit
46e9284
1 Parent(s): 1a83cc1

Upload 6 files

Browse files
.gitattributes CHANGED
@@ -1,55 +1,55 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.lz4 filter=lfs diff=lfs merge=lfs -text
12
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
- *.model filter=lfs diff=lfs merge=lfs -text
14
- *.msgpack filter=lfs diff=lfs merge=lfs -text
15
- *.npy filter=lfs diff=lfs merge=lfs -text
16
- *.npz filter=lfs diff=lfs merge=lfs -text
17
- *.onnx filter=lfs diff=lfs merge=lfs -text
18
- *.ot filter=lfs diff=lfs merge=lfs -text
19
- *.parquet filter=lfs diff=lfs merge=lfs -text
20
- *.pb filter=lfs diff=lfs merge=lfs -text
21
- *.pickle filter=lfs diff=lfs merge=lfs -text
22
- *.pkl filter=lfs diff=lfs merge=lfs -text
23
- *.pt filter=lfs diff=lfs merge=lfs -text
24
- *.pth filter=lfs diff=lfs merge=lfs -text
25
- *.rar filter=lfs diff=lfs merge=lfs -text
26
- *.safetensors filter=lfs diff=lfs merge=lfs -text
27
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
- *.tar.* filter=lfs diff=lfs merge=lfs -text
29
- *.tar filter=lfs diff=lfs merge=lfs -text
30
- *.tflite filter=lfs diff=lfs merge=lfs -text
31
- *.tgz filter=lfs diff=lfs merge=lfs -text
32
- *.wasm filter=lfs diff=lfs merge=lfs -text
33
- *.xz filter=lfs diff=lfs merge=lfs -text
34
- *.zip filter=lfs diff=lfs merge=lfs -text
35
- *.zst filter=lfs diff=lfs merge=lfs -text
36
- *tfevents* filter=lfs diff=lfs merge=lfs -text
37
- # Audio files - uncompressed
38
- *.pcm filter=lfs diff=lfs merge=lfs -text
39
- *.sam filter=lfs diff=lfs merge=lfs -text
40
- *.raw filter=lfs diff=lfs merge=lfs -text
41
- # Audio files - compressed
42
- *.aac filter=lfs diff=lfs merge=lfs -text
43
- *.flac filter=lfs diff=lfs merge=lfs -text
44
- *.mp3 filter=lfs diff=lfs merge=lfs -text
45
- *.ogg filter=lfs diff=lfs merge=lfs -text
46
- *.wav filter=lfs diff=lfs merge=lfs -text
47
- # Image files - uncompressed
48
- *.bmp filter=lfs diff=lfs merge=lfs -text
49
- *.gif filter=lfs diff=lfs merge=lfs -text
50
- *.png filter=lfs diff=lfs merge=lfs -text
51
- *.tiff filter=lfs diff=lfs merge=lfs -text
52
- # Image files - compressed
53
- *.jpg filter=lfs diff=lfs merge=lfs -text
54
- *.jpeg filter=lfs diff=lfs merge=lfs -text
55
- *.webp filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
12
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
13
+ *.model filter=lfs diff=lfs merge=lfs -text
14
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
15
+ *.npy filter=lfs diff=lfs merge=lfs -text
16
+ *.npz filter=lfs diff=lfs merge=lfs -text
17
+ *.onnx filter=lfs diff=lfs merge=lfs -text
18
+ *.ot filter=lfs diff=lfs merge=lfs -text
19
+ *.parquet filter=lfs diff=lfs merge=lfs -text
20
+ *.pb filter=lfs diff=lfs merge=lfs -text
21
+ *.pickle filter=lfs diff=lfs merge=lfs -text
22
+ *.pkl filter=lfs diff=lfs merge=lfs -text
23
+ *.pt filter=lfs diff=lfs merge=lfs -text
24
+ *.pth filter=lfs diff=lfs merge=lfs -text
25
+ *.rar filter=lfs diff=lfs merge=lfs -text
26
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
27
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
29
+ *.tar filter=lfs diff=lfs merge=lfs -text
30
+ *.tflite filter=lfs diff=lfs merge=lfs -text
31
+ *.tgz filter=lfs diff=lfs merge=lfs -text
32
+ *.wasm filter=lfs diff=lfs merge=lfs -text
33
+ *.xz filter=lfs diff=lfs merge=lfs -text
34
+ *.zip filter=lfs diff=lfs merge=lfs -text
35
+ *.zst filter=lfs diff=lfs merge=lfs -text
36
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
37
+ # Audio files - uncompressed
38
+ *.pcm filter=lfs diff=lfs merge=lfs -text
39
+ *.sam filter=lfs diff=lfs merge=lfs -text
40
+ *.raw filter=lfs diff=lfs merge=lfs -text
41
+ # Audio files - compressed
42
+ *.aac filter=lfs diff=lfs merge=lfs -text
43
+ *.flac filter=lfs diff=lfs merge=lfs -text
44
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
45
+ *.ogg filter=lfs diff=lfs merge=lfs -text
46
+ *.wav filter=lfs diff=lfs merge=lfs -text
47
+ # Image files - uncompressed
48
+ *.bmp filter=lfs diff=lfs merge=lfs -text
49
+ *.gif filter=lfs diff=lfs merge=lfs -text
50
+ *.png filter=lfs diff=lfs merge=lfs -text
51
+ *.tiff filter=lfs diff=lfs merge=lfs -text
52
+ # Image files - compressed
53
+ *.jpg filter=lfs diff=lfs merge=lfs -text
54
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
55
+ *.webp filter=lfs diff=lfs merge=lfs -text
Boat_dataset.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Source: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py

import csv
import json
import os

import datasets

# BibTeX entry users should cite when using this dataset.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Boat dataset},
author={XXX, Inc.},
year={2024}
}
"""

# Short human-readable summary shown on the dataset hub page.
_DESCRIPTION = """\
This dataset is designed to solve an object detection task with images of boats.
"""

# Base URL the data files below are resolved against.
_HOMEPAGE = "https://huggingface.co/datasets/SIS-2024-spring/Boat_dataset/resolve/main"

_LICENSE = ""

# Remote locations of the class list and the per-split JSON-Lines annotation files.
_URLS = {
    "classes": f"{_HOMEPAGE}/data/classes.txt",
    "train": f"{_HOMEPAGE}/data/instances_train2023r.jsonl",
    "val": f"{_HOMEPAGE}/data/instances_val2023r.jsonl",
}
30
+
31
class BoatDataset(datasets.GeneratorBasedBuilder):
    """Object-detection dataset builder for boat images.

    Downloads the files listed in ``_URLS`` (a ``classes.txt`` with one
    label name per line, plus train/val JSON-Lines annotation files with
    one image record per line) and yields one example per image.
    """

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="Boat_dataset",
            version=VERSION,
            description="Dataset for detecting boats in aerial images.",
        ),
    ]

    DEFAULT_CONFIG_NAME = "Boat_dataset"  # Provide a default configuration

    def _info(self):
        """Return the dataset metadata and feature schema."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'image_id': datasets.Value('int32'),
                'image_path': datasets.Value('string'),
                'width': datasets.Value('int32'),
                'height': datasets.Value('int32'),
                # Per-image annotations; each inner sequence is aligned by object.
                'objects': datasets.Features({
                    'id': datasets.Sequence(datasets.Value('int32')),
                    'area': datasets.Sequence(datasets.Value('float32')),
                    # One [x, y, width, height] box per object (COCO-style).
                    'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)),
                    'category': datasets.Sequence(datasets.Value('int32'))
                }),
            }),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download all data files and define the train/validation splits.

        Args:
            dl_manager: the ``datasets`` download manager.

        Returns:
            A list with one ``SplitGenerator`` per split.
        """
        # Download all files and extract them
        downloaded_files = dl_manager.download_and_extract(_URLS)

        # BUG FIX: read the class list from the file the download manager
        # actually fetched. The original opened the hard-coded relative path
        # 'classes.txt', which ignores the downloaded/cached copy and fails
        # unless the current working directory happens to contain that file.
        with open(downloaded_files["classes"], 'r', encoding='utf-8') as file:
            classes = [line.strip() for line in file]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "annotations_file": downloaded_files["train"],
                    "classes": classes,
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "annotations_file": downloaded_files["val"],
                    "classes": classes,
                    "split": "val",
                },
            ),
        ]

    def _generate_examples(self, annotations_file, classes, split):
        """Yield ``(key, example)`` pairs from a JSON-Lines annotation file.

        Args:
            annotations_file: local path to the ``.jsonl`` annotations.
            classes: list of class-name strings (currently unused here; kept
                so the ``gen_kwargs`` interface stays unchanged).
            split: split name ("train" or "val"); unused, kept by convention.
        """
        # Process annotations
        with open(annotations_file, encoding="utf-8") as f:
            for key, row in enumerate(f):
                try:
                    data = json.loads(row.strip())
                    yield key, {
                        "image_id": data["image_id"],
                        "image_path": data["image_path"],
                        "width": data["width"],
                        "height": data["height"],
                        "objects": data["objects"],
                    }
                except json.JSONDecodeError:
                    # Skip malformed lines rather than aborting the whole build.
                    print(f"Skipping invalid JSON at line {key + 1}: {row}")
                    continue
README.md ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ viewer: false
3
+ ---
4
+
5
+ # Boat Dataset for Object Detection
6
+
7
+ ## Overview
8
+ This dataset contains images of real & virtual boats for object detection tasks. It can be used to train and evaluate object detection models.
9
+
10
+ ## Dataset Structure
11
+
12
+ ### Data Instances
13
+
14
+ A data point comprises an image and its object annotations.
15
+
16
+ ```
17
+ {'image_id': 0,
18
+ 'image_path': 'images/0720_0937_2023-07-20-09-37-30_0_middle_color000220.jpg',
19
+ 'width': 640,
20
+ 'height': 480,
21
+ 'objects': {'id': [1],
22
+ 'area': [328.0],
23
+ 'bbox': [[153.69000244140625,
24
+ 101.76499938964844,
25
+ 21.924999237060547,
26
+ 14.972999572753906]],
27
+ 'category': [8]}}
28
+ ```
29
+
30
+ ### Data Fields
31
+
32
+ - `image_id`: the image id
33
+ - `width`: the image width
34
+ - `height`: the image height
35
+ - `objects`: a dictionary containing bounding box metadata for the objects present on the image
36
+ - `id`: the annotation id
37
+ - `area`: the area of the bounding box
38
+ - `bbox`: the object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format)
39
+ - `category`: the object's category, with possible values including
40
+ - `BallonBoat` (0)
41
+ - `BigBoat` (1)
42
+ - `Boat` (2)
43
+ - `JetSki` (3)
44
+ - `Katamaran` (4)
45
+ - `SailBoat` (5)
46
+ - `SmallBoat` (6)
47
+ - `SpeedBoat` (7)
48
+ - `WAM_V` (8)
49
+
50
+
51
+ ### Data Splits
52
+
53
+ - `Training dataset` (42833)
54
+ - `Real`
55
+ - `WAM_V` (2333)
56
+ - `Virtual`
57
+ - `BallonBoat` (4500)
58
+ - `BigBoat` (4500)
59
+ - `Boat` (4500)
60
+ - `JetSki` (4500)
61
+ - `Katamaran` (4500)
62
+ - `SailBoat` (4500)
63
+ - `SmallBoat` (4500)
64
+ - `SpeedBoat` (4500)
65
+ - `WAM_V` (4500)
66
+
67
+ - `Val dataset` (5400)
68
+ - `Real`
69
+ - `WAM_V` (900)
70
+ - `Virtual`
71
+ - `BallonBoat` (500)
72
+ - `BigBoat` (500)
73
+ - `Boat` (500)
74
+ - `JetSki` (500)
75
+ - `Katamaran` (500)
76
+ - `SailBoat` (500)
77
+ - `SmallBoat` (500)
78
+ - `SpeedBoat` (500)
79
+ - `WAM_V` (500)
80
+
81
+
82
+ ## Usage
83
+ ```
84
+ from datasets import load_dataset
85
+ dataset = load_dataset("zhuchi76/Boat_dataset")
86
+ ```
87
+
88
+ ## Citation
89
+ If you use this dataset in your research, please cite it using the BibTeX entry provided in the dataset loading script (`Boat_dataset.py`).
data/classes.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ BallonBoat
2
+ BigBoat
3
+ Boat
4
+ JetSki
5
+ Katamaran
6
+ SailBoat
7
+ SmallBoat
8
+ SpeedBoat
9
+ WAM_V
data/instances_train2023r.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/instances_val2023r.jsonl ADDED
The diff for this file is too large to render. See raw diff