zhuchi76 committed
Commit e0c1ee1
1 Parent(s): e5658ed

Update script to hub

Files changed (1)
  1. Boat_dataset.py +112 -0
Boat_dataset.py ADDED
@@ -0,0 +1,112 @@
+ # Source: https://github.com/huggingface/datasets/blob/main/templates/new_dataset_script.py
+
+ import csv
+ import json
+ import os
+
+ import datasets
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+ title = {Boat dataset},
+ author={Tzu-Chi Chen, Inc.},
+ year={2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset is designed to solve an object detection task with images of boats.
+ """
+
+ _HOMEPAGE = "https://huggingface.co/datasets/zhuchi76/Boat_dataset/resolve/main"
+
+ _LICENSE = ""
+
+ _URLS = {
+     "classes": f"{_HOMEPAGE}/data/classes.txt",
+     "train": f"{_HOMEPAGE}/data/instances_train2023.jsonl",
+     "val": f"{_HOMEPAGE}/data/instances_val2023.jsonl",
+     "test": f"{_HOMEPAGE}/data/instances_val2023r.jsonl"
+ }
+
+ class BoatDataset(datasets.GeneratorBasedBuilder):
+
+     VERSION = datasets.Version("1.1.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="Boat_dataset", version=VERSION, description="Dataset for detecting boats in aerial images."),
+     ]
+
+     DEFAULT_CONFIG_NAME = "Boat_dataset"  # Provide a default configuration
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features({
+                 'image_id': datasets.Value('int32'),
+                 'image_path': datasets.Value('string'),
+                 'width': datasets.Value('int32'),
+                 'height': datasets.Value('int32'),
+                 'objects': datasets.Features({
+                     'id': datasets.Sequence(datasets.Value('int32')),
+                     'area': datasets.Sequence(datasets.Value('float32')),
+                     'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('float32'), length=4)),  # [x, y, width, height]
+                     'category': datasets.Sequence(datasets.Value('int32'))
+                 }),
+             }),
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Download all files and extract them
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         # Load class labels from the downloaded classes file
+         with open(downloaded_files["classes"], "r", encoding="utf-8") as file:
+             classes = [line.strip() for line in file]
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "annotations_file": downloaded_files["train"],
+                     "classes": classes,
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "annotations_file": downloaded_files["val"],
+                     "classes": classes,
+                     "split": "val",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "annotations_file": downloaded_files["test"],
+                     "classes": classes,
+                     "split": "val_real",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, annotations_file, classes, split):
+         # Process annotations
+         with open(annotations_file, encoding="utf-8") as f:
+             for key, row in enumerate(f):
+                 try:
+                     data = json.loads(row.strip())
+                     yield key, {
+                         "image_id": data["image_id"],
+                         "image_path": data["image_path"],
+                         "width": data["width"],
+                         "height": data["height"],
+                         "objects": data["objects"],
+                     }
+                 except json.JSONDecodeError:
+                     print(f"Skipping invalid JSON at line {key + 1}: {row}")
+                     continue
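
For context, a minimal usage sketch of the new script follows. It assumes the script is published in the zhuchi76/Boat_dataset repository referenced by _HOMEPAGE (the repo id is an assumption taken from that URL); recent releases of the datasets library also require trust_remote_code=True to run a Hub-hosted loading script.

    from datasets import load_dataset

    # Build the three splits defined in _split_generators
    # (repo id assumed from _HOMEPAGE in the script above).
    ds = load_dataset("zhuchi76/Boat_dataset", trust_remote_code=True)

    print(ds)  # DatasetDict with "train", "validation", and "test" splits
    example = ds["train"][0]
    print(example["image_path"], example["width"], example["height"])
    print(example["objects"]["bbox"])  # list of [x, y, width, height] boxes

Each line of the instances_*.jsonl files is expected to be one JSON object with the keys read in _generate_examples, for example (hypothetical values): {"image_id": 0, "image_path": "images/000000.jpg", "width": 1920, "height": 1080, "objects": {"id": [0], "area": [1024.0], "bbox": [[10.0, 20.0, 32.0, 32.0]], "category": [3]}}.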