Iulia Elisa committed
Commit 49715f1
1 Parent(s): 6accdbd

minor changes

Files changed (3)
  1. README.md +2 -17
  2. load_and_visualise_dataset.ipynb +0 -0
  3. utils.py +0 -322
README.md CHANGED
@@ -90,29 +90,14 @@ xami_dataset = XAMIDataset(
     dataset_name="xami_dataset",
     data_path='./dest_dir')
 ```
-**or**
-
-- using a CLI command
+###
+Or you can simply download only the dataset zip file from HuggingFace using a CLI command:
+
 ```bash
 DEST_DIR='/path/to/local/dataset/dir'
 
 huggingface-cli download iulia-elisa/XAMI-dataset xami_dataset.zip --repo-type dataset --local-dir "$DEST_DIR" && unzip "$DEST_DIR/xami_dataset.zip" -d "$DEST_DIR" && rm "$DEST_DIR/xami_dataset.zip"
-
 ```
-<!--
-# Dataset Split with SKF (Optional)
-
-- The method below allows for dataset splitting, using the pre-generated splits in CSV files. This step is useful when training multiple dataset split versions to gain a more generalised view of metrics.
-```python
-import utils
-
-# run multilabel SKF split with the standard k=4
-csv_files = ['mskf_0.csv', 'mskf_1.csv', 'mskf_2.csv', 'mskf_3.csv']
-
-for idx, csv_file in enumerate(csv_files):
-    mskf = pd.read_csv(csv_file)
-    utils.create_directories_and_copy_files(images_dir, data_in, mskf, idx)
-``` -->
 
 ## Licence
 **[CC BY-NC 3.0 IGO](https://creativecommons.org/licenses/by-nc/3.0/igo/deed.en).**
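
For readers who prefer scripting the download, the same fetch can be done from Python. This is a minimal, hedged sketch using `huggingface_hub` (the destination path is an assumption mirroring `DEST_DIR` above; the repo id and filename come from the CLI command in the diff):

```python
# Hedged sketch: a Python equivalent of the huggingface-cli download above.
import zipfile
from huggingface_hub import hf_hub_download

dest_dir = '/path/to/local/dataset/dir'  # assumed destination, mirrors DEST_DIR above

# Download xami_dataset.zip from the dataset repo...
zip_path = hf_hub_download(
    repo_id='iulia-elisa/XAMI-dataset',
    filename='xami_dataset.zip',
    repo_type='dataset',
    local_dir=dest_dir)

# ...then unpack it in place, like the `unzip` step in the CLI one-liner.
with zipfile.ZipFile(zip_path) as zf:
    zf.extractall(dest_dir)
```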
load_and_visualise_dataset.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
utils.py DELETED
@@ -1,322 +0,0 @@
-import os
-import json
-from shutil import copy
-import pandas as pd
-from pathlib import Path
-from PIL import Image, ImageDraw
-import cv2
-import numpy as np
-import re
-import datasets
-from datasets import Value
-from io import BytesIO
-from PIL import Image, ImageDraw, ImageFont
-import matplotlib.pyplot as plt
-import matplotlib.patches as patches
-
-def create_directories_and_copy_files(images_dir, coco_data, image_data, k):
-    base_dir = os.path.join(images_dir, f'mskf_{k}')
-    os.makedirs(base_dir, exist_ok=True)
-
-    for split in np.unique(image_data['SPLIT']):
-        split_dir = os.path.join(base_dir, split)
-        os.makedirs(split_dir, exist_ok=True)
-
-        # Filter the annotations
-        split_ids = image_data[image_data['SPLIT'] == split]['IMADE_ID'].tolist()
-        annotations = {
-            'images': [img for img in coco_data['images'] if img['id'] in split_ids],
-            'annotations': [ann for ann in coco_data['annotations'] if ann['image_id'] in split_ids],
-            'categories': coco_data['categories']
-        }
-
-        # Write the filtered annotations to a file
-        with open(os.path.join(split_dir, '_annotations.coco.json'), 'w') as f:
-            json.dump(annotations, f, indent=4)
-
-        # Copy the images
-        split_data = image_data[image_data['SPLIT'] == split]
-        for _, row in split_data.iterrows():
-            source = row['IMAGE_PATH']
-            destination = os.path.join(split_dir, os.path.basename(source))
-            copy(source, destination)
-
-    print(f'Dataset split for mskf_{k} was successful.')
-
-def split_to_df(dataset_dir, split):
-    annotations_path = Path(dataset_dir + split + '/_annotations.coco.json')
-
-    with annotations_path.open('r') as f:
-        coco_data = json.load(f)
-
-    def image_from_path(file_path):
-        image = cv2.imread(file_path)
-        return image
-
-    def gen_segmentation(segmentation, width, height):
-        mask_img = np.zeros((height, width, 3), dtype=np.uint8)
-        for segment in segmentation:
-            pts = np.array(segment, np.int32).reshape((-1, 1, 2))
-            cv2.fillPoly(mask_img, [pts], (255, 255, 255))  # Fill color in BGR
-
-        return mask_img
-
-    images_df = pd.DataFrame(coco_data['images'][:50], columns=['id', 'file_name', 'width', 'height'])
-    annotations_df = pd.DataFrame(coco_data['annotations'])
-    df = pd.merge(annotations_df, images_df, left_on='image_id', right_on='id')
-    image_folder = annotations_path.parent
-    df['file_path'] = df['file_name'].apply(lambda x: str(image_folder / x))
-    df['observation'] = df['file_name'].apply(lambda x: x.split('.')[0].replace('_png', ''))
-    df['image'] = df['file_path'].apply(image_from_path)
-    df['segmentation'] = df.apply(lambda row: gen_segmentation(row['segmentation'], row['width'], row['height']), axis=1)
-    df = df.drop('file_path', axis=1)
-    df = df.drop('file_name', axis=1)
-    df['annot_id'] = df['id_x']
-    df = df.drop('id_x', axis=1)
-    df = df.drop('id_y', axis=1)
-
-    # Take an image from df and the corresponding annotations, and plot them on the image:
-    # for i in range(5):
-    #     img = df['image'][i]
-    #     annot_id = df['annot_id'][i]
-    #     # plot the image with the annotation using plt
-    #     if img.dtype != np.uint8:
-    #         img = img.astype(np.uint8)
-    #     # plot
-    #     segm_polygon = df['segmentation'][i]
-    #     plt.imshow(segm_polygon)
-    #     plt.axis('off')
-    #     plt.show()
-    #     plt.close()
-
-    return df, coco_data
-
-def df_to_dataset_dict(df, coco_data, cats_to_colours):
-
-    def annot_on_image(annot_id, img_array, cat_id, annot_type='segm'):
-        if img_array.dtype != np.uint8:
-            img_array = img_array.astype(np.uint8)
-
-        pil_image = Image.fromarray(img_array)
-        draw = ImageDraw.Draw(pil_image)
-        if annot_type == 'bbox':
-            bbox = [annot for annot in coco_data['annotations'] if annot['id'] == annot_id][0]['bbox']
-            x_min, y_min, width, height = bbox
-            top_left = (x_min, y_min)
-            bottom_right = (x_min + width, y_min + height)
-
-            draw.rectangle([top_left, bottom_right], outline=cats_to_colours[cat_id][1], width=2)
-        else:
-            # Look for the annotation in coco_data that corresponds to the annot_id
-            segm_polygon = [annot for annot in coco_data['annotations'] if annot['id'] == annot_id][0]['segmentation'][0]
-            polygon = [(segm_polygon[i], segm_polygon[i+1]) for i in range(0, len(segm_polygon), 2)]
-            draw.polygon(polygon, outline=cats_to_colours[cat_id][1], width=2)
-
-        # plt.imshow(pil_image)
-        # plt.axis('off')
-        # plt.show()
-        # plt.close()
-
-        byte_io = BytesIO()
-        pil_image.save(byte_io, 'PNG')
-        byte_io.seek(0)
-        png_image = Image.open(byte_io)
-
-        return png_image
-
-    dictionary = df.to_dict(orient='list')
-    feats = datasets.Features({"observation id": Value(dtype='string'),
-                               'segmentation': datasets.Image(),
-                               'bbox': datasets.Image(),
-                               'label': Value(dtype='string'),
-                               'area': Value(dtype='string'),
-                               'image shape': Value(dtype='string')})
-
-    dataset_data = {"observation id": dictionary['observation'],
-                    'segmentation': [annot_on_image(dictionary['annot_id'][i], dictionary['image'][i], dictionary['category_id'][i])
-                                     for i in range(len(dictionary['segmentation']))],
-                    'bbox': [annot_on_image(dictionary['annot_id'][i], dictionary['image'][i], dictionary['category_id'][i], annot_type='bbox')
-                             for i in range(len(dictionary['bbox']))],
-                    'label': [cats_to_colours[cat][0] for cat in dictionary['category_id']],
-                    'area': ['%.3f' % (value) for value in dictionary['area']],
-                    'image shape': [f"({dictionary['width'][i]}, {dictionary['height'][i]})" for i in range(len(dictionary['width']))]}
-    the_dataset = datasets.Dataset.from_dict(dataset_data, features=feats)
-
-    return the_dataset
-
-def merge_coco_jsons(first_json, second_json, output_path):
-
-    # Load the first JSON file
-    with open(first_json) as f:
-        coco1 = json.load(f)
-
-    # Load the second JSON file
-    with open(second_json) as f:
-        coco2 = json.load(f)
-
-    # Update IDs in coco2 to ensure they are unique and do not overlap with coco1
-    max_image_id = max(image['id'] for image in coco1['images'])
-    max_annotation_id = max(annotation['id'] for annotation in coco1['annotations'])
-    max_category_id = max(category['id'] for category in coco1['categories'])
-
-    # Add an offset to the second COCO IDs
-    image_id_offset = max_image_id + 1
-    annotation_id_offset = max_annotation_id + 1
-    # category_id_offset = max_category_id + 1
-
-    # Apply the offset to images and annotations in the second JSON
-    for image in coco2['images']:
-        image['id'] += image_id_offset
-
-    for annotation in coco2['annotations']:
-        annotation['id'] += annotation_id_offset
-        annotation['image_id'] += image_id_offset  # Update the image_id reference
-
-    # Merge the two datasets
-    merged_coco = {
-        'images': coco1['images'] + coco2['images'],
-        'annotations': coco1['annotations'] + coco2['annotations'],
-        'categories': coco1['categories']  # If categories are the same; otherwise, merge as needed
-    }
-
-    # Save the merged annotations to a new JSON file
-    with open(output_path, 'w') as f:
-        json.dump(merged_coco, f)
-
-def percentages(n_splits, image_ids, labels):
-    labels_percentages = {}
-    for i in range(n_splits):
-        train_k, valid_k = 0, 0
-        train_labels_counts = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0}
-        valid_labels_counts = {'0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0}
-        for j in range(len(image_ids[i]['train'])):
-            for cat in list(labels[i]['train'][j]):
-                train_labels_counts[cat] += 1
-            train_k += 1
-
-        for j in range(len(image_ids[i]['valid'])):
-            for cat in list(labels[i]['valid'][j]):
-                valid_labels_counts[cat] += 1
-            valid_k += 1
-
-        train_labels_counts = {cat: counts * 1.0 / train_k for cat, counts in train_labels_counts.items()}
-        valid_labels_counts = {cat: counts * 1.0 / valid_k for cat, counts in valid_labels_counts.items()}
-
-        labels_percentages[i] = {'train': train_labels_counts, 'valid': valid_labels_counts}
-
-    return labels_percentages
-
-def make_split(data_in, train_index, valid_index):
-
-    data_in_train = data_in.copy()
-    data_in_valid = data_in.copy()
-
-    data_in_train['images'] = [data_in['images'][train_index[i][0]] for i in range(len(train_index))]
-    data_in_valid['images'] = [data_in['images'][valid_index[i][0]] for i in range(len(valid_index))]
-    train_annot_ids, valid_annot_ids = [], []
-
-    for img_i in data_in_train['images']:
-        annotation_ids = [annot['id'] for annot in data_in_train['annotations'] if annot['image_id'] == img_i['id']]
-        train_annot_ids += annotation_ids
-
-    for img_i in data_in_valid['images']:
-        annotation_ids = [annot['id'] for annot in data_in_valid['annotations'] if annot['image_id'] == img_i['id']]
-        valid_annot_ids += annotation_ids
-
-    data_in_train['annotations'] = [data_in_train['annotations'][id] for id in train_annot_ids]
-    data_in_valid['annotations'] = [data_in_valid['annotations'][id] for id in valid_annot_ids]
-
-    print(len(data_in_train['images']), len(data_in_valid['images']))
-    return data_in_train, data_in_valid
-
-def correct_bboxes(annotations):
-    for ann in annotations:
-        # If the segmentation is in polygon format (COCO polygon)
-        if isinstance(ann['segmentation'], list):
-
-            points = np.array(ann['segmentation']).reshape(-1, 2)
-            x_min, y_min = np.inf, np.inf
-            x_max, y_max = -np.inf, -np.inf
-            x_min = min(x_min, points[:, 0].min())
-            y_min = min(y_min, points[:, 1].min())
-            x_max = max(x_max, points[:, 0].max())
-            y_max = max(y_max, points[:, 1].max())
-
-            width = x_max - x_min
-            height = y_max - y_min
-
-            # The bbox in COCO format [x_min, y_min, width, height]
-            bbox = [x_min, y_min, width, height]
-            x, y, w, h = map(int, bbox)
-            ann['bbox'] = [x, y, w, h]
-
-    return annotations
-
-def highlight_max(s):
-    is_max = s == s.max()
-    return ['background-color: yellow' if v else '' for v in is_max]
-
-def highlight_max_str(s):
-
-    cats = []
-    for cat in s:
-        cats.append([float(match) for match in re.findall(r"[-+]?[0-9]*\.?[0-9]+", cat)][0])
-
-    is_max = cats == np.max(cats)
-    return ['background-color: yellow' if v else '' for v in is_max]
-
-def read_yolo_annotations(annotation_file):
-    with open(annotation_file, 'r') as file:
-        lines = file.readlines()
-
-    annotations = []
-    for line in lines:
-        parts = line.strip().split()
-        class_id = int(parts[0])
-        points = list(map(float, parts[1:]))
-        annotations.append((class_id, points))
-
-    return annotations
-
-def display_image_with_annotations(coco, cat_names, image_id):
-    img = coco.loadImgs(image_id)[0]
-    image_path = os.path.join('./mskf_0/train/', img['file_name'])
-    I = Image.open(image_path)
-    plt.imshow(I); plt.axis('off')
-    ann_ids = coco.getAnnIds(imgIds=img['id'], iscrowd=None)
-    anns = coco.loadAnns(ann_ids)
-    ax = plt.gca()
-
-    for ann in anns:
-        bbox = ann['bbox']
-        rect = patches.Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3],
-                                 linewidth=2, edgecolor='b', facecolor='none')
-        ax.add_patch(rect)
-        ax.text(bbox[0], bbox[1] - 5, cat_names[ann['category_id']],
-                color='blue', fontsize=12, bbox=dict(facecolor='white', alpha=0.5))
-
-    plt.show()
-
-def plot_segmentations(image_path, annotations, category_mapping):
-    image = Image.open(image_path)
-    width, height = image.size
-    draw = ImageDraw.Draw(image)
-
-    try:
-        font = ImageFont.truetype("DejaVuSans.ttf", 16)  # Load a font
-    except IOError:
-        font = ImageFont.load_default()
-
-    for class_id, points in annotations:
-        # Scale points from normalized coordinates to image dimensions
-        scaled_points = [(p[0] * width, p[1] * height) for p in zip(points[0::2], points[1::2])]
-        draw.polygon(scaled_points, outline='green', fill=None)
-
-        category_name = category_mapping[class_id][0]
-        centroid_x = sum([p[0] for p in scaled_points]) / len(scaled_points)
-        centroid_y = sum([p[1] for p in scaled_points]) / len(scaled_points)
-        draw.text((centroid_x, centroid_y), category_name, fill='red', font=font, anchor='ms')
-
-    plt.imshow(image)
-    plt.axis('off')
-    plt.show()
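
For anyone still relying on the removed helpers, this is roughly how they fit together, reconstructed only from the signatures above (a hedged sketch: the dataset path and the `cats_to_colours` mapping are illustrative assumptions, not values taken from this repository):

```python
# Hedged usage sketch of the deleted utils.py helpers; paths and colours are assumptions.
import utils

# Build an annotation DataFrame plus the raw COCO dict for one split
# (split_to_df joins dataset_dir + split + '/_annotations.coco.json').
df, coco_data = utils.split_to_df('./xami_dataset/', 'train')

# Assumed mapping: category_id -> (label name, outline colour); adjust to the real categories.
cats_to_colours = {1: ('artefact-a', 'red'), 2: ('artefact-b', 'blue')}

# Draw segmentations/bboxes onto the images and wrap everything as a datasets.Dataset.
hf_dataset = utils.df_to_dataset_dict(df, coco_data, cats_to_colours)
```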