lowercaseonly committed on
Commit
81e7fbf
1 Parent(s): e398178

Updated Utility Files

Browse files
Files changed (7) hide show
  1. README.md +5 -0
  2. classes_color.json +3 -0
  3. consistency.py +123 -56
  4. loader.py +29 -12
  5. requirements.txt +14 -0
  6. segmentation.py +8 -28
  7. utils.py +19 -4
README.md CHANGED
@@ -28,6 +28,7 @@ gtdh-hd
28
  | loader.py # Simple Dataset Loader and Storage Functions
29
  │ segmentation.py # Multiclass Segmentation Generation
30
  │ utils.py # Helper Functions
 
31
  └───drafter_D
32
  │ └───annotations # Bounding Box Annotations
33
  │ │ │ CX_DY_PZ.xml
@@ -305,3 +306,7 @@ Rotation annotations are currently work in progress. They should be provided for
305
  ```
306
  labelme --labels "connector" --config "{shift_auto_shape_color: 1}" --nodata
307
  ```
 
 
 
 
 
28
  | loader.py # Simple Dataset Loader and Storage Functions
29
  │ segmentation.py # Multiclass Segmentation Generation
30
  │ utils.py # Helper Functions
31
+ │ requirements.txt # Requirements for Scripts
32
  └───drafter_D
33
  │ └───annotations # Bounding Box Annotations
34
  │ │ │ CX_DY_PZ.xml
 
306
  ```
307
  labelme --labels "connector" --config "{shift_auto_shape_color: 1}" --nodata
308
  ```
309
+
310
+ ## Licence
311
+ The entire content of this repository, including all image files and annotation files, as well as the source code, metadata and documentation, has been published under the [Creative Commons Attribution Share Alike Licence 3.0](https://creativecommons.org/licenses/by-sa/3.0/).
312
+
classes_color.json CHANGED
@@ -69,5 +69,8 @@
69
  "antenna": [112,128,144],
70
  "crystal": [230,230,250],
71
 
 
 
 
72
  "unknown": [240,255,240]
73
  }
 
69
  "antenna": [112,128,144],
70
  "crystal": [230,230,250],
71
 
72
+ "magnetic": [0,230,230],
73
+ "optical": [230,0,230],
74
+
75
  "unknown": [240,255,240]
76
  }
consistency.py CHANGED
@@ -7,6 +7,7 @@ import re
7
 
8
  # Project Imports
9
  from loader import load_classes, load_properties, read_dataset, write_dataset, file_name
 
10
 
11
  # Third-Party Imports
12
  import matplotlib.pyplot as plt
@@ -29,41 +30,42 @@ MAPPING_LOOKUP = {
29
  }
30
 
31
 
32
- def consistency(db: list, classes: dict, recover: dict = {}) -> tuple:
33
  """Checks Whether Annotation Classes are in provided Classes Dict and Attempts Recovery"""
34
 
35
  total, ok, mapped, faulty, rotation, text = 0, 0, 0, 0, 0, 0
36
 
37
  for sample in db:
38
- for bbox in sample["bboxes"] + sample["polygons"] + sample["points"]:
39
  total += 1
40
 
41
- if bbox["class"] in classes:
42
  ok += 1
43
 
44
- if bbox["class"] in recover:
45
- bbox["class"] = recover[bbox["class"]]
46
  mapped += 1
47
 
48
- if bbox["class"] not in classes and bbox["class"] not in recover:
49
- print(f"Can't recover faulty label in {file_name(sample)}: {bbox['class']}")
50
  faulty += 1
51
 
52
- if bbox["rotation"] is not None:
53
  rotation += 1
54
 
55
- if bbox["class"] == "text" and bbox["text"] is None:
56
- print(f"Missing Text in {file_name(sample)} -> {bbox['xmin']}, {bbox['ymin']}")
 
57
 
58
- if bbox["text"] is not None:
59
- if bbox["text"].strip() != bbox["text"]:
60
- print(f"Removing leading of trailing spaces from: {bbox['text']}")
61
- bbox["text"] = bbox["text"].strip()
62
 
63
- if bbox["class"] != "text":
64
- print(f"Text string outside Text BB in {file_name(sample)}: {bbox['class']}: {bbox['text']}")
65
 
66
- text += 1
67
 
68
  return total, ok, mapped, faulty, rotation, text
69
 
@@ -109,12 +111,21 @@ def circuit_annotations(db: list, classes: dict) -> None:
109
  plt.show()
110
 
111
 
 
 
 
 
 
 
 
 
 
112
  def class_distribution(db: list, classes: dict) -> None:
113
  """Plots the Class Distribution over the Dataset"""
114
 
115
  class_nbrs = np.arange(len(classes))
116
- class_counts = [sum([len([bbox for bbox in sample["bboxes"] + sample["polygons"] + sample["points"]
117
- if bbox["class"] == cls])
118
  for sample in db]) for cls in classes]
119
 
120
  bars = plt.bar(class_nbrs, class_counts)
@@ -154,12 +165,15 @@ def image_count(drafter: int = None, segmentation: bool = False) -> int:
154
  (not drafter or f"drafter_{drafter}{os.sep}" in root)])
155
 
156
 
157
- def read_check_write(classes: dict, drafter: int = None, segmentation: bool = False):
158
  """Reads Annotations, Checks Consistency with Provided Classes
159
  Writes Corrected Annotations Back and Returns the Annotations"""
160
 
161
  db = read_dataset(drafter=drafter, segmentation=segmentation)
162
- ann_total, ann_ok, ann_mapped, ann_faulty, ann_rot, ann_text = consistency(db, classes)
 
 
 
163
  write_dataset(db, segmentation=segmentation)
164
 
165
  print("")
@@ -179,53 +193,101 @@ def read_check_write(classes: dict, drafter: int = None, segmentation: bool = Fa
179
  return db
180
 
181
 
182
- def text_statistics(db: list, plot_unique_labels: bool = False):
183
- """Generates and Plots Statistics on Text Classes"""
184
-
185
- print("")
186
- print(" Text Statistics")
187
- print("---------------------")
188
-
189
- text_bbs = len([bbox for sample in db for bbox in sample["bboxes"] if bbox["class"] == "text"])
190
- print(f"Text BB Annotations: {text_bbs}")
191
-
192
- text_labels = [bbox["text"] for sample in db for bbox in sample["bboxes"] if bbox["text"] is not None]
193
- print(f"Overall Text Label Count: {len(text_labels)}")
194
 
195
- text_labels_unique = set(text_labels)
196
- print(f"Unique Text Label Count: {len(text_labels_unique)}")
197
 
198
- print(f"Total Character Count: {sum([len(text_label) for text_label in text_labels])}")
199
 
200
- print("\nSet of all characters occurring in all text labels:")
201
- char_set = set([char_set for label in text_labels_unique for char_set in label])
202
- chars = sorted(list(char_set))
203
- print(chars)
204
 
205
  char_nbrs = np.arange(len(chars))
206
  char_counts = [sum([len([None for text_char in text_label if text_char == char])
207
- for text_label in text_labels])
208
  for char in chars]
209
  plt.bar(char_nbrs, char_counts)
210
  plt.xticks(char_nbrs, chars)
211
- plt.title("Character Frequencies")
212
  plt.xlabel("Character")
213
  plt.ylabel("Overall Count")
214
  plt.show()
215
- print("\nCharacter Frequencies:")
216
- print({char: 1/char_count for char, char_count in zip(chars, char_counts)})
217
-
218
- max_text_len = max([len(text_label) for text_label in text_labels])
219
- text_lengths = np.arange(max_text_len)+1
220
- text_count_by_length = [len([None for text_label in text_labels if len(text_label) == text_length])
221
- for text_length in text_lengths]
222
- plt.bar(text_lengths, text_count_by_length)
223
- plt.xticks(text_lengths, rotation=90)
224
- plt.title("Text Length Distribution")
225
- plt.xlabel("Character Count")
226
- plt.ylabel("Annotation Count")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
227
  plt.show()
228
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
229
  text_instances = text_labels_unique if plot_unique_labels else text_labels
230
  text_classes_names = []
231
  text_classes_instances = []
@@ -252,18 +314,23 @@ def text_statistics(db: list, plot_unique_labels: bool = False):
252
  plt.tight_layout()
253
  plt.show()
254
 
 
 
 
 
255
 
256
 
257
  if __name__ == "__main__":
258
  drafter_selected = int(sys.argv[1]) if len(sys.argv) == 2 else None
259
  classes = load_classes()
260
 
261
- #db_poly = read_check_write(classes, drafter_selected, segmentation=True)
262
  db_bb = read_check_write(classes, drafter_selected)
 
263
 
264
  class_sizes(db_bb, classes)
265
  circuit_annotations(db_bb, classes)
 
266
  class_distribution(db_bb, classes)
267
- #class_distribution(db_poly, classes)
268
  consistency_circuit(db_bb, classes)
269
  text_statistics(db_bb)
 
7
 
8
  # Project Imports
9
  from loader import load_classes, load_properties, read_dataset, write_dataset, file_name
10
+ from utils import bbdist
11
 
12
  # Third-Party Imports
13
  import matplotlib.pyplot as plt
 
30
  }
31
 
32
 
33
+ def consistency(db: list, classes: dict, recover: dict = {}, skip_texts=False) -> tuple:
34
  """Checks Whether Annotation Classes are in provided Classes Dict and Attempts Recovery"""
35
 
36
  total, ok, mapped, faulty, rotation, text = 0, 0, 0, 0, 0, 0
37
 
38
  for sample in db:
39
+ for annotation in sample["bboxes"] + sample["polygons"] + sample["points"]:
40
  total += 1
41
 
42
+ if annotation["class"] in classes:
43
  ok += 1
44
 
45
+ if annotation["class"] in recover:
46
+ annotation["class"] = recover[annotation["class"]]
47
  mapped += 1
48
 
49
+ if annotation["class"] not in classes and annotation["class"] not in recover:
50
+ print(f"Can't recover faulty label in {file_name(sample)}: {annotation['class']}")
51
  faulty += 1
52
 
53
+ if annotation["rotation"] is not None:
54
  rotation += 1
55
 
56
+ if not skip_texts:
57
+ if annotation["class"] == "text" and annotation["text"] is None:
58
+ print(f"Missing Text in {file_name(sample)} -> {annotation['xmin']}, {annotation['ymin']}")
59
 
60
+ if annotation["text"] is not None:
61
+ if annotation["text"].strip() != annotation["text"]:
62
+ print(f"Removing leading of trailing spaces from: {annotation['text']}")
63
+ annotation["text"] = annotation["text"].strip()
64
 
65
+ if annotation["class"] != "text":
66
+ print(f"Text string outside Text Annotation in {file_name(sample)} [{annotation['xmin']:4}, {annotation['ymin']:4}]: {annotation['class']}: {annotation['text']}")
67
 
68
+ text += 1
69
 
70
  return total, ok, mapped, faulty, rotation, text
71
 
 
111
  plt.show()
112
 
113
 
114
+ def annotation_distribution(db: list) -> None:
115
+
116
+ amount_distribution([sample['bboxes'] for sample in db],
117
+ "Image Sample Count by BB Annotation Count",
118
+ "BB Annotation Count",
119
+ "Image Sample Count",
120
+ ticks=False)
121
+
122
+
123
  def class_distribution(db: list, classes: dict) -> None:
124
  """Plots the Class Distribution over the Dataset"""
125
 
126
  class_nbrs = np.arange(len(classes))
127
+ class_counts = [sum([len([annotation for annotation in sample["bboxes"] + sample["polygons"] + sample["points"]
128
+ if annotation["class"] == cls])
129
  for sample in db]) for cls in classes]
130
 
131
  bars = plt.bar(class_nbrs, class_counts)
 
165
  (not drafter or f"drafter_{drafter}{os.sep}" in root)])
166
 
167
 
168
+ def read_check_write(classes: dict, drafter: int = None, segmentation: bool = False) -> list:
169
  """Reads Annotations, Checks Consistency with Provided Classes
170
  Writes Corrected Annotations Back and Returns the Annotations"""
171
 
172
  db = read_dataset(drafter=drafter, segmentation=segmentation)
173
+ ann_total, ann_ok, ann_mapped, ann_faulty, ann_rot, ann_text = consistency(db,
174
+ classes,
175
+ MAPPING_LOOKUP,
176
+ skip_texts=segmentation)
177
  write_dataset(db, segmentation=segmentation)
178
 
179
  print("")
 
193
  return db
194
 
195
 
196
+ def unique_characters(texts: list) -> list:
197
+ """Returns the Sorted Set of Unique Characters"""
 
 
 
 
 
 
 
 
 
 
198
 
199
+ char_set = set([char for text in texts for char in text])
200
+ return sorted(list(char_set))
201
 
 
202
 
203
+ def character_distribution(texts: list, chars: list):
204
+ """Plots and Returns the Character Distribution"""
 
 
205
 
206
  char_nbrs = np.arange(len(chars))
207
  char_counts = [sum([len([None for text_char in text_label if text_char == char])
208
+ for text_label in texts])
209
  for char in chars]
210
  plt.bar(char_nbrs, char_counts)
211
  plt.xticks(char_nbrs, chars)
212
+ plt.title("Character Distribution")
213
  plt.xlabel("Character")
214
  plt.ylabel("Overall Count")
215
  plt.show()
216
+
217
+ return char_counts
218
+
219
+
220
+ def amount_distribution(list_of_lists: list, title: str, x_label: str, y_label: str, ticks: bool = True) -> None:
221
+ """Plots a Histogram of the Amount of Things Contained in a List of Lists"""
222
+
223
+ max_bin = max([len(lst) for lst in list_of_lists])
224
+ bin_numbers = np.arange(max_bin)+1
225
+ text_count_by_length = [len([None for lst in list_of_lists if len(lst) == amount])
226
+ for amount in bin_numbers]
227
+ plt.bar(bin_numbers, text_count_by_length)
228
+
229
+ if ticks:
230
+ plt.xticks(bin_numbers, rotation=90)
231
+
232
+ plt.title(title)
233
+ plt.xlabel(x_label)
234
+ plt.ylabel(y_label)
235
+ plt.show()
236
+
237
+
238
+ def text_proximity(db: list, cls_name: str, cls_regex: str):
239
+ """Proximity-Based Regex Validation"""
240
+
241
+ cls_stat = {}
242
+
243
+ for sample in db:
244
+ bbs_text = [bbox for bbox in sample["bboxes"] if bbox["class"] == "text"]
245
+ bbs_symbol = [bbox for bbox in sample["bboxes"] if bbox["class"] not in ["text", "junction", "crossover"]]
246
+
247
+ for bb_text in bbs_text:
248
+ if bb_text["text"]:
249
+ if re.match(cls_regex, bb_text["text"]):
250
+ bb_closest_class = sorted(bbs_symbol, key=lambda bb: bbdist(bb_text, bb))[0]["class"]
251
+ cls_stat[bb_closest_class] = cls_stat.get(bb_closest_class, 0) + 1
252
+
253
+ cls_stat = sorted(cls_stat.items(), key=lambda cls: -cls[1])
254
+ print(cls_stat)
255
+ plt.bar(range(len(cls_stat)), [name for _, name in cls_stat])
256
+ plt.xticks(range(len(cls_stat)), labels=[name for name, _ in cls_stat], rotation=90)
257
+ plt.title(f"Neighbor Distribution for {cls_name} Text Annotations")
258
+ plt.xlabel("Symbol Class")
259
+ plt.ylabel("Number of Closest Neighbors")
260
+ plt.tight_layout()
261
  plt.show()
262
 
263
+
264
+ def text_statistics(db: list, plot_unique_labels: bool = False):
265
+ """Generates and Plots Statistics on Text Classes"""
266
+
267
+ text_bbs = [bbox for sample in db for bbox in sample["bboxes"] if bbox["class"] == "text"]
268
+ text_labels = [bbox["text"] for bbox in text_bbs if type(bbox["text"]) is str and len(text_bbs) > 0]
269
+ text_labels_unique = set(text_labels)
270
+ chars_unique = unique_characters(text_labels)
271
+ char_counts = character_distribution(text_labels, chars_unique)
272
+ amount_distribution(text_labels, "Text Length Distribution", "Character Count", "Annotation Count")
273
+
274
+ print("")
275
+ print(" Text Statistics")
276
+ print("---------------------")
277
+ print(f"Text BB Annotations: {len(text_bbs)}")
278
+ print(f"Overall Text Label Count: {len(text_labels)}")
279
+ print(f"Annotation Completeness: {100*len(text_labels)/len(text_bbs):.2f}%")
280
+ print(f"Unique Text Label Count: {len(text_labels_unique)}")
281
+ print(f"Total Character Count: {sum([len(text_label) for text_label in text_labels])}")
282
+ print(f"Character Types: {len(chars_unique)}")
283
+ print("\n\nSet of all characters occurring in all text labels:")
284
+ print(chars_unique)
285
+ print("\n\nSet of Text Labels:")
286
+ print(text_labels_unique)
287
+
288
+ print("\nCharacter Frequencies:")
289
+ print({char: 1/char_count for char, char_count in zip(chars_unique, char_counts)})
290
+
291
  text_instances = text_labels_unique if plot_unique_labels else text_labels
292
  text_classes_names = []
293
  text_classes_instances = []
 
314
  plt.tight_layout()
315
  plt.show()
316
 
317
+ text_proximity(db, "Capacitor Name", "^C[0-9]+$")
318
+ text_proximity(db, "Resistor Name", "^R[0-9]+$")
319
+ text_proximity(db, "Inductor Name", "^L[0-9]+$")
320
+
321
 
322
 
323
  if __name__ == "__main__":
324
  drafter_selected = int(sys.argv[1]) if len(sys.argv) == 2 else None
325
  classes = load_classes()
326
 
 
327
  db_bb = read_check_write(classes, drafter_selected)
328
+ db_poly = read_check_write(classes, drafter_selected, segmentation=True)
329
 
330
  class_sizes(db_bb, classes)
331
  circuit_annotations(db_bb, classes)
332
+ annotation_distribution(db_bb)
333
  class_distribution(db_bb, classes)
334
+ class_distribution(db_poly, classes)
335
  consistency_circuit(db_bb, classes)
336
  text_statistics(db_bb)
loader.py CHANGED
@@ -49,10 +49,16 @@ def _sample_info_from_path(path: str) -> tuple:
49
  return drafter.split("_")[1], int(circuit[1:]), int(drawing[1:]), int(picture[1:]), suffix
50
 
51
 
 
 
 
 
 
 
52
  def file_name(sample: dict) -> str:
53
  """return the Raw Image File Name of a Sample"""
54
 
55
- return f"C{sample['circuit']}_D{sample['drawing']}_P{sample['picture']}.{sample['format']}"
56
 
57
 
58
  def read_pascal_voc(path: str) -> dict:
@@ -127,10 +133,15 @@ def read_labelme(path: str) -> dict:
127
  drafter, circuit, drawing, picture, _ = _sample_info_from_path(path)
128
  suffix = json_data['imagePath'].split(".")[-1]
129
 
130
- return {'img_path': json_data['imagePath'], 'drafter': drafter, 'circuit': circuit,
131
  'drawing': drawing, 'picture': picture, 'format': suffix,
132
  'height': json_data['imageHeight'], 'width': json_data['imageWidth'], 'bboxes': [],
133
- 'polygons': [{'class': shape['label'], 'points': shape['points'],
 
 
 
 
 
134
  'rotation': shape.get('rotation', None),
135
  'text': shape.get('text', None),
136
  'group': shape.get('group_id', None)}
@@ -150,19 +161,25 @@ def write_labelme(geo_data: dict, path: str = None) -> None:
150
  f"C{geo_data['circuit']}_D{geo_data['drawing']}_P{geo_data['picture']}.json")
151
 
152
  with open(path, 'w') as json_file:
153
- json.dump({'version': '3.16.7', 'flags': {}, 'lineColor': [0, 255, 0, 128], 'fillColor': [255, 0, 0, 128],
154
- 'imagePath': geo_data['img_path'], 'imageData': None,
155
- 'imageHeight': geo_data['height'], 'imageWidth': geo_data['width'],
156
- 'shapes': [{'label': polygon['class'], 'line_color': None, 'fill_color': None,
157
  'points': polygon['points'],
158
- **({'group_id': polygon['group']} if polygon['group'] is not None else {}),
 
159
  **({'rotation': polygon['rotation']} if polygon.get('rotation', None) else {}),
160
  **({'text': polygon['text']} if polygon.get('text', None) else {}),
161
  'shape_type': 'polygon', 'flags': {}}
162
  for polygon in geo_data['polygons']] +
163
  [{'label': point['class'], 'points': [[point['points'][0], point['points'][1]]],
164
  'group_id': point['group'], 'shape_type': 'point', 'flags': {}}
165
- for point in geo_data['points']]},
 
 
 
 
 
 
166
  json_file, indent=2)
167
 
168
 
@@ -206,7 +223,7 @@ def read_snippets(**kwargs):
206
 
207
  for img, annotations in read_images(**kwargs):
208
  for bbox in annotations['bboxes']:
209
- snippets += [(img[bbox['ymin']:bbox['ymax'], bbox['xmin']:bbox['xmax']], bbox)]
210
 
211
  return snippets
212
 
@@ -217,11 +234,11 @@ if __name__ == "__main__":
217
  os.mkdir("test")
218
  args = {'drafter': int(sys.argv[1])} if len(sys.argv) == 2 else {}
219
 
220
- for nbr, (snippet, bbox) in enumerate(read_snippets(**args)):
221
  if bbox['class'] == "text" and bbox.get("text", ""):
222
  if bbox['rotation'] == 90:
223
  snippet = cv2.rotate(snippet, cv2.ROTATE_90_CLOCKWISE)
224
  if bbox['rotation'] == 270:
225
  snippet = cv2.rotate(snippet, cv2.ROTATE_90_COUNTERCLOCKWISE)
226
 
227
- cv2.imwrite(join("test", f"{bbox['text']}.{nbr}.png"), snippet)
 
49
  return drafter.split("_")[1], int(circuit[1:]), int(drawing[1:]), int(picture[1:]), suffix
50
 
51
 
52
+ def sample_name(sample: dict) -> str:
53
+ """Returns the Name of a Sample"""
54
+
55
+ return f"C{sample['circuit']}_D{sample['drawing']}_P{sample['picture']}"
56
+
57
+
58
  def file_name(sample: dict) -> str:
59
  """return the Raw Image File Name of a Sample"""
60
 
61
+ return f"{sample_name(sample)}.{sample['format']}"
62
 
63
 
64
  def read_pascal_voc(path: str) -> dict:
 
133
  drafter, circuit, drawing, picture, _ = _sample_info_from_path(path)
134
  suffix = json_data['imagePath'].split(".")[-1]
135
 
136
+ return {'img_path': json_data['imagePath'].replace("\\", "/"), 'drafter': drafter, 'circuit': circuit,
137
  'drawing': drawing, 'picture': picture, 'format': suffix,
138
  'height': json_data['imageHeight'], 'width': json_data['imageWidth'], 'bboxes': [],
139
+ 'polygons': [{'class': shape['label'],
140
+ 'bbox': {'xmin': min(point[0] for point in shape['points']),
141
+ 'ymin': min(point[1] for point in shape['points']),
142
+ 'xmax': max(point[0] for point in shape['points']),
143
+ 'ymax': max(point[1] for point in shape['points'])},
144
+ 'points': shape['points'],
145
  'rotation': shape.get('rotation', None),
146
  'text': shape.get('text', None),
147
  'group': shape.get('group_id', None)}
 
161
  f"C{geo_data['circuit']}_D{geo_data['drawing']}_P{geo_data['picture']}.json")
162
 
163
  with open(path, 'w') as json_file:
164
+ json.dump({'version': '5.2.0',
165
+ 'flags': {},
166
+ 'shapes': [{'line_color': None, 'fill_color': None,'label': polygon['class'],
 
167
  'points': polygon['points'],
168
+ 'group_id': polygon.get('group', None),
169
+ 'description': polygon.get('description', None),
170
  **({'rotation': polygon['rotation']} if polygon.get('rotation', None) else {}),
171
  **({'text': polygon['text']} if polygon.get('text', None) else {}),
172
  'shape_type': 'polygon', 'flags': {}}
173
  for polygon in geo_data['polygons']] +
174
  [{'label': point['class'], 'points': [[point['points'][0], point['points'][1]]],
175
  'group_id': point['group'], 'shape_type': 'point', 'flags': {}}
176
+ for point in geo_data['points']],
177
+ 'imagePath': geo_data['img_path'],
178
+ 'imageData': None,
179
+ 'imageHeight': geo_data['height'],
180
+ 'imageWidth': geo_data['width'],
181
+ 'lineColor': [0, 255, 0, 128],
182
+ 'fillColor': [255, 0, 0, 128]},
183
  json_file, indent=2)
184
 
185
 
 
223
 
224
  for img, annotations in read_images(**kwargs):
225
  for bbox in annotations['bboxes']:
226
+ snippets += [(img[bbox['ymin']:bbox['ymax'], bbox['xmin']:bbox['xmax']], bbox, sample_name(annotations))]
227
 
228
  return snippets
229
 
 
234
  os.mkdir("test")
235
  args = {'drafter': int(sys.argv[1])} if len(sys.argv) == 2 else {}
236
 
237
+ for snippet, bbox, sample in read_snippets(**args):
238
  if bbox['class'] == "text" and bbox.get("text", ""):
239
  if bbox['rotation'] == 90:
240
  snippet = cv2.rotate(snippet, cv2.ROTATE_90_CLOCKWISE)
241
  if bbox['rotation'] == 270:
242
  snippet = cv2.rotate(snippet, cv2.ROTATE_90_COUNTERCLOCKWISE)
243
 
244
+ cv2.imwrite(join("test", f"{bbox['text']}___{sample}_{bbox['ymin']}_{bbox['ymax']}_{bbox['xmin']}_{bbox['xmax']}.png"), snippet)
requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ contourpy==1.1.0
2
+ cv==1.0.0
3
+ cycler==0.11.0
4
+ fonttools==4.40.0
5
+ kiwisolver==1.4.4
6
+ lxml==4.9.2
7
+ matplotlib==3.7.1
8
+ numpy==1.25.0
9
+ opencv-python==4.7.0.72
10
+ packaging==23.1
11
+ Pillow==9.5.0
12
+ pyparsing==3.1.0
13
+ python-dateutil==2.8.2
14
+ six==1.16.0
segmentation.py CHANGED
@@ -9,7 +9,7 @@ from math import dist
9
 
10
  # Project Imports
11
  from loader import read_pascal_voc, read_labelme, write_labelme, load_classes_ports
12
- from utils import transform, associated_keypoints
13
 
14
  # Third-Party Imports
15
  import cv2
@@ -34,9 +34,8 @@ def binary_to_multi_seg_map(drafter: str, sample: str, suffix: str, source_folde
34
  shape_mask = np.ones(bin_seg_map.shape, dtype=np.uint8)*255
35
  geo_data = read_labelme(join(drafter, source_folder, f"{sample}.json"))
36
 
37
- for shape in sorted(geo_data["polygons"],
38
- key=lambda shape: -(max([p[0] for p in shape['points']])-min([p[0] for p in shape['points']])) *
39
- (max([p[1] for p in shape['points']])-min([p[1] for p in shape['points']]))):
40
  cv2.fillPoly(shape_mask,
41
  pts=[np.array(shape["points"], dtype=np.int32)],
42
  color=color_map[shape["class"]])
@@ -80,10 +79,10 @@ def generate_keypoints(drafter: str, sample: str, suffix: str, source_folder: st
80
  shape['group'] = nbr
81
 
82
  if shape['class'] != "text" and shape['class'] != "wire":
83
- x_min = max(int(min([p[0] for p in shape['points']]))-margin, 0)
84
- x_max = min(int(max([p[0] for p in shape['points']]))+margin, bin_seg_map.shape[1])
85
- y_min = max(int(min([p[1] for p in shape['points']]))-margin, 0)
86
- y_max = min(int(max([p[1] for p in shape['points']]))+margin, bin_seg_map.shape[0])
87
  cropout = bin_seg_map[y_min:y_max, x_min:x_max]
88
  shape_mask = np.zeros((y_max-y_min, x_max-x_min), dtype=np.uint8)
89
  cv2.polylines(shape_mask, pts=[np.array(shape["points"]-np.array([[x_min, y_min]]), dtype=np.int32)],
@@ -210,25 +209,6 @@ def refine_polygons(drafter: str, sample: str, suffix: str, source_folder: str,
210
  write_labelme(geo_data, join(drafter, target_folder, f"{sample}.json"))
211
 
212
 
213
- def bounding_box(points):
214
- xmin = min(point[0] for point in points)
215
- ymin = min(point[1] for point in points)
216
- xmax = max(point[0] for point in points)
217
- ymax = max(point[1] for point in points)
218
- return [xmin, ymin, xmax, ymax]
219
-
220
-
221
- def overlap(bbox1, bbox2):
222
-
223
- if bbox1["xmin"] > bbox2[2] or bbox1["xmax"] < bbox2[0]:
224
- return False
225
-
226
- if bbox1["ymin"] > bbox2[3] or bbox1["ymax"] < bbox2[1]:
227
- return False
228
-
229
- return True
230
-
231
-
232
  def find_closest_points(list1, list2):
233
  reordered_list2 = []
234
  for x1, y1 in list1:
@@ -256,7 +236,7 @@ def connector_type_assignment(drafter: str, sample: str, suffix: str, source_fol
256
  connectors = associated_keypoints(instances, shape)
257
  cls_ports = classes_ports[shape["class"]]
258
  bboxes_match = [bbox for bbox in bboxes['bboxes']
259
- if overlap(bbox, bounding_box(shape["points"])) and bbox['class'] == shape['class']]
260
 
261
  if len(cls_ports) != len(connectors):
262
  print(f" Bad Connector Count: {shape['class']} {shape['points'][0]} -> {len(cls_ports)} vs. {len(connectors)}")
 
9
 
10
  # Project Imports
11
  from loader import read_pascal_voc, read_labelme, write_labelme, load_classes_ports
12
+ from utils import transform, associated_keypoints, overlap
13
 
14
  # Third-Party Imports
15
  import cv2
 
34
  shape_mask = np.ones(bin_seg_map.shape, dtype=np.uint8)*255
35
  geo_data = read_labelme(join(drafter, source_folder, f"{sample}.json"))
36
 
37
+ for shape in sorted(geo_data["polygons"], key=lambda shape: -(shape['bbox']['xmax']-shape['bbox']['xmin']) *
38
+ (shape['bbox']['ymax']-shape['bbox']['ymin'])):
 
39
  cv2.fillPoly(shape_mask,
40
  pts=[np.array(shape["points"], dtype=np.int32)],
41
  color=color_map[shape["class"]])
 
79
  shape['group'] = nbr
80
 
81
  if shape['class'] != "text" and shape['class'] != "wire":
82
+ x_min = max(int(shape['bbox']['xmin'])-margin, 0)
83
+ x_max = min(int(shape['bbox']['xmax'])+margin, bin_seg_map.shape[1])
84
+ y_min = max(int(shape['bbox']['ymin'])-margin, 0)
85
+ y_max = min(int(shape['bbox']['ymax'])+margin, bin_seg_map.shape[0])
86
  cropout = bin_seg_map[y_min:y_max, x_min:x_max]
87
  shape_mask = np.zeros((y_max-y_min, x_max-x_min), dtype=np.uint8)
88
  cv2.polylines(shape_mask, pts=[np.array(shape["points"]-np.array([[x_min, y_min]]), dtype=np.int32)],
 
209
  write_labelme(geo_data, join(drafter, target_folder, f"{sample}.json"))
210
 
211
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
212
  def find_closest_points(list1, list2):
213
  reordered_list2 = []
214
  for x1, y1 in list1:
 
236
  connectors = associated_keypoints(instances, shape)
237
  cls_ports = classes_ports[shape["class"]]
238
  bboxes_match = [bbox for bbox in bboxes['bboxes']
239
+ if overlap(bbox, shape["bbox"]) and bbox['class'] == shape['class']]
240
 
241
  if len(cls_ports) != len(connectors):
242
  print(f" Bad Connector Count: {shape['class']} {shape['points'][0]} -> {len(cls_ports)} vs. {len(connectors)}")
utils.py CHANGED
@@ -1,7 +1,7 @@
1
  """utils.py: Helper Functions to keep this Repo Standalone"""
2
 
3
  # System Imports
4
- from math import sin, cos, radians
5
 
6
  __author__ = "Johannes Bayer"
7
  __copyright__ = "Copyright 2023, DFKI"
@@ -42,6 +42,24 @@ def transform(port, bb):
42
  return {"name": port['name'], "position": p}
43
 
44
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  def associated_keypoints(instances, shape):
46
  """Returns the points with same group id as the provided polygon"""
47
 
@@ -49,9 +67,6 @@ def associated_keypoints(instances, shape):
49
  if point["group"] == shape["group"] and point["class"] == "connector"]
50
 
51
 
52
- def poly_to_bb():
53
- pass
54
-
55
  def IoU(bb1, bb2):
56
  """Intersection over Union"""
57
 
 
1
  """utils.py: Helper Functions to keep this Repo Standalone"""
2
 
3
  # System Imports
4
+ from math import sin, cos, radians, sqrt
5
 
6
  __author__ = "Johannes Bayer"
7
  __copyright__ = "Copyright 2023, DFKI"
 
42
  return {"name": port['name'], "position": p}
43
 
44
 
45
+ def bbdist(bb1, bb2):
46
+ """Calculates the Distance between two Bounding Box Annotations"""
47
+
48
+ return sqrt(((bb1["xmin"]+bb1["xmax"])/2 - (bb2["xmin"]+bb2["xmax"])/2)**2 +
49
+ ((bb1["ymin"]+bb1["ymax"])/2 - (bb2["ymin"]+bb2["ymax"])/2)**2)
50
+
51
+
52
+ def overlap(bbox1, bbox2):
53
+
54
+ if bbox1["xmin"] > bbox2["xmax"] or bbox1["xmax"] < bbox2["xmin"]:
55
+ return False
56
+
57
+ if bbox1["ymin"] > bbox2["ymax"] or bbox1["ymax"] < bbox2["ymin"]:
58
+ return False
59
+
60
+ return True
61
+
62
+
63
  def associated_keypoints(instances, shape):
64
  """Returns the points with same group id as the provided polygon"""
65
 
 
67
  if point["group"] == shape["group"] and point["class"] == "connector"]
68
 
69
 
 
 
 
70
  def IoU(bb1, bb2):
71
  """Intersection over Union"""
72