correct bug and better pool format
- .gitignore +4 -0
- modules/display.py +3 -3
- modules/eval.py +36 -23
- modules/streamlit_utils.py +2 -2
- modules/toXML.py +52 -20
- toWizard.py +45 -0
.gitignore
CHANGED
@@ -23,3 +23,7 @@ result_bpmn.bpmn
 BPMN_creation.ipynb

 *.png
+
+*.ipynb
+
+*.pmw
modules/display.py
CHANGED
@@ -75,6 +75,9 @@ def draw_stream(image,
     new_scaled_size = (int(original_size[0] * scale_), int(original_size[1] * scale_))

     for i in range(len(prediction['boxes'])):
+        if only_show is not None and only_show != 'all':
+            if prediction['labels'][i] != list(class_dict.values()).index(only_show):
+                continue
         box = prediction['boxes'][i]
         x1, y1, x2, y2 = box
         if resize:
@@ -83,9 +86,6 @@ def draw_stream(image,
         if score < score_threshold:
             continue
         if draw_boxes:
-            if only_show is not None and only_show != 'all':
-                if prediction['labels'][i] != list(class_dict.values()).index(only_show):
-                    continue
             #dont show the lanes
             if prediction['labels'][i] == list(class_dict.values()).index('lane'):
                 continue
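The effect of this change is that the only_show filter now runs at the top of the prediction loop, so it applies to every detected box instead of only to the boxes drawn when draw_boxes is set. A minimal sketch of the reordered loop, assuming the prediction dict and class_dict shapes used in the diff (the helper name is illustrative, not part of the repo):

# Hedged sketch of the reordered filter; prediction = {'boxes', 'labels', 'scores'}
# and class_dict maps label indices to class names, as in the diff above.
def iter_visible_boxes(prediction, class_dict, only_show=None, score_threshold=0.5):
    class_names = list(class_dict.values())
    for i, box in enumerate(prediction['boxes']):
        # The filter is applied before any drawing logic, so it affects every box.
        if only_show is not None and only_show != 'all':
            if prediction['labels'][i] != class_names.index(only_show):
                continue
        if prediction['scores'][i] < score_threshold:
            continue
        if prediction['labels'][i] == class_names.index('lane'):
            continue  # lanes are never drawn
        yield i, box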
modules/eval.py
CHANGED
@@ -240,21 +240,26 @@ def regroup_elements_by_pool(boxes, labels, scores, keypoints, class_dict, iou_t
         if class_dict[labels[i]] == 'messageFlow' or class_dict[labels[i]] == 'lane' or class_dict[labels[i]] == 'pool':
             elements_not_in_pool_to_delete.append(i)
     #delete the messageflow from the elements_not_in_pool
-
+    new_elements_not_in_pool = [i for i in elements_not_in_pool if i not in elements_not_in_pool_to_delete]
     count = 0
     for i in elements_not_in_pool:
         if labels[i] != list(class_dict.values()).index('sequenceFlow') or labels[i] != list(class_dict.values()).index('messageFlow'):
             count += 1
-    if
+    #check if there is only sequenceFlow or messageFlow in the new pool
+    if all([labels[i] in [list(class_dict.values()).index('sequenceFlow'),
+                          list(class_dict.values()).index('messageFlow'),
+                          list(class_dict.values()).index('dataAssociation')] for i in new_elements_not_in_pool]):
+        print('The new pool contains only sequenceFlow or messageFlow')
+    elif len(new_elements_not_in_pool) > 1 and count > 1:
         new_pool_index = len(labels)
         size_elements = get_size_elements(1)
-        box = calculate_pool_bounds(boxes,labels,
+        box = calculate_pool_bounds(boxes,labels, new_elements_not_in_pool, size_elements)
         boxes = np.append(boxes, [box], axis=0)
         labels = np.append(labels, list(class_dict.values()).index('pool'))
         scores = np.append(scores, 1.0)
         keypoints = np.append(keypoints, np.zeros((1, 2, 3)), axis=0)
-        pool_dict[new_pool_index] =
-        print(f"Created a new pool index {new_pool_index} with elements: {
+        pool_dict[new_pool_index] = new_elements_not_in_pool
+        print(f"Created a new pool index {new_pool_index} with elements: {new_elements_not_in_pool}")

     # Separate empty pools
     non_empty_pools = {k: v for k, v in pool_dict.items() if v}
@@ -341,30 +346,35 @@ def correction_labels(boxes, labels, class_dict, pool_dict, flow_links):



-def last_correction(boxes, labels, scores, keypoints, links, best_points, pool_dict, limit_area=10000):
+def last_correction(boxes, labels, scores, keypoints, bpmn_id, links, best_points, pool_dict, limit_area=10000):

    #delete pool that are have only messageFlow on it
    delete_pool = []
    for pool_index, elements in pool_dict.items():
+        #find the position of the pool_index in the bpmn_id
+        if pool_index in bpmn_id:
+            position = bpmn_id.index(pool_index)
+        else:
+            continue
        if all([labels[i] in [list(class_dict.values()).index('messageFlow'),
                              list(class_dict.values()).index('sequenceFlow'),
                              list(class_dict.values()).index('dataAssociation'),
                              list(class_dict.values()).index('lane')] for i in elements]):
            if len(elements) > 0:
-                delete_pool.append(
-                print(f"Pool {pool_index} contains only arrow elements, deleting it")
+                delete_pool.append(position)
+                print(f"Pool {pool_index} contains only arrow elements, deleting it")

        #calcul the area of the pool$
-        if
-            pool = boxes[
+        if position < len(boxes):
+            pool = boxes[position]
            area = (pool[2] - pool[0]) * (pool[3] - pool[1])
            if len(pool_dict)>1 and area < limit_area:
-                delete_pool.append(
+                delete_pool.append(position)
                print(f"Pool {pool_index} is too small, deleting it")

-        if is_vertical(boxes[
-            delete_pool.append(
-            print(f"Pool {
+        if is_vertical(boxes[position]):
+            delete_pool.append(position)
+            print(f"Pool {position} is vertical, deleting it")


    delete_elements = []
@@ -390,6 +400,7 @@ def last_correction(boxes, labels, scores, keypoints, links, best_points, pool_d
    labels = np.delete(labels, delete_elements)
    scores = np.delete(scores, delete_elements)
    keypoints = np.delete(keypoints, delete_elements, axis=0)
+    bpmn_id = [point for i, point in enumerate(bpmn_id) if i not in delete_elements]
    links = np.delete(links, delete_elements, axis=0)
    best_points = [point for i, point in enumerate(best_points) if i not in delete_elements]

@@ -397,7 +408,7 @@ def last_correction(boxes, labels, scores, keypoints, links, best_points, pool_d
    for pool_index, elements in pool_dict.items():
        pool_dict[pool_index] = [i for i in elements if i not in delete_elements]

-    return boxes, labels, scores, keypoints, links, best_points, pool_dict
+    return boxes, labels, scores, keypoints, bpmn_id, links, best_points, pool_dict

def give_link_to_element(links, labels):
    #give a link to event to allow the creation of the BPMN id with start, indermediate and end event
@@ -410,11 +421,11 @@ def give_link_to_element(links, labels):
    return links


-def generate_data(image, boxes, labels, scores, keypoints, flow_links, best_points, pool_dict):
+def generate_data(image, boxes, labels, scores, keypoints, bpmn_id, flow_links, best_points, pool_dict):
    idx = []
    for i in range(len(labels)):
        idx.append(i)
-
+

    data = {
        'image': image,
@@ -429,8 +440,6 @@ def generate_data(image, boxes, labels, scores, keypoints, flow_links, best_poin
        'BPMN_id': bpmn_id,
    }

-    # give a unique BPMN id to each element
-    data = create_BPMN_id(data)

    return data

@@ -438,6 +447,10 @@ def develop_prediction(boxes, labels, scores, keypoints, class_dict, correction=

    pool_dict, boxes, labels, scores, keypoints = regroup_elements_by_pool(boxes, labels, scores, keypoints, class_dict)

+    bpmn_id, pool_dict = create_BPMN_id(labels,pool_dict)
+
+    print('Pool dict:', pool_dict)
+
    # Create links between elements
    flow_links, best_points = create_links(keypoints, boxes, labels, class_dict)

@@ -453,9 +466,9 @@ def develop_prediction(boxes, labels, scores, keypoints, class_dict, correction=
        if labels[i] == list(class_dict.values()).index('dataStore'):
            labels[i] = list(class_dict.values()).index('dataObject')

-    boxes,labels,scores,keypoints,flow_links,best_points,pool_dict = last_correction(boxes,labels,scores,keypoints,flow_links,best_points, pool_dict)
+    boxes,labels,scores,keypoints,bpmn_id, flow_links,best_points,pool_dict = last_correction(boxes,labels,scores,keypoints,bpmn_id,flow_links,best_points, pool_dict)

-    return boxes, labels, scores, keypoints, flow_links, best_points, pool_dict
+    return boxes, labels, scores, keypoints, bpmn_id, flow_links, best_points, pool_dict



@@ -472,12 +485,12 @@ def full_prediction(model_object, model_arrow, image, score_threshold=0.5, iou_t

    boxes, labels, scores, keypoints = mix_predictions(objects_pred, arrow_pred)

-    boxes, labels, scores, keypoints, flow_links, best_points, pool_dict = develop_prediction(boxes, labels, scores, keypoints, class_dict)
+    boxes, labels, scores, keypoints, bpmn_id, flow_links, best_points, pool_dict = develop_prediction(boxes, labels, scores, keypoints, class_dict)

    image = image.permute(1, 2, 0).cpu().numpy()
    image = (image * 255).astype(np.uint8)

-    data = generate_data(image, boxes, labels, scores, keypoints, flow_links, best_points, pool_dict)
+    data = generate_data(image, boxes, labels, scores, keypoints, bpmn_id, flow_links, best_points, pool_dict)

    return image, data
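The core of this change is that develop_prediction now builds the BPMN ids right after the pools are regrouped, and last_correction locates each pool's box through its id instead of trusting a raw index. A hedged sketch of that id-based lookup, using made-up sample data (in the repo, bpmn_id comes from create_BPMN_id(labels, pool_dict) and pool_dict is keyed by those ids):

# Illustrative data only; shapes mirror what last_correction receives after the change.
bpmn_id = ['task_1', 'task_2', 'pool_1', 'sequenceFlow_1']
boxes = [[0, 0, 50, 30], [60, 0, 110, 30], [0, 0, 400, 200], [50, 15, 60, 15]]
pool_dict = {'pool_1': [0, 1, 3]}

for pool_key, elements in pool_dict.items():
    if pool_key not in bpmn_id:
        continue                      # pool id not found, skip it (mirrors the diff)
    position = bpmn_id.index(pool_key)
    pool_box = boxes[position]        # the pool's own box, found by id rather than raw index
    area = (pool_box[2] - pool_box[0]) * (pool_box[3] - pool_box[1])
    print(pool_key, '->', position, 'area', area)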
modules/streamlit_utils.py
CHANGED
@@ -363,9 +363,9 @@ def modify_results(percentage_text_dist_thresh=0.5):
    new_keypoints = np.concatenate((object_keypoints, arrow_keypoints))


-    boxes, labels, scores, keypoints, flow_links, best_points, pool_dict = develop_prediction(new_bbox, new_lab, new_scores, new_keypoints, class_dict, correction=False)
+    boxes, labels, scores, keypoints, bpmn_id, flow_links, best_points, pool_dict = develop_prediction(new_bbox, new_lab, new_scores, new_keypoints, class_dict, correction=False)

-    st.session_state.prediction = generate_data(st.session_state.prediction['image'], boxes, labels, scores, keypoints, flow_links, best_points, pool_dict)
+    st.session_state.prediction = generate_data(st.session_state.prediction['image'], boxes, labels, scores, keypoints, bpmn_id, flow_links, best_points, pool_dict)
    st.session_state.text_mapping = mapping_text(st.session_state.prediction, st.session_state.text_pred, print_sentences=False, percentage_thresh=percentage_text_dist_thresh)

    st.rerun()
modules/toXML.py
CHANGED
@@ -6,6 +6,16 @@ import copy
 from xml.dom import minidom
 import numpy as np

+def find_position(pool_index, BPMN_id):
+    #find the position of the pool_index in the bpmn_id
+    if pool_index in BPMN_id:
+        position = BPMN_id.index(pool_index)
+    else:
+        position = None
+        error(f"Problem with the pool index {pool_index} in the BPMN_id")
+
+    return position
+
 def align_boxes(pred, size):
     modified_pred = copy.deepcopy(pred) # Make a deep copy of the prediction

@@ -85,23 +95,31 @@ def align_boxes(pred, size):
         if len(keep_elements) != 0:
             marge = 50
         else:
-            marge = 0
+            marge = 0
+
+        position = find_position(pool_index, modified_pred['BPMN_id'])

-
+        if keep_elements == [] or position is None:
+            min_x, min_y, max_x, max_y = modified_pred['boxes'][position]
+        else:
+            size_elements = get_size_elements(st.session_state.size_scale)
+            min_x, min_y, max_x, max_y = calculate_pool_bounds(modified_pred['boxes'], modified_pred['labels'], keep_elements, size_elements)
+
         pool_width = max_x - min_x
         pool_height = max_y - min_y
         if pool_width < 300 or pool_height < 30:
             error("The pool is maybe too small, please add more elements or increase the scale by zooming on the image.")
             continue

-        modified_pred['boxes'][
+        modified_pred['boxes'][position] = [min_x -marge, min_y-marge//2, min_x+pool_width+marge, min_y+pool_height+marge//2]

     min_left,max_right = 0, 0
     for pool_index, element_indices in pred['pool_dict'].items():
-
+        position = find_position(pool_index, modified_pred['BPMN_id'])
+        if position >= len(modified_pred['boxes']):
             print(f"Problem with the index {pool_index} with a length of {len(modified_pred['boxes'])}")
             continue
-        x1, y1, x2, y2 = modified_pred['boxes'][
+        x1, y1, x2, y2 = modified_pred['boxes'][position]
         left = x1
         right = x2
         if left < min_left:
@@ -110,15 +128,16 @@ def align_boxes(pred, size):
             max_right = right

     for pool_index, element_indices in pred['pool_dict'].items():
-
+        position = find_position(pool_index, modified_pred['BPMN_id'])
+        if position >= len(modified_pred['boxes']):
             #print(f"Problem with the index {pool_index} with a length of {len(modified_pred['boxes'])}")
             continue
-        x1, y1, x2, y2 = modified_pred['boxes'][
+        x1, y1, x2, y2 = modified_pred['boxes'][position]
         if x1 > min_left:
             x1 = min_left
         if x2 < max_right:
             x2 = max_right
-        modified_pred['boxes'][
+        modified_pred['boxes'][position] = [x1, y1, x2, y2]

     return modified_pred['boxes']

@@ -157,27 +176,30 @@ def create_XML(full_pred, text_mapping, size_scale, scale):

     # Create BPMN process elements
     process = []
-
-
-
+    print(full_pred['pool_dict'])
+    for idx in range (len(full_pred['pool_dict'].items())):
+        process_id = f'process_{idx+1}'
+        process.append(ET.SubElement(definitions, 'bpmn:process', id=process_id, isExecutable='false'))

     bpmndi = ET.SubElement(definitions, 'bpmndi:BPMNDiagram', id='BPMNDiagram_1')
     bpmnplane = ET.SubElement(bpmndi, 'bpmndi:BPMNPlane', id='BPMNPlane_1', bpmnElement='collaboration_1')

     full_pred['boxes'] = rescale_boxes(scale, old_boxes['boxes'])
     full_pred['boxes'] = align_boxes(full_pred, size_elements)
+

     # Add diagram elements for each pool
     for idx, (pool_index, keep_elements) in enumerate(full_pred['pool_dict'].items()):
         pool_id = f'participant_{idx+1}'
-        pool = ET.SubElement(collaboration, 'bpmn:participant', id=pool_id, processRef=f'process_{idx+1}', name=text_mapping[
+        pool = ET.SubElement(collaboration, 'bpmn:participant', id=pool_id, processRef=f'process_{idx+1}', name=text_mapping[pool_index])

+        position = find_position(pool_index, full_pred['BPMN_id'])
         # Calculate the bounding box for the pool
         #if len(keep_elements) == 0:
-        if
+        if position >= len(full_pred['boxes']):
             print("Problem with the index")
             continue
-        min_x, min_y, max_x, max_y = full_pred['boxes'][
+        min_x, min_y, max_x, max_y = full_pred['boxes'][position]
         pool_width = max_x - min_x
         pool_height = max_y - min_y

@@ -219,7 +241,7 @@ def create_big_pool(full_pred, text_mapping):
     size_elements = get_size_elements(st.session_state.size_scale)
     elements_pool = list(range(len(full_pred['boxes'])))
     min_x, min_y, max_x, max_y = calculate_pool_bounds(full_pred['boxes'],full_pred['labels'], elements_pool, size_elements)
-    box = [min_x
+    box = [min_x, min_y, max_x, max_y]
     full_pred['boxes'] = np.append(full_pred['boxes'], [box], axis=0)
     full_pred['pool_dict'][new_pool_index] = elements_pool
     full_pred['BPMN_id'].append('pool_1')
@@ -251,7 +273,10 @@ def rescale(scale, boxes):
                     boxes[i][3]*scale]
     return boxes

-def create_BPMN_id(data):
+def create_BPMN_id(labels,pool_dict):
+
+    BPMN_id = [class_dict[labels[i]] for i in range(len(labels))]
+
     enums = {
         'event': 1,
         'task': 1,
@@ -268,7 +293,7 @@ def create_BPMN_id(data):
         'eventBasedGateway': 1
     }

-    BPMN_name = [class_dict[label] for label in
+    BPMN_name = [class_dict[label] for label in labels]

     for idx, Bpmn_id in enumerate(BPMN_name):
         key = {
@@ -288,10 +313,17 @@ def create_BPMN_id(data):
         }.get(Bpmn_id, None)

         if key:
-
+            BPMN_id[idx] = f'{key}_{enums[key]}'
             enums[key] += 1
+
+    # Update the pool_dict keys with their corresponding BPMN_id values
+    updated_pool_dict = {}
+    for key, value in pool_dict.items():
+        if key < len(BPMN_id):
+            new_key = BPMN_id[key]
+            updated_pool_dict[new_key] = value

-    return
+    return BPMN_id, updated_pool_dict


@@ -520,7 +552,7 @@ def calculate_pool_bounds(boxes, labels, keep_elements, size):
         max_x = max(max_x, x + element_width)
         max_y = max(max_y, y + element_height)

-    return min_x, min_y, max_x, max_y
+    return min_x-50, min_y-50, max_x+100, max_y+50
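The new pool format rests on two pieces of toXML.py: create_BPMN_id now names each element with a per-class running counter, and pool_dict keys are replaced by those generated ids so that find_position can look a pool up by id. A hedged sketch of that keying with a made-up class_dict (the repo's create_BPMN_id(labels, pool_dict) follows the same idea with its full key mapping):

# Illustrative class_dict and data; not the repo's actual mapping.
class_dict = {0: 'task', 1: 'pool', 2: 'sequenceFlow'}
labels = [0, 0, 1, 2]                      # two tasks, one pool, one sequence flow
pool_dict = {2: [0, 1, 3]}                 # keyed by the pool's raw index in labels

bpmn_id = [class_dict[l] for l in labels]  # ['task', 'task', 'pool', 'sequenceFlow']
counters = {}
for i, name in enumerate(bpmn_id):
    counters[name] = counters.get(name, 0) + 1
    bpmn_id[i] = f'{name}_{counters[name]}'     # e.g. 'task_1', 'task_2', 'pool_1'

# Pool keys become the generated ids, so later code can call bpmn_id.index('pool_1')
# (which is what find_position does) instead of trusting raw indices.
pool_dict = {bpmn_id[k]: v for k, v in pool_dict.items() if k < len(bpmn_id)}
print(bpmn_id, pool_dict)   # ['task_1', 'task_2', 'pool_1', 'sequenceFlow_1'] {'pool_1': [0, 1, 3]}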
toWizard.py
ADDED
@@ -0,0 +1,45 @@
+import xml.etree.ElementTree as ET
+from utils import class_dict
+
+def rescale(scale, boxes):
+    for i in range(len(boxes)):
+        boxes[i] = [boxes[i][0] * scale, boxes[i][1] * scale, boxes[i][2] * scale, boxes[i][3] * scale]
+    return boxes
+
+def create_BPMN_id(data):
+    enum_end, enum_start, enum_task, enum_sequence, enum_dataflow, enum_messflow, enum_messageEvent, enum_exclusiveGateway, enum_parallelGateway, enum_pool = 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+    BPMN_name = [class_dict[data['labels'][i]] for i in range(len(data['labels']))]
+    for idx, Bpmn_id in enumerate(BPMN_name):
+        if Bpmn_id == 'event':
+            if data['links'][idx][0] is not None and data['links'][idx][1] is None:
+                data['BPMN_id'][idx] = f'end_event_{enum_end}'
+                enum_end += 1
+            elif data['links'][idx][0] is None and data['links'][idx][1] is not None:
+                data['BPMN_id'][idx] = f'start_event_{enum_start}'
+                enum_start += 1
+        elif Bpmn_id == 'task' or Bpmn_id == 'dataObject':
+            data['BPMN_id'][idx] = f'task_{enum_task}'
+            enum_task += 1
+        elif Bpmn_id == 'sequenceFlow':
+            data['BPMN_id'][idx] = f'sequenceFlow_{enum_sequence}'
+            enum_sequence += 1
+        elif Bpmn_id == 'messageFlow':
+            data['BPMN_id'][idx] = f'messageFlow_{enum_messflow}'
+            enum_messflow += 1
+        elif Bpmn_id == 'messageEvent':
+            data['BPMN_id'][idx] = f'message_event_{enum_messageEvent}'
+            enum_messageEvent += 1
+        elif Bpmn_id == 'exclusiveGateway':
+            data['BPMN_id'][idx] = f'exclusiveGateway_{enum_exclusiveGateway}'
+            enum_exclusiveGateway += 1
+        elif Bpmn_id == 'parallelGateway':
+            data['BPMN_id'][idx] = f'parallelGateway_{enum_parallelGateway}'
+            enum_parallelGateway += 1
+        elif Bpmn_id == 'dataAssociation':
+            data['BPMN_id'][idx] = f'dataAssociation_{enum_sequence}'
+            enum_dataflow += 1
+        elif Bpmn_id == 'pool':
+            data['BPMN_id'][idx] = f'pool_{enum_pool}'
+            enum_pool += 1
+
+    return data
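The new toWizard.py carries its own create_BPMN_id that renames elements of a prediction dict in place, telling start events from end events by which side of links is missing. A hedged usage sketch with made-up data (the label-to-class mapping shown in the comments is hypothetical; the real one lives in utils.class_dict):

# Usage sketch only; assumes the repo is on the path so utils.class_dict resolves.
from toWizard import create_BPMN_id

data = {
    'labels': [0, 1, 0],                      # hypothetical: 0 -> 'event', 1 -> 'task'
    'links': [[None, 2], [0, 1], [1, None]],  # [incoming, outgoing] per element
    'BPMN_id': [None, None, None],
}
data = create_BPMN_id(data)
# An event with no incoming link becomes 'start_event_1', one with no outgoing link
# becomes 'end_event_1', and the element labelled as a task becomes 'task_1'
# (given the hypothetical mapping above).
print(data['BPMN_id'])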