laurenok24 committed
Commit 97a245c
1 Parent(s): b41b87f

Upload 10 files

microprograms/errors/angles_micro_programs.py ADDED
@@ -0,0 +1,107 @@
import numpy as np
import math
from math import atan
from models.pose_estimator.pose_estimator_model_setup import get_pose_estimation

# KEYPOINT_INDEXES = {
#     0: 'r ankle',
#     1: 'r knee',
#     2: 'r hip',
#     3: 'l hip',
#     4: 'l knee',
#     5: 'l ankle',
#     6: 'pelvis',
#     7: 'thorax',
#     8: 'upper neck',
#     9: 'head',
#     10: 'r wrist',
#     11: 'r elbow',
#     12: 'r shoulder',
#     13: 'l shoulder',
#     14: 'l elbow',
#     15: 'l wrist',
# }

# Slope of the line through (x1, y1) and (x2, y2); "undefined" for a vertical line.
def slope(x1, y1, x2, y2):
    if x1 == x2:
        return "undefined"
    return (y2 - y1) / (x2 - x1)

# Angle in degrees between two lines, given their slopes.
def findAngle(M1, M2):
    if M1 == "undefined" and M2 == "undefined":
        return 0.0  # both lines vertical, hence parallel
    vertical_line = False
    if M1 == "undefined":
        M1 = 0
        vertical_line = True
    if M2 == "undefined":
        M2 = 0
        vertical_line = True
    # tangent of the angle between the two lines
    angle = abs((M2 - M1) / (1 + M1 * M2))
    # arctan, converted from radians to degrees
    val = math.degrees(atan(angle))
    if vertical_line:
        # the angle was measured against a horizontal stand-in (slope 0),
        # so take the complement to recover the angle against the vertical line
        return 90 - round(val, 4)
    return round(val, 4)

# Angle in degrees between the two lower legs, measured at the averaged knee joint.
def applyFeetApartError(filepath, pose_pred=None, diver_detector=None, pose_model=None):
    if pose_pred is None and filepath != "":
        diver_box, pose_pred = get_pose_estimation(filepath, diver_detector=diver_detector, pose_model=pose_model)
    if pose_pred is None:
        return None
    pose_pred = np.array(pose_pred)[0]
    # midpoint of the two knees (keypoints 1 and 4)
    average_knee = [np.mean((pose_pred[4][0], pose_pred[1][0])), np.mean((pose_pred[4][1], pose_pred[1][1]))]
    # vectors from the averaged knee to each ankle (keypoints 5 and 0)
    vector1 = [pose_pred[5][0] - average_knee[0], pose_pred[5][1] - average_knee[1]]
    vector2 = [pose_pred[0][0] - average_knee[0], pose_pred[0][1] - average_knee[1]]
    unit_vector_1 = vector1 / np.linalg.norm(vector1)
    unit_vector_2 = vector2 / np.linalg.norm(vector2)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    angle = math.degrees(np.arccos(dot_product))
    # earlier slope-based version:
    # left_leg_slope = slope(pose_pred[4][0], pose_pred[4][1], pose_pred[5][0], pose_pred[5][1])
    # right_leg_slope = slope(pose_pred[4][0], pose_pred[4][1], pose_pred[0][0], pose_pred[0][1])
    # angle = findAngle(left_leg_slope, right_leg_slope)
    return angle

# position: somersault or twist
# Angle in degrees at the right hip between the torso (hip -> thorax) and the
# right thigh (hip -> knee): small in a tight tuck/pike, large when laid out.
def applyPositionTightnessError(filepath, pose_pred=None, diver_detector=None, pose_model=None):
    if pose_pred is None and filepath != "":
        diver_box, pose_pred = get_pose_estimation(filepath, diver_detector=diver_detector, pose_model=pose_model)
    if pose_pred is None:
        return None
    pose_pred = np.array(pose_pred)[0]
    vector1 = [pose_pred[7][0] - pose_pred[2][0], pose_pred[7][1] - pose_pred[2][1]]
    vector2 = [pose_pred[1][0] - pose_pred[2][0], pose_pred[1][1] - pose_pred[2][1]]
    unit_vector_1 = vector1 / np.linalg.norm(vector1)
    unit_vector_2 = vector2 / np.linalg.norm(vector2)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    angle = math.degrees(np.arccos(dot_product))
    # earlier slope-based version:
    # upper_body = slope(pose_pred[2][0], pose_pred[2][1], pose_pred[7][0], pose_pred[7][1])
    # lower_body = slope(pose_pred[2][0], pose_pred[2][1], pose_pred[1][0], pose_pred[1][1])
    # angle = findAngle(upper_body, lower_body)
    # if position == 1:
    #     angle = 180 - angle
    return angle
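A minimal usage sketch for these two angle microprograms (the frame path is hypothetical, and the diver detector / pose model are assumed to be set up as elsewhere in this repo):

from microprograms.errors.angles_micro_programs import applyFeetApartError, applyPositionTightnessError

frame = "./data/frames/dive_001/frame_0042.jpg"  # hypothetical path
feet_angle = applyFeetApartError(frame)          # near 0 when the feet are together
hip_angle = applyPositionTightnessError(frame)   # small in a tight tuck/pike
if feet_angle is not None and hip_angle is not None:
    print("feet apart: {:.1f} deg, hip angle: {:.1f} deg".format(feet_angle, hip_angle))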
microprograms/errors/distance_from_springboard_micro_program.py ADDED
@@ -0,0 +1,290 @@
# import sys, os, distutils.core

# # os.system('python -m pip install pyyaml==5.3.1')
# # dist = distutils.core.run_setup("./detectron2/setup.py")
# # temp = ' '.join([f"'{x}'" for x in dist.install_requires])
# # cmd = "python -m pip install {0}".format(temp)
# # os.system(cmd)
# sys.path.insert(0, os.path.abspath('./detectron2'))

# import detectron2
# import cv2

# from detectron2.utils.logger import setup_logger
# setup_logger()

# from detectron2 import model_zoo
# from detectron2.engine import DefaultPredictor
# from detectron2.config import get_cfg
# from detectron2.utils.visualizer import Visualizer, ColorMode
# from detectron2.data import MetadataCatalog, DatasetCatalog
# from detectron2.checkpoint import DetectionCheckpointer
# from detectron2.data.datasets import register_coco_instances

# cfg = get_cfg()
# cfg.OUTPUT_DIR = "./output/springboard/"
# cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
# cfg.DATASETS.TEST = ()
# cfg.DATALOADER.NUM_WORKERS = 2
# cfg.SOLVER.IMS_PER_BATCH = 2                    # the real "batch size"
# cfg.SOLVER.BASE_LR = 0.00025
# cfg.SOLVER.MAX_ITER = 300                       # enough for this small dataset
# cfg.SOLVER.STEPS = []                           # do not decay the learning rate
# cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # RoI head batch size (default: 512)
# cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
# cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")  # the trained springboard model
# cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7     # custom testing threshold
# predictor = DefaultPredictor(cfg)
# register_coco_instances("springboard_trains", {}, "./coco_annotations/springboard/train.json", "../data/Boards/spring")
# register_coco_instances("springboard_vals", {}, "./coco_annotations/springboard/val.json", "../data/Boards/spring")

# splash_metadata = MetadataCatalog.get('springboard_vals')
# dataset_dicts = DatasetCatalog.get("springboard_vals")

# outputs_array = []
# for d in dataset_dicts:
#     im = cv2.imread(d["file_name"])
#     outputs = predictor(im)
#     outputs_array.append(outputs)  # format: https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
#     v = Visualizer(im[:, :, ::-1], metadata=splash_metadata, scale=0.5,
#                    instance_mode=ColorMode.IMAGE_BW)  # gray out unsegmented pixels
#     out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#     img = out.get_image()[:, :, ::-1]
#     filename = os.path.join("./output/", d["file_name"][3:])
#     if not cv2.imwrite(filename, img):
#         print('no image written')

import torch
import numpy as np
import math
import cv2
import os
from matplotlib import pyplot as plt
from models.detectron2.springboard_detector_setup import get_springboard_detector
from models.detectron2.platform_detector_setup import get_platform_detector
from models.pose_estimator.pose_estimator_model_setup import get_pose_estimation

# SPRINGBOARD MICRO PROGRAM

# Returns "left" or "right" depending on which side of the frame the board is on,
# scanning mask columns inward from both edges; None if no board is detected.
def find_which_side_board_on(output):
    pred_classes = output['instances'].pred_classes.cpu().numpy()
    platforms = np.where(pred_classes == 0)[0]
    scores = output['instances'].scores[platforms]
    if len(scores) == 0:
        return None
    pred_masks = output['instances'].pred_masks[platforms]
    max_instance = torch.argmax(scores)
    pred_mask = np.array(pred_masks[max_instance].cpu())  # board instance with highest confidence
    for i in range(len(pred_mask[0]) // 2):
        if sum(pred_mask[:, i]) != 0:
            return "left"
        elif sum(pred_mask[:, len(pred_mask[0]) - i - 1]) != 0:
            return "right"
    return None

# Returns (x, y) of the board tip: the topmost mask pixel in the last occupied
# column at the board's free end (the edge opposite its mounting side).
def board_end(output, board_side=None):
    pred_classes = output['instances'].pred_classes.cpu().numpy()
    platforms = np.where(pred_classes == 0)[0]
    scores = output['instances'].scores[platforms]
    if len(scores) == 0:
        return None
    pred_masks = output['instances'].pred_masks[platforms]
    max_instance = torch.argmax(scores)
    pred_mask = np.array(pred_masks[max_instance].cpu())  # board instance with highest confidence
    # figure out which side of the frame the board is on, then walk to its far edge
    if board_side is None:
        board_side = find_which_side_board_on(output)
    if board_side == "left":
        for i in range(len(pred_mask[0]) - 1, -1, -1):
            if sum(pred_mask[:, i]) != 0:
                trues = np.where(pred_mask[:, i])[0]
                return (i, min(trues))
    if board_side == "right":
        for i in range(len(pred_mask[0])):
            if sum(pred_mask[:, i]) != 0:
                trues = np.where(pred_mask[:, i])[0]
                return (i, min(trues))
    return None

def draw_board_end_coord(im, coord, filename):
    image = cv2.circle(im, (int(coord[0]), int(coord[1])), radius=10, color=(0, 0, 255), thickness=-1)
    if not cv2.imwrite(filename, image):
        print("file failed to write:", filename)

# loops over each image, plotting a point at the end of the board
# for d in dataset_dicts:
#     im = cv2.imread(d["file_name"])
#     outputs = predictor(im)
#     coord = board_end(outputs)
#     if coord is None:
#         continue
#     draw_board_end_coord(im, coord, os.path.join("./output/board_end/", d["file_name"][3:]))

## TODO: ADD POSE ESTIMATOR, AND CALCULATE DISTANCE FROM BOARD
# PLOT RESULTS OF ONE FULL DIVE
# KEYPOINT_INDEXES: see microprograms/errors/angles_micro_programs.py

# demo_image = '../data/Boards/spring/img_17_10_00014517.jpg'
# im = cv2.imread(demo_image)
# pose_pred = get_pose_estimation(demo_image)
# predictor = get_springboard_detector()
# outputs = predictor(im)
# coord = board_end(outputs)

def draw_two_coord(im, coord1, coord2, filename):
    image = cv2.circle(im, (int(coord1[0]), int(coord1[1])), radius=5, color=(0, 0, 255), thickness=-1)
    image = cv2.circle(image, (int(coord2[0]), int(coord2[1])), radius=5, color=(0, 255, 0), thickness=-1)
    if not cv2.imwrite(filename, image):
        print("file failed to write:", filename)

# draw_two_coord(im, coord, np.array(pose_pred)[0][5], filename=os.path.join("./output/board_end/", demo_image[3:]))
# print("DISTANCE BETWEEN END BOARD AND LEFT ANKLE:", math.dist(np.array(pose_pred)[0][5], np.array(coord)))

# Minimum distance in pixels from any pose keypoint to the springboard tip;
# None if the board or the diver is not detected.
def calculate_distance_from_springboard_for_one_frame(filepath, visualize=False, dive_folder_num="", springboard_detector=None, pose_pred=None, pose_model=None, board_end_coord=None, board_side=None):
    if springboard_detector is None:
        springboard_detector = get_springboard_detector()
    if pose_pred is None:
        diver_box, pose_pred = get_pose_estimation(filepath, pose_model=pose_model)
    im = cv2.imread(filepath)
    outputs = springboard_detector(im)
    if board_end_coord is None:
        board_end_coord = board_end(outputs, board_side=board_side)
    minDist = None
    if board_end_coord is not None and pose_pred is not None and len(board_end_coord) == 2:
        minDist = float('inf')
        for i in range(len(np.array(pose_pred)[0])):
            distance = math.dist(np.array(pose_pred)[0][i], np.array(board_end_coord))
            if distance < minDist:
                minDist = distance
                minJoint = i  # keypoint closest to the board end
        if visualize:
            file_name = filepath.split('/')[-1]
            folder = "./output/data/distance_from_board/{}".format(dive_folder_num)
            out_filename = os.path.join(folder, file_name)
            if not os.path.exists(folder):
                os.makedirs(folder)
            draw_two_coord(im, board_end_coord, np.array(pose_pred)[0][minJoint], filename=out_filename)
    # else: springboard or diver not detected in filepath
    return minDist

# Same as above, but using the platform detector; accepts a pre-loaded frame.
def calculate_distance_from_platform_for_one_frame(filepath, im=None, visualize=False, dive_folder_num="", platform_detector=None, pose_pred=None, diver_detector=None, pose_model=None, board_end_coord=None, board_side=None):
    if platform_detector is None:
        platform_detector = get_platform_detector()
    if pose_pred is None:
        diver_box, pose_pred = get_pose_estimation(filepath, image_bgr=im, diver_detector=diver_detector, pose_model=pose_model)
    if im is None and filepath != "":
        im = cv2.imread(filepath)
    if board_end_coord is None:
        outputs = platform_detector(im)
        board_end_coord = board_end(outputs, board_side=board_side)
    minDist = None
    if board_end_coord is not None and pose_pred is not None and len(board_end_coord) == 2:
        minDist = float('inf')
        for i in range(len(np.array(pose_pred)[0])):
            distance = math.dist(np.array(pose_pred)[0][i], np.array(board_end_coord))
            if distance < minDist:
                minDist = distance
                minJoint = i  # keypoint closest to the board end
        if visualize:
            file_name = filepath.split('/')[-1]
            folder = "./output/data/distance_from_board/{}".format(dive_folder_num)
            out_filename = os.path.join(folder, file_name)
            if not os.path.exists(folder):
                os.makedirs(folder)
            draw_two_coord(im, board_end_coord, np.array(pose_pred)[0][minJoint], filename=out_filename)
    # else: platform or diver not detected in filepath
    return minDist

# distances = []
# directory = "./FineDiving/datasets/FINADiving_MTL_256s/17/73/"
# for file_name in os.listdir(directory):
#     path = os.path.join(directory, file_name)
#     pose_pred = get_pose_estimation(path)
#     im = cv2.imread(path)
#     outputs = predictor(im)
#     coord = board_end(outputs)
#     if coord is not None and pose_pred is not None and len(coord) == 2:
#         distance = math.dist(np.array(pose_pred)[0][5], np.array(coord))
#         distances.append(distance)
#         draw_two_coord(im, coord, np.array(pose_pred)[0][5], filename=os.path.join("./output/data/img_17_73/", file_name))
#     else:
#         distances.append(0)

# plt.plot(range(len(distances)), distances)
# plt.savefig('./output/data/img_17_73/img_17_73_board_dist_graph.png')
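A hedged sketch of driving the per-frame distance routine over one clip, reusing a single detector (the directory mirrors the commented example above; the loop itself is an assumption, not the repo's real driver):

import os
from models.detectron2.platform_detector_setup import get_platform_detector
from microprograms.errors.distance_from_springboard_micro_program import calculate_distance_from_platform_for_one_frame

detector = get_platform_detector()  # load once, reuse across frames
frames_dir = "./FineDiving/datasets/FINADiving_MTL_256s/17/73/"  # example path from the comments above
distances = []
for name in sorted(os.listdir(frames_dir)):
    d = calculate_distance_from_platform_for_one_frame(os.path.join(frames_dir, name), platform_detector=detector)
    distances.append(d if d is not None else 0)  # 0 when the board or diver is not detected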
microprograms/errors/feet_apart.py ADDED
@@ -0,0 +1,20 @@
import numpy as np
import math
from models.pose_estimator.pose_estimator_model_setup import get_pose_estimation

# Angle in degrees between the two lower legs, measured at the averaged knee
# joint (near 0 when the feet are together).
def applyFeetApartError(filepath, pose_pred=None, diver_detector=None, pose_model=None):
    if pose_pred is None and filepath != "":
        diver_box, pose_pred = get_pose_estimation(filepath, diver_detector=diver_detector, pose_model=pose_model)
    if pose_pred is None:
        return None
    pose_pred = np.array(pose_pred)[0]
    # midpoint of the two knees (keypoints 1 and 4)
    average_knee = [np.mean((pose_pred[4][0], pose_pred[1][0])), np.mean((pose_pred[4][1], pose_pred[1][1]))]
    # vectors from the averaged knee to each ankle (keypoints 5 and 0)
    vector1 = [pose_pred[5][0] - average_knee[0], pose_pred[5][1] - average_knee[1]]
    vector2 = [pose_pred[0][0] - average_knee[0], pose_pred[0][1] - average_knee[1]]
    unit_vector_1 = vector1 / np.linalg.norm(vector1)
    unit_vector_2 = vector2 / np.linalg.norm(vector2)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    angle = math.degrees(np.arccos(dot_product))
    return angle
microprograms/errors/over_rotation.py ADDED
@@ -0,0 +1,19 @@
import numpy as np
import math
from models.pose_estimator.pose_estimator_model_setup import get_pose_estimation

# Angle in degrees between the r-hip -> r-ankle line and the horizontal,
# after flipping image y (which grows downward) into math coordinates.
def over_rotation(filepath, pose_pred=None, diver_detector=None, pose_model=None):
    if pose_pred is None and filepath != "":
        diver_box, pose_pred = get_pose_estimation(filepath, diver_detector=diver_detector, pose_model=pose_model)
    if pose_pred is None:
        return None
    pose_pred = np.array(pose_pred)[0]
    # hip-to-ankle vector (keypoints 2 and 0), with the y axis flipped
    vector1 = [(pose_pred[0][0] - pose_pred[2][0]), 0 - (pose_pred[0][1] - pose_pred[2][1])]
    vector2 = [-1, 0]  # reference direction: horizontal, pointing left
    unit_vector_1 = vector1 / np.linalg.norm(vector1)
    unit_vector_2 = vector2 / np.linalg.norm(vector2)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    angle = math.degrees(np.arccos(dot_product))
    return angle
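A small worked check of the geometry with made-up coordinates (image y grows downward, which the 0 - (...) flip converts to standard math coordinates):

import numpy as np, math
hip, ankle = (100, 100), (100, 50)               # hypothetical: ankle directly above the hip in the image
v1 = [ankle[0] - hip[0], -(ankle[1] - hip[1])]   # -> [0, 50], straight up after the y-flip
v2 = [-1, 0]                                     # reference direction: horizontal
cos = np.dot(v1 / np.linalg.norm(v1), v2)
print(math.degrees(np.arccos(cos)))              # 90.0: a vertical diver reads as 90 degrees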
microprograms/errors/position_tightness.py ADDED
@@ -0,0 +1,20 @@
import numpy as np
import math
from models.pose_estimator.pose_estimator_model_setup import get_pose_estimation

# Angle in degrees at the right hip between the torso (hip -> thorax) and the
# right thigh (hip -> knee); small in a tight tuck/pike position.
def applyPositionTightnessError(filepath, pose_pred=None):
    if pose_pred is None and filepath != "":
        diver_box, pose_pred = get_pose_estimation(filepath)
    if pose_pred is None:
        return None
    pose_pred = np.array(pose_pred)[0]
    vector1 = [pose_pred[7][0] - pose_pred[2][0], pose_pred[7][1] - pose_pred[2][1]]
    vector2 = [pose_pred[1][0] - pose_pred[2][0], pose_pred[1][1] - pose_pred[2][1]]
    unit_vector_1 = vector1 / np.linalg.norm(vector1)
    unit_vector_2 = vector2 / np.linalg.norm(vector2)
    dot_product = np.dot(unit_vector_1, unit_vector_2)
    angle = math.degrees(np.arccos(dot_product))
    return angle
microprograms/errors/splash_micro_program.py ADDED
@@ -0,0 +1,181 @@
import sys, os

sys.path.insert(0, os.path.abspath('./detectron2'))  # vendored detectron2 checkout

import detectron2
import numpy as np
import cv2
import torch
import matplotlib.pyplot as plt
from detectron2.utils.visualizer import Visualizer
from models.detectron2.splash_detector_setup import get_splash_detector

# One-time build of the vendored detectron2, kept for reference:
# import distutils.core
# os.system('python -m pip install pyyaml==5.3.1')
# dist = distutils.core.run_setup("./detectron2/setup.py")
# temp = ' '.join([f"'{x}'" for x in dist.install_requires])
# cmd = "python -m pip install {0}".format(temp)
# os.system(cmd)

# Training / visualization setup for the splash detector, kept for reference:
# from detectron2.utils.logger import setup_logger
# setup_logger()
# from detectron2 import model_zoo
# from detectron2.engine import DefaultPredictor
# from detectron2.config import get_cfg
# from detectron2.data import MetadataCatalog, DatasetCatalog
# from detectron2.data.datasets import register_coco_instances
# from detectron2.utils.visualizer import ColorMode

# cfg = get_cfg()
# cfg.OUTPUT_DIR = "./output/splash/"
# cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
# cfg.DATASETS.TRAIN = ("splash_trains",)
# cfg.DATASETS.TEST = ()
# cfg.DATALOADER.NUM_WORKERS = 2
# cfg.SOLVER.IMS_PER_BATCH = 2                    # the real "batch size"
# cfg.SOLVER.BASE_LR = 0.00025
# cfg.SOLVER.MAX_ITER = 300                       # enough for this small dataset
# cfg.SOLVER.STEPS = []                           # do not decay the learning rate
# cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # RoI head batch size (default: 512)
# cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
# cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")  # the trained splash model
# cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7     # custom testing threshold
# predictor = DefaultPredictor(cfg)
# register_coco_instances("splash_trains", {}, "./coco_annotations/splash/train.json", "./data/Splashes")
# register_coco_instances("splash_vals", {}, "./coco_annotations/splash/val.json", "./data/Splashes")

# splash_metadata = MetadataCatalog.get('splash_vals')
# dataset_dicts = DatasetCatalog.get("splash_vals")

# outputs_array = []
# for d in dataset_dicts:
#     im = cv2.imread(d["file_name"])
#     outputs = predictor(im)
#     outputs_array.append(outputs)  # format: https://detectron2.readthedocs.io/tutorials/models.html#model-output-format
#     v = Visualizer(im[:, :, ::-1], metadata=splash_metadata, scale=0.5,
#                    instance_mode=ColorMode.IMAGE_BW)  # gray out unsegmented pixels
#     out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#     img = out.get_image()[:, :, ::-1]
#     filename = os.path.join("./output", d["file_name"][2:])
#     if not cv2.imwrite(filename, img):
#         print('no image written')

# SPLASH MICRO PROGRAM
# Find the fraction of "True" pixels in pred_masks; the lower the percentage,
# the better the entry. May need to be calibrated to a points scale.

# Mask of the splash instance with the highest confidence; None if no splash detected.
def get_splash_pred_mask(output):
    pred_classes = output['instances'].pred_classes.cpu().numpy()
    splashes = np.where(pred_classes == 0)[0]
    scores = output['instances'].scores[splashes]
    if len(scores) == 0:
        return None
    pred_masks = output['instances'].pred_masks[splashes]
    max_instance = torch.argmax(scores)
    pred_mask = np.array(pred_masks[max_instance].cpu())
    return pred_mask

# Fraction of the frame covered by the given splash mask; None if no mask.
def splash_area_percentage(output, pred_mask=None):
    if pred_mask is None:
        return None
    # sum of splash pixels over total pixels
    totalSum = 0
    for j in range(len(pred_mask)):
        totalSum += pred_mask[j].sum()
    return totalSum / (len(pred_mask) * len(pred_mask[0]))

# TODO: run the splash micro program on one diving clip,
# plot the splash area percentage, and save the image.

def get_splash_from_one_frame(filepath, im=None, predictor=None, visualize=False, dive_folder_num=""):
    if predictor is None:
        predictor = get_splash_detector()
    if im is None:
        im = cv2.imread(filepath)
    outputs = predictor(im)
    pred_mask = get_splash_pred_mask(outputs)
    area = splash_area_percentage(outputs, pred_mask=pred_mask)
    if area is None:
        # print("no splash detected in", filepath)
        return None, None
    if visualize:
        pred_boxes = outputs['instances'].pred_boxes
        for box in pred_boxes:
            image = cv2.rectangle(im, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color=(0, 0, 255), thickness=2)
            out_folder = "./output/data/splash/{}".format(dive_folder_num)
            if not os.path.exists(out_folder):
                os.makedirs(out_folder)
            filename = os.path.join(out_folder, filepath.split('/')[-1])
            if not cv2.imwrite(filename, image):
                print('no image written to', filename)
                break
    return area.tolist(), pred_mask

# Batch run over one clip, kept for reference:
# outputs_array2 = []
# directory = "./MTL-AQA/"
# for file_name in os.listdir(directory):
#     if file_name[:10] == "img_01_06_":
#         path = os.path.join(directory, file_name)
#         im = cv2.imread(path)
#         outputs = predictor(im)
#         outputs_array2.append(outputs)
#         v = Visualizer(im[:, :, ::-1], metadata=splash_metadata, scale=0.5,
#                        instance_mode=ColorMode.IMAGE_BW)
#         out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
#         img = out.get_image()[:, :, ::-1]
#         filename = os.path.join("./output/data/img_01_06/", file_name)
#         if not cv2.imwrite(filename, img):
#             print('no image written')

# splash_area = []
# for output in outputs_array2:
#     area = splash_area_percentage(output)
#     splash_area.append(0 if area is None else area.cpu().data.item())

# plt.plot(range(len(splash_area)), splash_area)
# plt.savefig('./output/data/img_01_06/img_01_06_splash_graph.png')

# Evaluation, kept for reference:
# from detectron2.evaluation import COCOEvaluator, inference_on_dataset
# from detectron2.data import build_detection_test_loader
# evaluator = COCOEvaluator("splash_vals", output_dir="./output")
# val_loader = build_detection_test_loader(cfg, "splash_vals")
# print(inference_on_dataset(predictor.model, val_loader, evaluator))
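A minimal per-frame usage sketch (the frame path is hypothetical; only the detector setup from this repo is assumed):

from models.detectron2.splash_detector_setup import get_splash_detector
from microprograms.errors.splash_micro_program import get_splash_from_one_frame

detector = get_splash_detector()  # load once, reuse across frames
area, mask = get_splash_from_one_frame("./MTL-AQA/img_01_06_00000001.jpg", predictor=detector)  # hypothetical frame
if area is not None:
    print("splash covers {:.2%} of the frame".format(area))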
microprograms/temporal_segmentation/entry.py ADDED
@@ -0,0 +1,30 @@
from microprograms.errors.splash_micro_program import get_splash_from_one_frame
from microprograms.errors.angles_micro_programs import applyPositionTightnessError

# Returns 1 if this frame belongs to the entry phase, else 0.
def entry_microprogram_one_frame(filepath, above_board, on_board, pose_pred, expected_twists, petal_count, expected_som, half_som_count, frame=None, splash_detector=None, visualize=False, dive_folder_num=None):
    if above_board:
        return 0
    if on_board:
        return 0
    # get_splash_from_one_frame returns (area, mask); a detected splash means entry
    splash_area, _ = get_splash_from_one_frame(filepath, im=frame, predictor=splash_detector, visualize=visualize, dive_folder_num=dive_folder_num)
    if splash_area is not None:
        return 1
    # if the somersaults are complete, we know we're in the entry phase
    if expected_som <= half_som_count:
        return 1
    if expected_twists > petal_count or expected_som > half_som_count:
        return 0
    return 1

    # earlier pose-based variant:
    # if pose_pred is None:
    #     return 1
    # else:
    #     angle = applyPositionTightnessError(filepath, pose_pred)
    #     if angle > 90:
    #         return 1
    #     if splash_area is not None:
    #         return 1
    #     else:
    #         return 0
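A sketch of calling the entry classifier on one frame (the path and counter values are illustrative; in practice a driver maintains the counters across the clip):

from microprograms.temporal_segmentation.entry import entry_microprogram_one_frame

is_entry = entry_microprogram_one_frame(
    "./data/frames/dive_001/frame_0110.jpg",  # hypothetical frame
    above_board=False, on_board=False, pose_pred=None,
    expected_twists=2, petal_count=2,   # all expected twists already counted
    expected_som=3, half_som_count=3,   # all expected half-somersaults counted
)  # -> 1 here, since the somersaults are complete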
microprograms/temporal_segmentation/somersault.py ADDED
@@ -0,0 +1,20 @@
from microprograms.errors.angles_micro_programs import applyPositionTightnessError
# from models.pose_estimator.pose_estimator_model_setup import get_pose_estimation

# Returns 1 if this frame belongs to the somersault phase, else 0.
def somersault_microprogram_one_frame(filepath, on_board, expected_som, half_som_count, expected_twists, petal_count, pose_pred=None, diver_detector=None, pose_model=None):
    if on_board:
        return 0
    if expected_som <= half_som_count:
        return 0
    # somersaults remain; decide between somersault and twist by hip angle
    angle = applyPositionTightnessError(filepath, pose_pred, diver_detector=diver_detector, pose_model=pose_model)
    if angle is None:
        return 0
    # twists are already finished, so this must be a somersault
    if expected_twists <= petal_count:
        return 1
    # a tight hip angle (<= 80 degrees) indicates a somersault rather than a twist
    if angle <= 80:
        return 1
    return 0
microprograms/temporal_segmentation/start_takeoff.py ADDED
@@ -0,0 +1,22 @@
from microprograms.errors.angles_micro_programs import applyPositionTightnessError

# Returns 1 if this frame belongs to the takeoff phase (diver above the board
# and still on it), else 0.
def takeoff_microprogram_one_frame(filepath, above_board, on_board, pose_pred=None):
    if not above_board:
        return 0
    if on_board:
        return 1
    return 0

    # earlier pose-based variant:
    # angle = applyPositionTightnessError(filepath, pose_pred)
    # if angle is None:
    #     return 0
    # if angle > 90:
    #     return 1
    # else:
    #     return 0
microprograms/temporal_segmentation/twist.py ADDED
@@ -0,0 +1,21 @@
from microprograms.errors.angles_micro_programs import applyPositionTightnessError

# Returns 1 if this frame belongs to the twist phase, else 0.
def twist_microprogram_one_frame(filepath, on_board, expected_twists, petal_count, expected_som, half_som_count, pose_pred=None, diver_detector=None, pose_model=None):
    if on_board:
        return 0
    # twists and somersaults must both still be in progress
    if expected_twists <= petal_count or expected_som <= half_som_count:
        return 0
    angle = applyPositionTightnessError(filepath, pose_pred=pose_pred, diver_detector=diver_detector, pose_model=pose_model)
    if angle is None:
        return 0
    # an open hip angle (> 80 degrees) indicates a twist rather than a somersault
    if angle > 80:
        return 1
    return 0
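Taken together, the four temporal microprograms can vote on a phase label for each frame. A hedged sketch of one way to combine them (the precedence order and the label_frame helper are assumptions; this commit does not include the real driver):

from microprograms.temporal_segmentation.start_takeoff import takeoff_microprogram_one_frame
from microprograms.temporal_segmentation.somersault import somersault_microprogram_one_frame
from microprograms.temporal_segmentation.twist import twist_microprogram_one_frame
from microprograms.temporal_segmentation.entry import entry_microprogram_one_frame

# Hypothetical helper: first microprogram to claim the frame wins.
def label_frame(filepath, above_board, on_board, expected_twists, petal_count,
                expected_som, half_som_count, pose_pred=None):
    if takeoff_microprogram_one_frame(filepath, above_board, on_board, pose_pred):
        return "takeoff"
    if somersault_microprogram_one_frame(filepath, on_board, expected_som, half_som_count,
                                         expected_twists, petal_count, pose_pred=pose_pred):
        return "somersault"
    if twist_microprogram_one_frame(filepath, on_board, expected_twists, petal_count,
                                    expected_som, half_som_count, pose_pred=pose_pred):
        return "twist"
    if entry_microprogram_one_frame(filepath, above_board, on_board, pose_pred,
                                    expected_twists, petal_count, expected_som, half_som_count):
        return "entry"
    return "unknown"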