turhancan97 committed
Commit b548adc
1 Parent(s): 161fbc4

app deploy with multiple object

Files changed (1)
  app.py +23 -22
app.py CHANGED
@@ -15,8 +15,8 @@ def drawAxis(img, p_, q_, color, scale):
     hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))
 
     # Here we lengthen the arrow by a factor of scale
-    q[0] = p[0] - scale * hypotenuse * cos(angle)
-    q[1] = p[1] - scale * hypotenuse * sin(angle)
+    q[0] = p[0] - scale * hypotenuse/3 * cos(angle)
+    q[1] = p[1] - scale * hypotenuse/3 * sin(angle)
     cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), color, 3, cv2.LINE_AA)
 
     # create the arrow hooks
@@ -69,6 +69,7 @@ def getOrientation(pts, img):
 file_urls = [
     'https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/cefd9731-c57c-428b-b401-fd54a8bd0a95',
     'https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/acbad76a-33f9-4028-b012-4ece5998c272',
+    'https://github.com/lucarei/orientation-detection-robotic-grasping/assets/22428774/ce8a0fb9-99ea-4952-bcc4-3afa023066d9',
     'https://www.dropbox.com/s/7sjfwncffg8xej2/video_7.mp4?dl=1'
 ]
 
@@ -103,26 +104,26 @@ def show_preds_image(image_path):
     dim=(width,height)
 
     outputs = model.predict(source=img_res_toshow)
-
-    #obtain BW image
-    bw=(outputs[0].masks.masks[0].cpu().numpy() * 255).astype("uint8")
-    #BW image with same dimention of initial image
-    bw=cv2.resize(bw, dim, interpolation = cv2.INTER_AREA)
-    img=img_res_toshow
-    contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
-    for i, c in enumerate(contours):
-        # Calculate the area of each contour
-        area = cv2.contourArea(c)
-
-        # Ignore contours that are too small or too large
-        if area < 3700 or 100000 < area:
-            continue
-
-        # Draw each contour only for visualisation purposes
-        cv2.drawContours(img, contours, i, (0, 0, 255), 2)
-
-        # Find the orientation of each shape
-        angle_deg = getOrientation(c, img)
+    for object in range(len(outputs[0].masks.masks)):
+        #obtain BW image
+        bw=(outputs[0].masks.masks[object].cpu().numpy() * 255).astype("uint8")
+        #BW image with same dimention of initial image
+        bw=cv2.resize(bw, dim, interpolation = cv2.INTER_AREA)
+        img=img_res_toshow
+        contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
+        for i, c in enumerate(contours):
+            # Calculate the area of each contour
+            area = cv2.contourArea(c)
+
+            # Ignore contours that are too small or too large
+            if area < 2500 or 500000 < area:
+                continue
+
+            # Draw each contour only for visualisation purposes
+            cv2.drawContours(img, contours, i, (0, 0, 255), 2)
+
+            # Find the orientation of each shape
+            angle_deg = getOrientation(c, img)
 
     results = outputs[0].cpu().numpy()
     for i, det in enumerate(results.boxes.xyxy):
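
For reference, below is a minimal standalone sketch of the per-object loop this commit introduces: iterate over every predicted segmentation mask instead of indexing only mask 0, then filter and annotate the contours of each mask. It is an illustration under assumptions, not the app's exact code: the weights path "yolov8n-seg.pt" and input "sample.jpg" are placeholders, newer Ultralytics releases expose the masks as results[0].masks.data rather than .masks.masks, and the inline PCA step stands in for the app's getOrientation helper.

import cv2
import numpy as np
from math import atan2, degrees
from ultralytics import YOLO

model = YOLO("yolov8n-seg.pt")      # placeholder segmentation weights
img = cv2.imread("sample.jpg")      # placeholder input image
h, w = img.shape[:2]

outputs = model.predict(source=img)
masks = outputs[0].masks            # None when nothing was segmented
if masks is not None:
    for obj_idx in range(len(masks.data)):
        # One binary mask per detected object, rescaled to the input size
        bw = (masks.data[obj_idx].cpu().numpy() * 255).astype("uint8")
        bw = cv2.resize(bw, (w, h), interpolation=cv2.INTER_AREA)

        contours, _ = cv2.findContours(bw, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        for i, c in enumerate(contours):
            # Same area gate as the commit: skip tiny or huge contours
            area = cv2.contourArea(c)
            if area < 2500 or area > 500000:
                continue
            cv2.drawContours(img, contours, i, (0, 0, 255), 2)

            # PCA over the contour points; the first eigenvector is the
            # principal axis, whose angle approximates the object orientation
            pts = np.squeeze(c).astype(np.float64)
            _, eigvecs, _ = cv2.PCACompute2(pts, np.empty((0)))
            angle_deg = degrees(atan2(eigvecs[0, 1], eigvecs[0, 0]))

cv2.imwrite("annotated.jpg", img)

The structure mirrors the committed change: replacing the fixed index masks[0] with a loop over all masks is what lets the app annotate every detected object in a frame rather than only the first one.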