akhaliq (HF staff) committed
Commit 568e936
1 Parent(s): 52c53fe

Update app.py

Files changed (1)
  1. app.py +15 -43
app.py CHANGED
@@ -48,8 +48,8 @@ def preprocess(image):
 # based on the build flags) when instantiating InferenceSession.
 # For example, if NVIDIA GPU is available and ORT Python package is built with CUDA, then call API as following:
 # onnxruntime.InferenceSession(path/to/model, providers=['CUDAExecutionProvider'])
-os.system("wget https://github.com/AK391/models/raw/main/vision/object_detection_segmentation/mask-rcnn/model/MaskRCNN-10.onnx")
-sess = rt.InferenceSession("MaskRCNN-10.onnx")
+os.system("wget https://github.com/AK391/models/raw/main/vision/object_detection_segmentation/faster-rcnn/model/FasterRCNN-10.onnx")
+sess = rt.InferenceSession("FasterRCNN-10.onnx")
 
 outputs = sess.get_outputs()
 
@@ -57,51 +57,22 @@ outputs = sess.get_outputs()
 classes = [line.rstrip('\n') for line in open('coco_classes.txt')]
 
 
-def display_objdetect_image(image, boxes, labels, scores, masks, score_threshold=0.7):
+def display_objdetect_image(image, boxes, labels, scores, score_threshold=0.7):
     # Resize boxes
     ratio = 800.0 / min(image.size[0], image.size[1])
     boxes /= ratio
 
     _, ax = plt.subplots(1, figsize=(12,9))
-
     image = np.array(image)
+    ax.imshow(image)
 
-    for mask, box, label, score in zip(masks, boxes, labels, scores):
-        # Showing boxes with score > 0.7
-        if score <= score_threshold:
-            continue
-
-        # Finding contour based on mask
-        mask = mask[0, :, :, None]
-        int_box = [int(i) for i in box]
-        mask = cv2.resize(mask, (int_box[2]-int_box[0]+1, int_box[3]-int_box[1]+1))
-        mask = mask > 0.5
-        im_mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
-        x_0 = max(int_box[0], 0)
-        x_1 = min(int_box[2] + 1, image.shape[1])
-        y_0 = max(int_box[1], 0)
-        y_1 = min(int_box[3] + 1, image.shape[0])
-        mask_y_0 = max(y_0 - box[1], 0)
-        mask_y_1 = mask_y_0 + y_1 - y_0
-        mask_x_0 = max(x_0 - box[0], 0)
-        mask_x_1 = mask_x_0 + x_1 - x_0
-        im_mask[y_0:y_1, x_0:x_1] = mask[
-            mask_y_0 : mask_y_1, mask_x_0 : mask_x_1
-        ]
-        im_mask = im_mask[:, :, None]
-
-        # OpenCV version 4.x
-        contours, hierarchy = cv2.findContours(
-            im_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
-        )
-
-        image = cv2.drawContours(image, contours, -1, 25, 3)
-
-        rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor='b', facecolor='none')
-        ax.annotate(classes[label] + ':' + str(np.round(score, 2)), (box[0], box[1]), color='w', fontsize=12)
-        ax.add_patch(rect)
+    # Showing boxes with score > 0.7
+    for box, label, score in zip(boxes, labels, scores):
+        if score > score_threshold:
+            rect = patches.Rectangle((box[0], box[1]), box[2] - box[0], box[3] - box[1], linewidth=1, edgecolor='b', facecolor='none')
+            ax.annotate(classes[label] + ':' + str(np.round(score, 2)), (box[0], box[1]), color='w', fontsize=12)
+            ax.add_patch(rect)
 
-    ax.imshow(image)
     plt.axis('off')
     plt.savefig('out.png', bbox_inches='tight')
 
@@ -114,11 +85,12 @@ def inference(img):
     output_names = list(map(lambda output: output.name, outputs))
     input_name = sess.get_inputs()[0].name
 
-    boxes, labels, scores, masks = sess.run(output_names, {input_name: input_tensor})
-    display_objdetect_image(input_image, boxes, labels, scores, masks)
+    boxes, labels, scores = sess.run(output_names, {input_name: input_tensor})
+    display_objdetect_image(input_image, boxes, labels, scores)
+
    return 'out.png'
 
-title="Mask R-CNN"
-description="This model is a real-time neural network for object instance segmentation that detects 80 different classes."
+title="Faster R-CNN"
+description="This model is a real-time neural network for object detection that detects 80 different classes."
 examples=[["examplemask-rcnn.jpeg"]]
 gr.Interface(inference,gr.inputs.Image(type="filepath"),gr.outputs.Image(type="file"),title=title,description=description,examples=examples).launch(enable_queue=True)
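
The context lines kept in the first hunk note that an execution provider can be requested when the InferenceSession is created. A minimal sketch of that selection, not part of this commit, assuming a standard ONNX Runtime install (get_available_providers() and the provider names are stock ORT API; the rt alias matches the one used in app.py):

import onnxruntime as rt

# Not from this diff: use CUDA when the installed ORT build exposes it, otherwise fall back to CPU.
available = rt.get_available_providers()
provider = "CUDAExecutionProvider" if "CUDAExecutionProvider" in available else "CPUExecutionProvider"
sess = rt.InferenceSession("FasterRCNN-10.onnx", providers=[provider])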
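
After this change the app does pure box detection: FasterRCNN-10 returns boxes, labels and scores (no masks), and display_objdetect_image only draws thresholded rectangles. A condensed sketch of that path follows; the preprocessing is an assumption based on the ONNX model zoo description of FasterRCNN-10 (shorter side scaled to 800, BGR, mean subtraction, padding to multiples of 32), since the app's own preprocess() is referenced in the hunk header but not shown in this diff.

import numpy as np
import onnxruntime as rt
from PIL import Image

def preprocess(image):
    # Assumed FasterRCNN-10 preprocessing, not taken from this diff:
    # scale the shorter side to 800 px (the same 800.0 ratio used when un-scaling boxes),
    ratio = 800.0 / min(image.size[0], image.size[1])
    image = image.resize((int(ratio * image.size[0]), int(ratio * image.size[1])), Image.BILINEAR)
    # convert HWC RGB to CHW BGR float32 and subtract the per-channel means,
    data = np.array(image).astype(np.float32)[:, :, ::-1].transpose(2, 0, 1)
    data -= np.array([102.9801, 115.9465, 122.7717], dtype=np.float32).reshape(3, 1, 1)
    # then zero-pad height and width up to multiples of 32.
    pad_h = int(np.ceil(data.shape[1] / 32.0) * 32)
    pad_w = int(np.ceil(data.shape[2] / 32.0) * 32)
    padded = np.zeros((3, pad_h, pad_w), dtype=np.float32)
    padded[:, :data.shape[1], :data.shape[2]] = data
    return padded

sess = rt.InferenceSession("FasterRCNN-10.onnx")
image = Image.open("examplemask-rcnn.jpeg")
input_tensor = preprocess(image)
output_names = [output.name for output in sess.get_outputs()]
input_name = sess.get_inputs()[0].name
# Three outputs after this commit: boxes (N, 4), labels (N,), scores (N,).
boxes, labels, scores = sess.run(output_names, {input_name: input_tensor})
# Boxes come back in the resized coordinate frame; display_objdetect_image divides
# them by the same ratio before drawing rectangles for scores above 0.7.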