Commit 4526494 • "image addded"
kushagra124 committed • 1 parent: 37f3e21
Files changed: Clip_model_notebook.ipynb (+0 −32), app.py (+2 −2)
Clip_model_notebook.ipynb CHANGED
@@ -2446,38 +2446,6 @@
    "source": [
     "plt.imshow(display_images(rgb_image,detections=predictions,prompt='Cars'))"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 8,
-   "metadata": {
-    "id": "y1DuKm34myry"
-   },
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       "['bed', 'door', 'window', 'cars']"
-      ]
-     },
-     "execution_count": 8,
-     "metadata": {},
-     "output_type": "execute_result"
-    }
-   ],
-   "source": [
-    "a = 'bed ,door, window, cars '\n",
-    "v = a.split(',')\n",
-    "v = list(map(lambda x: x.strip(),v))\n",
-    "v"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
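For reference, the deleted cell exercised the comma-separated prompt parsing that app.py relies on: split the input string on ',' and strip stray whitespace from each label. A minimal standalone sketch of that pattern (the parse_prompts helper name is illustrative, not from the repo):

# Sketch of the prompt-parsing pattern shown in the removed notebook cell.
# parse_prompts is a hypothetical helper; the notebook did this inline.
def parse_prompts(raw):
    # split on commas, then drop whitespace around each label
    return [token.strip() for token in raw.split(',')]

print(parse_prompts('bed ,door, window, cars '))
# -> ['bed', 'door', 'window', 'cars']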
app.py CHANGED
@@ -49,7 +49,7 @@ def detect_using_clip(image,prompts=[],threshould=0.4):
         # extract countours from the image
         lbl_0 = label(predicted_image)
         props = regionprops(lbl_0)
-        model_detections[prompt] = [rescale_bbox(prop.bbox,orig_image_shape=image.shape[:2],model_shape=predicted_image.shape[0]) for prop in props]
+        model_detections[prompt.lower()] = [rescale_bbox(prop.bbox,orig_image_shape=image.shape[:2],model_shape=predicted_image.shape[0]) for prop in props]
 
     return model_detections
 
@@ -59,7 +59,7 @@ def visualize_images(image,detections,prompt):
     if prompt not in detections.keys():
         print("prompt not in query ..")
         return image_copy
-    for bbox in detections[prompt]:
+    for bbox in detections[prompt.lower()]:
         cv2.rectangle(image_copy, (int(bbox[1]), int(bbox[0])), (int(bbox[3]), int(bbox[2])), (255, 0, 0), 2)
         cv2.putText(image_copy,str(prompt),(int(bbox[1]), int(bbox[0])),cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
     return image_copy
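Both hunks normalize the detection-dictionary key with str.lower(): detect_using_clip now stores results under lowercased prompts, and visualize_images looks them up the same way, so a differently cased query ('Cars' vs. 'cars') no longer raises KeyError. Note that the guard "if prompt not in detections.keys()" still tests the raw prompt, so a mixed-case prompt would return early before reaching the lowered lookup. A toy sketch of the lookup behavior (the bbox values are invented):

# Toy illustration of the key-normalization fix; data is made up.
detections = {}
for prompt in ['Cars', 'Door']:
    detections[prompt.lower()] = [(10, 20, 30, 40)]  # keys stored lowercased

query = 'CARS'
print(detections[query.lower()])  # found regardless of the query's casing
# Before this commit the key was the raw prompt, so detections['cars']
# raised KeyError when the stored key was 'Cars'.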