IbrahimHasani committed
Commit
ab46ec6
1 Parent(s): 0186279

Update app.py

Files changed (1)
  1. app.py +3 -31
app.py CHANGED
@@ -2,25 +2,22 @@ import gradio as gr
 import torch
 import numpy as np
 from transformers import OwlViTProcessor, OwlViTForObjectDetection
-from torchvision import transforms
 from PIL import Image, ImageDraw
 import cv2
 import torch.nn.functional as F
 import tempfile
-import os
-from SuperGluePretrainedNetwork.models.matching import Matching
-from SuperGluePretrainedNetwork.models.utils import read_image
 import matplotlib.pyplot as plt
 import matplotlib.cm as cm
 from io import BytesIO
+from SuperGluePretrainedNetwork.models.matching import Matching
+from SuperGluePretrainedNetwork.models.utils import read_image
 
 # Set device
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
 # Load models
-mixin = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")
+model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32").to(device)
 processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
-model = mixin.to(device)
 
 matching = Matching({
     'superpoint': {'nms_radius': 4, 'keypoint_threshold': 0.005, 'max_keypoints': 1024},
@@ -28,14 +25,6 @@ matching = Matching({
 }).eval().to(device)
 
 # Utility functions
-def preprocess_image(image):
-    transform = transforms.Compose([
-        transforms.Resize((224, 224)),
-        transforms.ToTensor(),
-        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
-    ])
-    return transform(image).unsqueeze(0)
-
 def save_array_to_temp_image(arr):
     rgb_arr = cv2.cvtColor(arr, cv2.COLOR_BGR2RGB)
     img = Image.fromarray(rgb_arr)
@@ -45,24 +34,7 @@ def save_array_to_temp_image(arr):
     img.save(temp_file_name)
     return temp_file_name
 
-def stitch_images(images):
-    if not images:
-        return Image.new('RGB', (100, 100), color='gray')
-
-    max_width = max([img.width for img in images])
-    total_height = sum(img.height for img in images)
-
-    composite = Image.new('RGB', (max_width, total_height))
-
-    y_offset = 0
-    for img in images:
-        composite.paste(img, (0, y_offset))
-        y_offset += img.height
-
-    return composite
-
 def unified_matching_plot2(image0, image1, kpts0, kpts1, mkpts0, mkpts1, color, text, path=None, show_keypoints=False, fast_viz=False, opencv_display=False, opencv_title='matches', small_text=[]):
-    # Resize images to have the same height
     height = min(image0.shape[0], image1.shape[0])
     image0_resized = cv2.resize(image0, (int(image0.shape[1] * height / image0.shape[0]), height))
     image1_resized = cv2.resize(image1, (int(image1.shape[1] * height / image1.shape[0]), height))
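
For context, the model and processor loaded in the diff above follow the standard Hugging Face zero-shot object detection pattern for OwlViT. The snippet below is a minimal usage sketch, not part of this commit: the image path and text queries are placeholders, and it assumes the model, processor, and device objects defined at the top of app.py.

# Minimal sketch of how the loaded OwlViT model/processor are typically used.
# "example.jpg" and the query texts are illustrative placeholders, not values from app.py.
import torch
from PIL import Image

image = Image.open("example.jpg").convert("RGB")
texts = [["a photo of a cat"]]

inputs = processor(text=texts, images=image, return_tensors="pt").to(device)
with torch.no_grad():
    outputs = model(**inputs)

# Map raw predictions back to the original image coordinates.
target_sizes = torch.tensor([image.size[::-1]], device=device)
results = processor.post_process_object_detection(
    outputs=outputs, threshold=0.1, target_sizes=target_sizes
)[0]

for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(f"{texts[0][label.item()]}: {score:.2f} at {box.tolist()}")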