Vijish committed on
Commit
c64f929
1 Parent(s): c9e7eac

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -4
app.py CHANGED
@@ -21,7 +21,7 @@ import glob
21
 
22
  # Model
23
 
24
- def predict(text,img):
25
  model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
26
  from PIL import Image
27
  img1 = Image.open(img).convert("RGB")
@@ -75,7 +75,7 @@ def predict(text,img):
75
 
76
  with torch.no_grad():
77
  # Encode and normalize the description using CLIP
78
- text_encoded = model.encode_text(clip.tokenize([text]).to(device))
79
  text_encoded /= text_encoded.norm(dim=-1, keepdim=True)
80
 
81
  similarity = text_encoded.cpu().numpy() @ image_features.cpu().numpy().T
@@ -85,11 +85,11 @@ def predict(text,img):
85
  #ipyplot.plot_images(imgs,img_width=350)
86
  return imgs[0]
87
 
88
- text = gr.inputs.Textbox(lines=1, label="Text query", placeholder="Introduce the search text...",)
89
  #img = gr.inputs.Image()
90
 
91
  #img = "image"
92
 
93
 
94
 
95
- gr.Interface(predict, ["image", text], outputs="image", title='Search inside image').launch();
 
21
 
22
  # Model
23
 
24
+ def predict(img,text):
25
  model = torch.hub.load('ultralytics/yolov5', 'yolov5s')
26
  from PIL import Image
27
  img1 = Image.open(img).convert("RGB")
 
75
 
76
  with torch.no_grad():
77
  # Encode and normalize the description using CLIP
78
+ text_encoded = model.encode_text(clip.tokenize(text).to(device))
79
  text_encoded /= text_encoded.norm(dim=-1, keepdim=True)
80
 
81
  similarity = text_encoded.cpu().numpy() @ image_features.cpu().numpy().T
 
85
  #ipyplot.plot_images(imgs,img_width=350)
86
  return imgs[0]
87
 
88
+ #text = gr.inputs.Textbox(lines=1, label="Text query", placeholder="Introduce the search text...",)
89
  #img = gr.inputs.Image()
90
 
91
  #img = "image"
92
 
93
 
94
 
95
+ gr.Interface(predict, ["image", gr.inputs.Textbox(lines=1, label="Text query", placeholder="Introduce the search text...",)], outputs="image", title='Search inside image').launch();