RobotJelly committed on
Commit
0f017d1
1 Parent(s): 38e30d4
Files changed (1) hide show
  1. app.py +11 -10
app.py CHANGED
@@ -31,7 +31,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
31
  # taking features vectors
32
  #photo_features = np.load("./features.npy")
33
 
34
- IMAGES_DIR = './photos'
35
  #def show_output_image(matched_images) :
36
  #image=[]
37
  #for photo_id in matched_images:
@@ -81,19 +81,19 @@ def display_matches(indices):
81
  def image_search(search_text, search_image, option):
82
 
83
  # Input Text Query
84
- #search_query = "The feeling when your program finally works"
85
-
86
  if option == "Text-To-Image" :
87
  # Extracting text features embeddings
88
  #text_features = encode_search_query(search_text, model, device)
89
  text_emb = model.encode([search_text], convert_to_tensor=True)
90
-
 
91
  # Find the matched Images
92
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
93
- matched_results = util.semantic_search(text_emb, img_emb, top_k=4)[0]
94
 
95
  # top 4 highest ranked images
96
- return display_matches(matched_results)
97
  elif option == "Image-To-Image":
98
  # Input Image for Search
99
  #search_image = Image.fromarray(search_image.astype('uint8'), 'RGB')
@@ -107,13 +107,14 @@ def image_search(search_text, search_image, option):
107
  #matched_images = find_matches(image_feature, photo_ids, 4)
108
 
109
  #image_emb = model.encode(Image.open(search_image), convert_to_tensor=True)
110
- image_emb = model.encode(Image.open(search_image))
111
  # Find the matched Images
112
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
113
  #similarity = util.cos_sim(image_emb, img_emb)
114
- matched_results = util.semantic_search(image_emb, img_emb, 4)[0]
115
-
116
- return display_matches(matched_results)
 
117
 
118
  gr.Interface(fn=image_search,
119
  inputs=[gr.inputs.Textbox(lines=7, label="Input Text"),
31
  # taking features vectors
32
  #photo_features = np.load("./features.npy")
33
 
34
+ IMAGES_DIR = Path("./photos/")
35
  #def show_output_image(matched_images) :
36
  #image=[]
37
  #for photo_id in matched_images:
81
  def image_search(search_text, search_image, option):
82
 
83
  # Input Text Query
84
+ #search_query = "The feeling when your program finally works"
 
85
  if option == "Text-To-Image" :
86
  # Extracting text features embeddings
87
  #text_features = encode_search_query(search_text, model, device)
88
  text_emb = model.encode([search_text], convert_to_tensor=True)
89
+ similarity = util.cos_sim(img_emb, text_emb)
90
+ return [Image.open(img_folder / img_names[top_k_best_image]) for top_k_best_image in torch.topk(similarity, 2, 0).indices]
91
  # Find the matched Images
92
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
93
+ #matched_results = util.semantic_search(text_emb, img_emb, top_k=4)[0]
94
 
95
  # top 4 highest ranked images
96
+ #return display_matches(matched_results)
97
  elif option == "Image-To-Image":
98
  # Input Image for Search
99
  #search_image = Image.fromarray(search_image.astype('uint8'), 'RGB')
107
  #matched_images = find_matches(image_feature, photo_ids, 4)
108
 
109
  #image_emb = model.encode(Image.open(search_image), convert_to_tensor=True)
110
+ #image_emb = model.encode(Image.open(search_image))
111
  # Find the matched Images
112
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
113
  #similarity = util.cos_sim(image_emb, img_emb)
114
+ #matched_results = util.semantic_search(image_emb, img_emb, 4)[0]
115
+ emb = model.encode([Image.fromarray(image)], convert_to_tensor=True)
116
+ similarity = util.cos_sim(img_emb, emb)
117
+ return [Image.open(IMAGES_DIR / img_names[top_k_best_image]) for top_k_best_image in torch.topk(similarity, 2, 0).indices]
118
 
119
  gr.Interface(fn=image_search,
120
  inputs=[gr.inputs.Textbox(lines=7, label="Input Text"),