RobotJelly committed on
Commit
950373e
1 Parent(s): 207f77e
Files changed (1) hide show
  1. app.py +8 -14
app.py CHANGED
@@ -32,9 +32,6 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
32
  #photo_features = np.load("./features.npy")
33
 
34
  IMAGES_DIR = './photos/'
35
-
36
-
37
-
38
  #def show_output_image(matched_images) :
39
  #image=[]
40
  #for photo_id in matched_images:
@@ -77,12 +74,8 @@ emb_filename = 'unsplash-25k-photos-embeddings.pkl'
77
  with open(emb_filename, 'rb') as fIn:
78
  img_names, img_emb = pickle.load(fIn)
79
 
80
- def display_matches(similarity):
81
- best_matched_images = []
82
- indices = torch.topk(input=similarity, k=4, dim=0)
83
- for best_img_idx in indices:
84
- img = Image.open(IMAGES_DIR / img_names[best_img_idx])
85
- best_matched_images.append(img)
86
  return best_matched_images
87
 
88
  def image_search(search_text, search_image, option):
@@ -93,14 +86,14 @@ def image_search(search_text, search_image, option):
93
  if option == "Text-To-Image" :
94
  # Extracting text features embeddings
95
  #text_features = encode_search_query(search_text, model, device)
96
- text_emb = model.encode([search_text], convert_to_tensor=True)
97
 
98
  # Find the matched Images
99
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
100
- similarity = util.cos_sim(text_emb, img_emb)
101
 
102
  # top 4 highest ranked images
103
- return display_matches(similarity)
104
  elif option == "Image-To-Image":
105
  # Input Image for Search
106
  #search_image = Image.fromarray(search_image.astype('uint8'), 'RGB')
@@ -117,9 +110,10 @@ def image_search(search_text, search_image, option):
117
 
118
  # Find the matched Images
119
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
120
- similarity = util.cos_sim(image_emb, img_emb)
 
121
 
122
- return display_matches(similarity)
123
 
124
  gr.Interface(fn=image_search,
125
  inputs=[gr.inputs.Textbox(lines=7, label="Input Text"),
32
  #photo_features = np.load("./features.npy")
33
 
34
  IMAGES_DIR = './photos/'
 
 
 
35
  #def show_output_image(matched_images) :
36
  #image=[]
37
  #for photo_id in matched_images:
74
  with open(emb_filename, 'rb') as fIn:
75
  img_names, img_emb = pickle.load(fIn)
76
 
77
+ def display_matches(indices):
78
+ best_matched_images = [Image.open(os.path.join("photos/", img_names[best_img['corpus_id']])) for best_img in indices]
 
 
 
 
79
  return best_matched_images
80
 
81
  def image_search(search_text, search_image, option):
86
  if option == "Text-To-Image" :
87
  # Extracting text features embeddings
88
  #text_features = encode_search_query(search_text, model, device)
89
+ text_emb = model.encode([search_text], convert_to_tensor=True)
90
 
91
  # Find the matched Images
92
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
93
+ matched_results = util.semantic_search(text_emb, img_emb, 4)[0]
94
 
95
  # top 4 highest ranked images
96
+ return display_matches(matched_results)
97
  elif option == "Image-To-Image":
98
  # Input Image for Search
99
  #search_image = Image.fromarray(search_image.astype('uint8'), 'RGB')
110
 
111
  # Find the matched Images
112
  #matched_images = find_matches(text_features, photo_features, photo_ids, 4)
113
+ #similarity = util.cos_sim(image_emb, img_emb)
114
+ matched_results = util.semantic_search(image_emb, img_emb, 4)[0]
115
 
116
+ return display_matches(matched_results)
117
 
118
  gr.Interface(fn=image_search,
119
  inputs=[gr.inputs.Textbox(lines=7, label="Input Text"),