osanseviero HF staff committed on
Commit
9a64f12
β€’
1 Parent(s): f92f202

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +5 -9
app.py CHANGED
@@ -10,10 +10,9 @@ import requests
10
 
11
# Load the CLIP model and its paired processor once at import time.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Two corpora keyed 0/1 (0 is selected for 'Unsplash' in predict, 1 otherwise).
# NOTE(review): key 0 pairs 'data.csv' with 'embeddings2.npy' and key 1 pairs
# 'data2.csv' with 'embeddings.npy' — confirm this cross-pairing is intentional.
df = {0: pd.read_csv('data.csv'), 1: pd.read_csv('data2.csv')}
embeddings = {0: np.load('embeddings2.npy'), 1: np.load('embeddings.npy')}
# L2-normalize every row so that dot products with unit text vectors are
# cosine similarities.
embeddings = {k: v / np.linalg.norm(v, axis=1, keepdims=True)
              for k, v in embeddings.items()}
17
 
18
  def compute_text_embeddings(list_of_strings):
19
  inputs = processor(text=list_of_strings, return_tensors="pt", padding=True)
@@ -27,13 +26,10 @@ def download_img(path):
27
  return local_path
28
 
29
def predict(query):
    """Return local file paths of the top-3 corpus images matching `query`.

    The text query is embedded with CLIP, scored by dot product against the
    pre-normalized image embeddings of the selected corpus, and the best
    matches are downloaded locally via `download_img`.
    """
    corpus = 'Movies'  # hard-wired corpus selector
    n_results = 3
    text_embeddings = compute_text_embeddings([query]).detach().numpy()
    # Corpus key 0 is reserved for 'Unsplash'; every other value maps to 1.
    k = 0 if corpus == 'Unsplash' else 1
    # Similarity of every image row against the single query column.
    scores = (embeddings[k] @ text_embeddings.T)[:, 0]
    # Indices of the n_results highest scores, best first.
    top = np.argsort(scores)[-1:-n_results - 1:-1]
    paths = [download_img(df[k].iloc[i]['path']) for i in top]
    print(paths)
    return paths
39
 
10
 
11
# Load the CLIP model and its paired processor once at import time.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Image metadata and precomputed image embeddings for the single corpus.
df = pd.read_csv('data2.csv')
embeddings_npy = np.load('embeddings.npy')
# Row-wise L2 normalization so dot products with unit text vectors act as
# cosine similarities.
embeddings = embeddings_npy / np.linalg.norm(embeddings_npy, axis=1, keepdims=True)
 
16
 
17
  def compute_text_embeddings(list_of_strings):
18
  inputs = processor(text=list_of_strings, return_tensors="pt", padding=True)
26
  return local_path
27
 
28
def predict(query):
    """Embed `query` with CLIP and return local paths of the 3 best images.

    Scores every pre-normalized image embedding against the query embedding,
    picks the top matches, downloads each via `download_img`, and returns the
    resulting local paths (also printed for logging).
    """
    n_results = 3
    text_embeddings = compute_text_embeddings([query]).detach().numpy()
    # Similarity of every image row against the single query column.
    scores = (embeddings @ text_embeddings.T)[:, 0]
    # Indices of the n_results highest scores, best first.
    top = np.argsort(scores)[-1:-n_results - 1:-1]
    paths = [download_img(df.iloc[i]['path']) for i in top]
    print(paths)
    return paths
35