Diangle committed
Commit 5f988b9
1 Parent(s): 8b512c1

Upload 2 files

Files changed (2)
  1. app.py +116 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,116 @@
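+ # Gradio demo: free-text video search over a WebVid-style database using
+ # CLIP4Clip text embeddings and FAISS (binary search plus cosine rerank).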
+ import gradio
+ import os
+ import numpy as np
+ import pandas as pd
+ from IPython import display
+ import faiss
+ import torch
+ from transformers import AutoTokenizer, CLIPTextModelWithProjection
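+
+ # Pre-computed CLIP4Clip visual features for every video in the database:
+ # full-precision features (used for the cosine rerank) and a bit-packed
+ # binary copy (used for fast Hamming search), both stored under DATA_PATH.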
+ DATA_PATH = './data'
+
+ ft_visual_features_file = DATA_PATH + '/features/features_Cl4Cl_ckpt_webvid_retrieval_looseType_bs26_gpus2_lr7_150k_finalsample/dataset_v1_visual_features_database.npy'
+ binary_visual_features_file = DATA_PATH + '/features/features_Cl4Cl_ckpt_webvid_retrieval_looseType_bs26_gpus2_lr7_150k_finalsample_binary20/dataset_v1_visual_features_database_packed.npy'
+ ft_visual_features_database = np.load(ft_visual_features_file)
+ binary_visual_features = np.load(binary_visual_features_file)
+
+ # Per-video metadata; 'contentUrl' and 'name' are used for display.
+ database_csv_path = os.path.join(DATA_PATH, 'dataset_v1.csv')
+ database_df = pd.read_csv(database_csv_path)
+
+ # Notebook helper: render database rows as inline HTML <video> players.
+ # (Gradio can display the 'contentUrl' links directly, so the interface
+ # below does not use this.)
+ def display_videos(display_df):
+     display_path_list = display_df['contentUrl'].to_list()
+     display_text_list = display_df['name'].to_list()
+     html = ''
+     for path, text in zip(display_path_list, display_text_list):
+         html_line = '<video autoplay loop muted> <source src="{}" type="video/mp4"> </video> <div class="caption">{}</div><br/>'.format(path, text)
+         html += html_line
+     return display.HTML(html)
+
+
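+ # FAISS-backed nearest-neighbor search over the video features. Supports
+ # exact cosine search (IndexFlatIP over L2-normalized vectors) and fast
+ # binary search (IndexBinaryFlat over bit-packed sign bits), optionally
+ # followed by a full-precision rerank of the top binary candidates.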
+ class NearestNeighbors:
+     """
+     Nearest-neighbor search over a FAISS index.
+     """
+     def __init__(self, n_neighbors=10, metric='cosine', rerank_from=-1):
+         """
+         metric = 'cosine' / 'binary'
+         If metric != 'cosine' and rerank_from > n_neighbors, a cosine rerank
+         is performed on the top rerank_from candidates.
+         """
+         self.n_neighbors = n_neighbors
+         self.metric = metric
+         self.rerank_from = rerank_from
+
+     def normalize(self, a):
+         # L2-normalize rows so inner products become cosine similarities.
+         return a / np.sqrt(np.sum(a**2, axis=1, keepdims=True))
+
+     def fit(self, data, o_data=None):
+         if self.metric == 'cosine':
+             data = self.normalize(data)
+             self.index = faiss.IndexFlatIP(data.shape[1])
+         elif self.metric == 'binary':
+             # Keep full-precision features around for the optional rerank.
+             self.o_data = data if o_data is None else o_data
+             # data is assumed to be already bit-packed (np.packbits), so
+             # each byte holds 8 feature dimensions.
+             self.index = faiss.IndexBinaryFlat(data.shape[1]*8)
+         self.index.add(np.ascontiguousarray(data))
+
+     def kneighbors(self, q_data):
+         if self.metric == 'cosine':
+             print('cosine search')
+             q_data = self.normalize(q_data)
+             sim, idx = self.index.search(q_data, self.n_neighbors)
+         elif self.metric == 'binary':
+             print('binary search')
+             # Binarize and bit-pack the queries to match the binary index.
+             bq_data = np.packbits((q_data > 0.0).astype(bool), axis=1)
+             print(bq_data.shape, self.index.d)
+             sim, idx = self.index.search(bq_data, max(self.rerank_from, self.n_neighbors))
+
+             if self.rerank_from > self.n_neighbors:
+                 # Rerank the binary candidates by full-precision inner product
+                 # against the original features.
+                 sim_float = np.zeros([len(q_data), self.rerank_from], dtype=float)
+                 for i, q in enumerate(q_data):
+                     candidates = np.take_along_axis(self.o_data, idx[i:i+1,:].T, axis=0)
+                     sim_float[i,:] = q @ candidates.T
+                     sort_idx = np.argsort(sim_float[i,:])[::-1]
+                     sim_float[i,:] = sim_float[i,:][sort_idx]
+                     idx[i,:] = idx[i,:][sort_idx]
+                 sim = sim_float[:,:self.n_neighbors]
+                 idx = idx[:,:self.n_neighbors]
+
+         return sim, idx
83
+
84
+
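+ # Embed a free-text query with the CLIP4Clip text encoder and return the
+ # URLs of the closest videos in the database.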
85
+ def search(search_sentence):
86
+ my_model = CLIPTextModelWithProjection.from_pretrained("Diangle/clip4clip-webvid")
87
+ tokenizer = AutoTokenizer.from_pretrained("Diangle/clip4clip-webvid")
88
+
89
+
90
+ inputs = tokenizer(text=search_sentence , return_tensors="pt", padding=True)
91
+
92
+ outputs = my_model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], return_dict=False)
93
+
94
+ text_projection = my_model.state_dict()['text_projection.weight']
95
+ text_embeds = outputs[1] @ text_projection
96
+ final_output = text_embeds[torch.arange(text_embeds.shape[0]), inputs["input_ids"].argmax(dim=-1)]
97
+
98
+
99
+ final_output = final_output / final_output.norm(dim=-1, keepdim=True)
100
+ final_output = final_output.cpu().detach().numpy()
101
+ sequence_output = final_output / np.sum(final_output**2, axis=1, keepdims=True)
102
+
103
+ nn_search = NearestNeighbors(n_neighbors=5, metric='binary', rerank_from=100)
104
+ nn_search.fit(np.packbits((ft_visual_features_database > 0.0).astype(bool), axis=1), o_data=ft_visual_features_database)
105
+ sims, idxs = nn_search.kneighbors(sequence_output)
106
+ return database_df.iloc[idxs[0]]['contentUrl'].to_list()
+
+
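+ # Build the demo UI: one text input, five video outputs (one per result).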
+ gradio.close_all()
+
+ interface = gradio.Interface(search,
+                              inputs=[gradio.Textbox()],
+                              outputs=[gradio.Video(format='mp4') for _ in range(5)],
+                              title='Video Search Demo',
+                              description='Type some text to search by content within a video database!',
+                              ).launch()
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ transformers
+ torch
+ faiss-cpu  # official FAISS wheels are published as faiss-cpu / faiss-gpu
+ IPython
+ pandas
+ numpy
+ gradio