Update app.py
app.py CHANGED
@@ -51,14 +51,17 @@ ENDING = """For search acceleration capabilities, please refer to [Searchium.ai]
 """
 
 
-DATA_PATH = './
+DATA_PATH = './data'
 
 ft_visual_features_file = DATA_PATH + '/video_dataset_visual_features_database.npy'
+ft_visual_features_file_bin = DATA_PATH + '/video_dataset_visual_features_database_packed.npy'
 
 #load database features:
-ft_visual_features_database = np.load(ft_visual_features_file)
+ft_visual_features_database_bin = np.load(ft_visual_features_file_bin)
+ft_visual_features_database = np.memmap(ft_visual_features_file, dtype='float32', mode='r',
+                                        shape=(ft_visual_features_database_bin.shape[0], ft_visual_features_database_bin.shape[1]*8))
 
-database_csv_path = os.path.join(DATA_PATH, '
+database_csv_path = os.path.join(DATA_PATH, 'video_dataset.csv')
 database_df = pd.read_csv(database_csv_path)
 
 class NearestNeighbors:
@@ -117,7 +120,7 @@ model = CLIPTextModelWithProjection.from_pretrained("Searchium-ai/clip4clip-webv
 tokenizer = CLIPTokenizer.from_pretrained("Searchium-ai/clip4clip-webvid150k")
 
 nn_search = NearestNeighbors(n_neighbors=5, metric='binary', rerank_from=100)
-nn_search.fit(
+nn_search.fit(ft_visual_features_database_bin, o_data=ft_visual_features_database)
 
 def search(search_sentence):
     inputs = tokenizer(text=search_sentence , return_tensors="pt")
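The memmap in this change reads the float32 matrix back with width ft_visual_features_database_bin.shape[1]*8 because each uint8 byte of the packed file holds 8 binarized feature dimensions. A rough sketch of how such a packed file could be produced (the preprocessing is not part of this commit, and sign-based binarization is an assumption):

import numpy as np

# Hypothetical preprocessing sketch -- not part of this commit.
# Assumes sign binarization; the actual packing scheme may differ.
features = np.load('video_dataset_visual_features_database.npy')  # (N, D) float32

# One bit per dimension (positive -> 1, else 0), packed 8 bits per byte.
# The result has shape (N, D // 8), so app.py recovers the float width
# as packed.shape[1] * 8 when memmapping.
packed = np.packbits((features > 0).astype(np.uint8), axis=1)
np.save('video_dataset_visual_features_database_packed.npy', packed)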
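nn_search.fit() now receives both representations: the packed binary matrix for a cheap first pass and, through o_data, the memmapped float features for re-ranking the shortlist. A minimal sketch of that two-stage pattern, assuming Hamming distance on the packed bits and cosine similarity for the re-rank (the NearestNeighbors class in app.py may implement this differently):

import numpy as np

# Illustrative two-stage retrieval -- an assumption about what
# metric='binary' with rerank_from=100 amounts to, not app.py's code.
def search_with_rerank(query, packed_db, float_db, rerank_from=100, n_neighbors=5):
    # Stage 1: pack the query bits, then rank the whole database by
    # Hamming distance (popcount of the XOR-ed bytes).
    q_packed = np.packbits((query > 0).astype(np.uint8))
    hamming = np.unpackbits(packed_db ^ q_packed, axis=1).sum(axis=1)
    candidates = np.argsort(hamming)[:rerank_from]

    # Stage 2: re-rank only the shortlist with exact cosine similarity
    # on the float32 features (touches just rerank_from memmap rows).
    cand = np.asarray(float_db[candidates])
    scores = cand @ query / (np.linalg.norm(cand, axis=1) * np.linalg.norm(query))
    return candidates[np.argsort(-scores)[:n_neighbors]]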