Update app.py
app.py CHANGED

@@ -273,17 +273,15 @@ device = "cuda:0" if torch.cuda.is_available() else "cpu"
 # from clip_retrieval.load_clip import load_clip, get_tokenizer
 # model, preprocess = load_clip(clip_model, use_jit=True, device=device)
 # tokenizer = get_tokenizer(clip_model)
-
-
-
-
-
-
-
-
-
-# results = clip_retrieval_client.query(text="an image of a cat")
-# results[0]
+# clip_retrieval_client = ClipClient(
+# url=clip_retrieval_service_url,
+# indice_name=clip_model_id,
+# use_safety_model = False,
+# use_violence_detector = False,
+# # modality = Modality.TEXT,
+# )
+model, preprocess, tokenizer, clip_retrieval_client = None, None, None, None
+

 examples = [
 # ["SohoJoeEth.jpeg", "Ray-Liotta-Goodfellas.jpg", "SohoJoeEth + Ray.jpeg"],