pharmapsychotic committed • Commit a8f45ac
Parent(s): 3ff6909
Temporarily disable model switching since it's dying on new 14GB RAM limit
app.py CHANGED
@@ -3,30 +3,31 @@ import gradio as gr
 from clip_interrogator import Config, Interrogator
 from share_btn import community_icon_html, loading_icon_html, share_js
 
-MODELS = ['ViT-L (best for Stable Diffusion 1.*)', 'ViT-H (best for Stable Diffusion 2.*)']
+MODELS = ['ViT-L (best for Stable Diffusion 1.*)']#, 'ViT-H (best for Stable Diffusion 2.*)']
 
 # load BLIP and ViT-L https://huggingface.co/openai/clip-vit-large-patch14
 config = Config(clip_model_name="ViT-L-14/openai")
 ci_vitl = Interrogator(config)
-ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
+# ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
 
 # load ViT-H https://huggingface.co/laion/CLIP-ViT-H-14-laion2B-s32B-b79K
-config.blip_model = ci_vitl.blip_model
-config.clip_model_name = "ViT-H-14/laion2b_s32b_b79k"
-ci_vith = Interrogator(config)
-ci_vith.clip_model = ci_vith.clip_model.to("cpu")
+# config.blip_model = ci_vitl.blip_model
+# config.clip_model_name = "ViT-H-14/laion2b_s32b_b79k"
+# ci_vith = Interrogator(config)
+# ci_vith.clip_model = ci_vith.clip_model.to("cpu")
 
 
 def image_analysis(image, clip_model_name):
     # move selected model to GPU and other model to CPU
-    if clip_model_name == MODELS[0]:
-        ci_vith.clip_model = ci_vith.clip_model.to("cpu")
-        ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device)
-        ci = ci_vitl
-    else:
-        ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
-        ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device)
-        ci = ci_vith
+    # if clip_model_name == MODELS[0]:
+    #     ci_vith.clip_model = ci_vith.clip_model.to("cpu")
+    #     ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device)
+    #     ci = ci_vitl
+    # else:
+    #     ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
+    #     ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device)
+    #     ci = ci_vith
+    ci = ci_vitl
 
     image = image.convert('RGB')
     image_features = ci.image_to_features(image)
@@ -48,14 +49,15 @@ def image_analysis(image, clip_model_name):
 
 def image_to_prompt(image, clip_model_name, mode):
     # move selected model to GPU and other model to CPU
-    if clip_model_name == MODELS[0]:
-        ci_vith.clip_model = ci_vith.clip_model.to("cpu")
-        ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device)
-        ci = ci_vitl
-    else:
-        ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
-        ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device)
-        ci = ci_vith
+    # if clip_model_name == MODELS[0]:
+    #     ci_vith.clip_model = ci_vith.clip_model.to("cpu")
+    #     ci_vitl.clip_model = ci_vitl.clip_model.to(ci_vitl.device)
+    #     ci = ci_vitl
+    # else:
+    #     ci_vitl.clip_model = ci_vitl.clip_model.to("cpu")
+    #     ci_vith.clip_model = ci_vith.clip_model.to(ci_vith.device)
+    #     ci = ci_vith
+    ci = ci_vitl
 
     ci.config.blip_num_beams = 64
     ci.config.chunk_size = 2048
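
The disabled branches kept both Interrogators resident in the process and only shuttled their CLIP weights between CPU and GPU per request, so RAM always held two models at once. A minimal sketch of one way to restore switching under the cap, assuming the Config/Interrogator API shown in the diff plus standard gc/torch behavior; the CLIP_NAMES mapping and get_interrogator helper are hypothetical names, not part of this Space:

import gc

import torch
from clip_interrogator import Config, Interrogator

# Display name -> clip_interrogator model name, per the MODELS list above.
CLIP_NAMES = {
    'ViT-L (best for Stable Diffusion 1.*)': "ViT-L-14/openai",
    'ViT-H (best for Stable Diffusion 2.*)': "ViT-H-14/laion2b_s32b_b79k",
}

ci = None            # the single resident Interrogator
current_name = None  # display name of the model it currently holds


def get_interrogator(model_name):
    """Return an Interrogator for model_name, keeping at most one loaded."""
    global ci, current_name
    if model_name != current_name:
        # Drop the old Interrogator and reclaim its memory *before* loading
        # the next one, so two models are never resident at the same time.
        ci = None
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        ci = Interrogator(Config(clip_model_name=CLIP_NAMES[model_name]))
        current_name = model_name
    return ci

The trade-off is a full reload on every switch (BLIP included, since it is no longer shared via config.blip_model), bought in exchange for a single-model peak footprint.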