AlekseyCalvin
committed on
Commit
•
46c579e
1
Parent(s):
03f693b
Update app.py
Browse files
app.py
CHANGED
@@ -40,22 +40,22 @@ with open('loras.json', 'r') as f:
|
|
40 |
#base_model = "stabilityai/stable-diffusion-3.5-large"
|
41 |
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-3.5-large-turbo", torch_dtype=torch.bfloat16)
|
42 |
|
43 |
-
clipmodel = 'norm'
|
44 |
-
if clipmodel == "long":
|
45 |
-
model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
|
46 |
-
config = CLIPConfig.from_pretrained(model_id)
|
47 |
-
maxtokens = 248
|
48 |
-
if clipmodel == "norm":
|
49 |
-
model_id = "zer0int/CLIP-GmP-ViT-L-14"
|
50 |
-
config = CLIPConfig.from_pretrained(model_id)
|
51 |
-
maxtokens = 77
|
52 |
-
clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
|
53 |
-
clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
|
54 |
-
|
55 |
-
pipe.tokenizer = clip_processor.tokenizer
|
56 |
-
pipe.text_encoder = clip_model.text_model
|
57 |
-
pipe.tokenizer_max_length = maxtokens
|
58 |
-
pipe.text_encoder.dtype = torch.bfloat16
|
59 |
|
60 |
|
61 |
#pipe.transformer.to(memory_format=torch.channels_last)
|
@@ -197,7 +197,7 @@ with gr.Blocks(theme=gr.themes.Soft(), css=css) as app:
|
|
197 |
with gr.Column():
|
198 |
with gr.Row():
|
199 |
cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=.1, value=1.0)
|
200 |
-
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=
|
201 |
|
202 |
with gr.Row():
|
203 |
width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
|
|
|
40 |
#base_model = "stabilityai/stable-diffusion-3.5-large"
|
41 |
pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-3.5-large-turbo", torch_dtype=torch.bfloat16)
|
42 |
|
43 |
+
#clipmodel = 'norm'
|
44 |
+
#if clipmodel == "long":
|
45 |
+
# model_id = "zer0int/LongCLIP-GmP-ViT-L-14"
|
46 |
+
# config = CLIPConfig.from_pretrained(model_id)
|
47 |
+
# maxtokens = 248
|
48 |
+
#if clipmodel == "norm":
|
49 |
+
# model_id = "zer0int/CLIP-GmP-ViT-L-14"
|
50 |
+
# config = CLIPConfig.from_pretrained(model_id)
|
51 |
+
# maxtokens = 77
|
52 |
+
#clip_model = CLIPModel.from_pretrained(model_id, torch_dtype=torch.bfloat16, config=config, ignore_mismatched_sizes=True).to("cuda")
|
53 |
+
#clip_processor = CLIPProcessor.from_pretrained(model_id, padding="max_length", max_length=maxtokens, ignore_mismatched_sizes=True, return_tensors="pt", truncation=True)
|
54 |
+
|
55 |
+
#pipe.tokenizer = clip_processor.tokenizer
|
56 |
+
#pipe.text_encoder = clip_model.text_model
|
57 |
+
#pipe.tokenizer_max_length = maxtokens
|
58 |
+
#pipe.text_encoder.dtype = torch.bfloat16
|
59 |
|
60 |
|
61 |
#pipe.transformer.to(memory_format=torch.channels_last)
|
|
|
197 |
with gr.Column():
|
198 |
with gr.Row():
|
199 |
cfg_scale = gr.Slider(label="CFG Scale", minimum=0, maximum=20, step=.1, value=1.0)
|
200 |
+
steps = gr.Slider(label="Steps", minimum=1, maximum=50, step=1, value=4)
|
201 |
|
202 |
with gr.Row():
|
203 |
width = gr.Slider(label="Width", minimum=256, maximum=1536, step=64, value=1024)
|