Upload app.py
app.py CHANGED
@@ -64,7 +64,7 @@ def change_base_model(repo_id: str, cn_on: bool): # , progress=gr.Progress(track
         #clear_cache()
         #controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union_repo, torch_dtype=dtype).to(device)
         #controlnet = FluxMultiControlNetModel([controlnet_union]).to(device)
-        pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=dtype)
+        pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=dtype)#.to(device)
         #pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
         last_model = repo_id
         last_cn_on = cn_on
@@ -74,7 +74,7 @@ def change_base_model(repo_id: str, cn_on: bool): # , progress=gr.Progress(track
         #progress(0, desc=f"Loading model: {repo_id}")
         print(f"Loading model: {repo_id}")
         #clear_cache()
-        pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
+        pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)#, vae=taef1 .to(device)
         pipe.flux_pipe_call_that_returns_an_iterable_of_images = flux_pipe_call_that_returns_an_iterable_of_images.__get__(pipe)
         last_model = repo_id
         last_cn_on = cn_on
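Both change_base_model branches now leave the freshly loaded pipeline on the CPU; the .to(device) calls stay commented out. The sketch below condenses that loading pattern for context. It is illustrative only: load_pipe is a hypothetical helper, and the concrete dtype is an assumption, since the diff only shows the name dtype.

import torch
from diffusers import DiffusionPipeline, FluxControlNetPipeline

dtype = torch.bfloat16  # assumed; app.py defines its own dtype

def load_pipe(repo_id: str, cn_on: bool, controlnet=None):
    # Hypothetical condensation of the two branches in change_base_model.
    if cn_on:
        # ControlNet branch: attach the prepared controlnet, stay on CPU for now.
        return FluxControlNetPipeline.from_pretrained(
            repo_id, controlnet=controlnet, torch_dtype=dtype
        )
    # Plain branch: no .to(device) here; the move to CUDA happens at generation time.
    return DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)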
@@ -149,6 +149,7 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
     try:
         #good_vae.to("cuda")
         #taef1.to("cuda")
+        pipe.vae = taef1
         pipe.to("cuda")
         generator = torch.Generator(device="cuda").manual_seed(seed)
 
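The generate_image hunk pairs with the loading change: the tiny VAE is swapped in and the pipeline is moved to CUDA only when an image is actually generated. A minimal sketch of that step, assuming pipe and taef1 (presumably an AutoencoderTiny such as madebyollin/taef1) are set up elsewhere in app.py; the function name run is illustrative, not part of the file.

import torch

def run(pipe, taef1, prompt: str, seed: int = 0):
    pipe.vae = taef1  # swap in the tiny VAE, mirroring the line added at 152
    pipe.to("cuda")   # move the pipeline to the GPU lazily
    generator = torch.Generator(device="cuda").manual_seed(seed)
    return pipe(prompt, generator=generator).images[0]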