Update app.py

app.py CHANGED
@@ -43,25 +43,14 @@ def set_timesteps_patched(self, num_inference_steps: int, device = None):
 
 # Image Editor
 edit_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl_edit.safetensors")
-normal_file = hf_hub_download(repo_id="stabilityai/cosxl", filename="cosxl.safetensors")
-
 EDMEulerScheduler.set_timesteps = set_timesteps_patched
-
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
-
 pipe_edit = StableDiffusionXLInstructPix2PixPipeline.from_single_file(
     edit_file, num_in_channels=8, is_cosxl_edit=True, vae=vae, torch_dtype=torch.float16,
 )
 pipe_edit.scheduler = EDMEulerScheduler(sigma_min=0.002, sigma_max=120.0, sigma_data=1.0, prediction_type="v_prediction")
 pipe_edit.to("cuda")
 
-from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
-
-device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-
 # Generator
 @spaces.GPU(duration=30, queue=False)
 def king(type ,
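This hunk drops the unused cosxl.safetensors checkpoint and the CPU fallback path, keeping only the CosXL-Edit pipeline. For orientation, a minimal sketch of how the pipe_edit object configured above is typically invoked; the prompt, file names, step count, and image_guidance_scale value are illustrative assumptions, not part of this commit:

from diffusers.utils import load_image

# Hypothetical usage of the pipe_edit pipeline built above.
source = load_image("input.png").resize((1024, 1024))  # assumed input file

edited = pipe_edit(
    prompt="make it a watercolor painting",  # assumed edit instruction
    image=source,
    num_inference_steps=20,
    guidance_scale=7.0,        # strength of the text instruction
    image_guidance_scale=1.5,  # how closely to follow the source image
).images[0]
edited.save("edited.png")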
@@ -98,7 +87,7 @@ def king(type ,
     generator = torch.Generator().manual_seed(seed)
     image = pipe(
         prompt = instruction,
-        guidance_scale =
+        guidance_scale = 7,
         num_inference_steps = steps,
         width = width,
         height = height,
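The edit above pins guidance_scale to 7 in the text-to-image branch. In diffusers, guidance_scale is the classifier-free guidance weight applied at each denoising step; a self-contained sketch of the library's internal combination step (not code from app.py):

import torch

def apply_cfg(noise_pred_uncond: torch.Tensor,
              noise_pred_text: torch.Tensor,
              guidance_scale: float = 7.0) -> torch.Tensor:
    # Classifier-free guidance: move the prediction away from the
    # unconditional output, toward the text-conditioned one.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

Values around 7 are a common default for SDXL-style pipelines; higher values follow the prompt more literally at the cost of diversity.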
@@ -205,7 +194,7 @@ with gr.Blocks(css=css) as demo:
         inputs=[type,input_image, instruction],
         fn=king,
         outputs=[input_image],
-        cache_examples=
+        cache_examples=True,
     )
 
     gr.Markdown(help_text)
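Setting cache_examples=True makes Gradio run the example inputs through king once at startup and serve the stored outputs when an example is clicked, so example clicks no longer spend GPU time. A hypothetical minimal reproduction of the pattern; the function, labels, and example values are illustrative, not from this Space:

import gradio as gr

def shout(text: str) -> str:
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Textbox(label="Output")
    gr.Examples(
        examples=[["hello"], ["world"]],
        inputs=[inp],
        outputs=[out],
        fn=shout,
        cache_examples=True,  # precompute outputs at launch instead of calling fn live
    )

demo.launch()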