Update app.py
app.py CHANGED
@@ -6,6 +6,7 @@ import gradio as gr
 import numpy as np
 import PIL.Image
 import torch
+import torchvision.transforms.functional as TF
 from diffusers import DDPMScheduler, StableDiffusionXLAdapterPipeline, T2IAdapter
 
 DESCRIPTION = "# T2I-Adapter-SDXL Sketch"
@@ -16,12 +17,7 @@ if not torch.cuda.is_available():
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     model_id = "stabilityai/stable-diffusion-xl-base-1.0"
-    adapter = T2IAdapter.from_pretrained(
-        "Adapter/t2iadapter",
-        subfolder="sketch_sdxl_1.0",
-        torch_dtype=torch.float16,
-        adapter_type="full_adapter_xl",
-    )
+    adapter = T2IAdapter.from_pretrained("TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16")
     scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
     pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
         model_id,
@@ -48,12 +44,14 @@ def run(
     image: PIL.Image.Image,
     prompt: str,
     negative_prompt: str,
-    num_steps=
+    num_steps=25,
     guidance_scale=7.5,
+    adapter_conditioning_scale=0.8,
     seed=0,
 ) -> PIL.Image.Image:
-
-    image =
+    image = image.convert("RGB").resize((1024, 1024))
+    image = TF.to_tensor(image) > 0.5
+    image = TF.to_pil_image(image.to(torch.float32))
 
     generator = torch.Generator(device=device).manual_seed(seed)
     out = pipe(
@@ -63,6 +61,7 @@ def run(
         num_inference_steps=num_steps,
         generator=generator,
         guidance_scale=guidance_scale,
+        adapter_conditioning_scale=adapter_conditioning_scale,
     ).images[0]
     return out
 
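For context, here is a minimal standalone sketch of what the updated code does end to end: load the TencentARC sketch adapter, binarize the input drawing, and pass `adapter_conditioning_scale` through to the pipeline. The pipeline construction keywords (`adapter=`, `scheduler=`, the `torch_dtype`/`variant` arguments and the `.to("cuda")` move) and the example prompt and file names are assumptions, since the diff truncates those parts of app.py.

```python
# Hedged sketch of the updated flow; keyword names marked "assumed" are not
# visible in the truncated diff and may differ from the actual app.py.
import PIL.Image
import torch
import torchvision.transforms.functional as TF
from diffusers import DDPMScheduler, StableDiffusionXLAdapterPipeline, T2IAdapter

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
adapter = T2IAdapter.from_pretrained(
    "TencentARC/t2i-adapter-sketch-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
)
scheduler = DDPMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    model_id,
    adapter=adapter,          # assumed: the diff cuts the call off after model_id
    scheduler=scheduler,      # assumed
    torch_dtype=torch.float16,  # assumed
    variant="fp16",           # assumed
).to("cuda")

# Preprocessing added by this commit: resize to the SDXL resolution and
# threshold the sketch so the adapter sees a clean black-and-white image.
sketch = PIL.Image.open("sketch.png")  # hypothetical input file
sketch = sketch.convert("RGB").resize((1024, 1024))
sketch = TF.to_tensor(sketch) > 0.5                 # boolean mask per pixel
sketch = TF.to_pil_image(sketch.to(torch.float32))  # back to a PIL image

generator = torch.Generator(device="cuda").manual_seed(0)
out = pipe(
    prompt="a photo of a house, best quality",  # hypothetical prompt
    negative_prompt="low quality",              # hypothetical negative prompt
    image=sketch,
    num_inference_steps=25,
    generator=generator,
    guidance_scale=7.5,
    adapter_conditioning_scale=0.8,  # new knob exposed by this commit
).images[0]
out.save("result.png")
```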