Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,49 +1,44 @@
-import torch
-from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
-from diffusers import DiffusionPipeline
 import gradio as gr
 from PIL import Image
 
-
-model_id = "stabilityai/stable-diffusion-3-medium"
-
-# Load the ControlNet model (use an appropriate pre-trained controlnet model)
-controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
-
-# Set up the pipeline using both SD3 and ControlNet
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
-
-    controlnet=controlnet,
-    torch_dtype=torch.float16
 )
 
-
-
-
 
-
-def controlnet_img2img(image, prompt, strength=0.8, guidance=7.5):
-    image = Image.fromarray(image).convert("RGB")  # Convert to RGB
-
-    # Run the pipeline
-    result = pipe(prompt=prompt, image=image, strength=strength, guidance_scale=guidance).images[0]
-    return result
 
-
-
-    result = controlnet_img2img(input_image, prompt)
-    return result
 
-# Create Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("
-    with gr.Row():
-        image_input = gr.Image(source="upload", type="numpy", label="Input Image")
-        prompt_input = gr.Textbox(label="Prompt")
-        result_output = gr.Image(label="Output Image")
 
-
-
 
-# Launch Gradio interface
 demo.launch()
 import gradio as gr
+import torch
+import numpy as np
 from PIL import Image
+from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
+from diffusers.utils import make_image_grid
+import cv2
 
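+# Canny-conditioned ControlNet paired with the SD 1.5 base weights, both in fp16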
+controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16, use_safetensors=True)
 pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "stable-diffusion-v1-5/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16, use_safetensors=True
 )
+pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+pipe.enable_model_cpu_offload()
 
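+# Turn the upload into a Canny edge map, then let it condition the generation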
+def generate_image(input_image, text_prompt):
+    # Gradio can hand over RGBA or paletted PIL images; normalize to RGB for OpenCV
+    original_image = np.array(input_image.convert("RGB"))
+    low_threshold = 100
+    high_threshold = 200
+    edges = cv2.Canny(original_image, low_threshold, high_threshold)
+    edges = edges[:, :, None]
+    canny_image = np.concatenate([edges, edges, edges], axis=2)
+    canny_image_pil = Image.fromarray(canny_image)
 
+    output_image = pipe(text_prompt, image=canny_image_pil).images[0]
 
+    # The pipeline rounds its output size down to a multiple of 8, so resize the
+    # other tiles to match before assembling the comparison grid
+    input_image = input_image.resize(output_image.size)
+    canny_image_pil = canny_image_pil.resize(output_image.size)
+    result_grid = make_image_grid([input_image, canny_image_pil, output_image], rows=1, cols=3)
+    return result_grid
 
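+# UI: image and prompt in, a side-by-side input / edges / output grid out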
 with gr.Blocks() as demo:
+    gr.Markdown("# Image Transformation with ControlNet and Stable Diffusion")
+
+    with gr.Row():
+        with gr.Column():
+            # gradio 4.x removed gr.Image's `tool` argument (editing moved to gr.ImageEditor)
+            input_image = gr.Image(type="pil", label="Upload Image")
+            text_prompt = gr.Textbox(label="Enter a prompt for the transformation")
+
+            generate_button = gr.Button("Generate Image")
+
+            # gradio 4.x removed gr.Image's `shape=`; use explicit display height/width
+            result = gr.Image(label="Result", height=256, width=768)
+
+            generate_button.click(fn=generate_image, inputs=[input_image, text_prompt], outputs=result)
 
 demo.launch()
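If the Space still shows a runtime error after this commit, the other usual Spaces culprit is a dependency missing from requirements.txt, since every import above must be installable in the Space image before app.py can start. A plausible minimal requirements.txt sketch for this app, inferred from the imports plus the runtime needs of diffusers (transformers for the CLIP text encoder, accelerate for enable_model_cpu_offload), not taken from the repo:

    gradio
    torch
    diffusers
    transformers
    accelerate
    opencv-python-headless
    numpy
    Pillow

opencv-python-headless stands in for the full opencv-python wheel because the headless build skips the libGL system libraries that Spaces containers typically lack.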