"""Gradio app: ControlNet-guided Stable Diffusion from a canny edge map of an uploaded image."""
from diffusers import (
    StableDiffusionControlNetPipeline,
    ControlNetModel,
    UniPCMultistepScheduler,
)
from diffusers.utils import load_image, make_image_grid
from PIL import Image
import numpy as np
import gradio as gr
import torch
import cv2
import random


def launch(image_input, prompt, negative_prompt, sd_models_dropdown):
    """Gradio entry point; delegates straight to predict()."""
    return predict(image_input, prompt, negative_prompt, sd_models_dropdown)


def predict(image_input, prompt, negative_prompt, sd_models_dropdown):
    """Generate an image guided by the canny edges of `image_input`.

    Args:
        image_input: PIL image uploaded by the user (gr.Image with type="pil").
        prompt: positive text prompt.
        negative_prompt: negative text prompt.
        sd_models_dropdown: Hugging Face model id of the base SD checkpoint.

    Returns:
        A PIL.Image produced by the ControlNet pipeline.
    """
    image = np.array(image_input)

    # Build the ControlNet conditioning input: canny edges replicated to 3 channels.
    image = cv2.Canny(image, 100, 200)
    image = image[:, :, None]
    image = np.concatenate([image, image, image], axis=2)
    canny_image = Image.fromarray(image)

    # BUG FIX: the original called the undefined StableDiffusionPipeline and never
    # loaded a ControlNet, so the canny image was prepared but unused. Load the
    # canny ControlNet and the matching ControlNet pipeline instead.
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
    )
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        sd_models_dropdown, controlnet=controlnet, torch_dtype=torch.float16
    )
    # UniPC was imported but never wired in; it is the scheduler recommended for
    # ControlNet in the diffusers docs.
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")

    # BUG FIX: do NOT overwrite the user's prompt with a hard-coded string, and
    # drop the dead `negative_promt=""` typo assignment.
    generator = torch.manual_seed(random.randint(0, 1000))
    # BUG FIX: `strength` is an img2img parameter and is not accepted by the
    # ControlNet text-to-image __call__; it has been removed.
    result = pipe(
        prompt=prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=65,
        generator=generator,
        image=canny_image,
        guidance_scale=15.5,
    ).images[0]
    return result


# SYSTEM INPUTS
# BUG FIX: type="filepath" passes a path string, which np.array() cannot turn
# into pixels — use type="pil". Also fixed the invalid `promt=` kwarg (should
# be `label=`) and the "iamge" typo in the label.
image_input = gr.Image(label="Upload image candidate", type="pil")
prompt = gr.Textbox(label="Prompt")
negative_prompt = gr.Textbox(label="Negative prompt")
sd_models_dropdown = gr.Dropdown(
    [
        "runwayml/stable-diffusion-v1-5",
    ],
    label="Stable Diffusion Models",
)

# SYSTEM OUTPUTS
# BUG FIX: gr.outputs.Image() was removed in Gradio 4; use gr.Image().
output_image = gr.Image(label="Result")

demo = gr.Interface(
    launch,
    inputs=[image_input, prompt, negative_prompt, sd_models_dropdown],
    outputs=output_image,
    title="Stable Diffusion with ControlNet",
)

if __name__ == "__main__":
    demo.launch()