import gradio as gr
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch

# Pick the device and precision together: float16 runs on CUDA but is not
# supported for this pipeline on CPU, so fall back to float32 there.
device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32

# OpenPose ControlNet checkpoint paired with Stable Diffusion 1.5.
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/control_v11p_sd15_openpose", torch_dtype=dtype
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=dtype,
    safety_checker=None,
)
# UniPC scheduler keeps quality reasonable at the low step count used below.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to(device)
def generate(image, prompt="a person posing"):
    # The uploaded image is used as the ControlNet conditioning image;
    # an OpenPose skeleton image gives the most predictable poses.
    result = pipe(prompt=prompt, image=image, num_inference_steps=20).images[0]
    return result
demo = gr.Interface(
    fn=generate,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt", value="a person posing")],
    outputs="image",
    title="Pose Generator",
    description="Upload a pose image and enter a prompt to generate a ControlNet-guided pose output.",
)
demo.launch()