File size: 1,288 Bytes
100e00d
fc5f68b
ae8d0c8
fc5f68b
 
ae8d0c8
 
fc5f68b
 
ae8d0c8
fc5f68b
 
ae8d0c8
fc5f68b
ae8d0c8
fc5f68b
 
 
 
 
 
ae8d0c8
fc5f68b
 
 
 
 
 
 
 
 
 
 
 
100e00d
 
 
910923e
100e00d
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
import gradio as gr
from diffusers import AutoencoderKL, StableDiffusionXLControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
import torch
from controlnet_aux import OpenposeDetector
from diffusers.utils import load_image


# Compute openpose conditioning image.
openpose = OpenposeDetector.from_pretrained("lllyasviel/ControlNet")

image = load_image(
    "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/person.png"
)
openpose_image = openpose(image)

# Initialize ControlNet pipeline.
controlnet = ControlNetModel.from_pretrained("thibaud/controlnet-openpose-sdxl-1.0")
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet
)
#pipe.enable_model_cpu_offload()

def pose_calc():
    """Run the SDXL ControlNet pipeline on a fixed prompt, conditioned on the
    precomputed OpenPose skeleton, and return the first generated image."""
    generated = pipe(
        "Darth vader dancing in a desert, high quality",
        negative_prompt="low quality, bad quality",
        num_inference_steps=25,
        num_images_per_prompt=4,
        # Conditioning image must match the SDXL base resolution.
        image=openpose_image.resize((1024, 1024)),
        # Fixed seed so repeated clicks produce the same output.
        generator=torch.manual_seed(97),
    ).images
    return generated[0]

# Minimal Gradio UI: no inputs, one button that runs pose_calc and displays
# the resulting image.
# BUG FIX: `outputs` must be a component *instance* (or a string shorthand
# such as "image"), not the gr.Image class object itself.
gr.Interface(
    fn=pose_calc,
    inputs=None,
    outputs=gr.Image(),
).launch()