import random

import numpy as np
import torch
from diffusers import ControlNetModel, UniPCMultistepScheduler

# Local community module that defines StableDiffusionControlNetInpaintPipeline,
# the inpainting-capable ControlNet pipeline used throughout this file.
from pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline

# Default pipeline, loaded at import time: the "hirol/control_any5_openpose"
# ControlNet paired with the "hirol/Any-inpainting" base model. Note that the
# functions below build their own pipelines and do not reuse this one.
controlnet = ControlNetModel.from_pretrained("hirol/control_any5_openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    "hirol/Any-inpainting", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)


def make_inpaint_condition(image, image_mask):
    """Build the conditioning tensor for an inpainting ControlNet: the source
    image normalized to [0, 1], with masked pixels set to -1."""
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L"))
    # Compare height and width (the original compared only height).
    assert image.shape[0:2] == image_mask.shape[0:2], "image and image_mask must have the same image size"
    image[image_mask > 128] = -1.0  # mark masked pixels
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return image


def generate_image(prompt: str, negative_prompt: str, openpose_image, original_image, mask_image):
    # Pose-guided inpainting: the SD 1.5 openpose ControlNet with the
    # Any-inpainting base model, both resolved through the local ./models cache.
    controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16,
                                                 cache_dir='./models')
    pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        "./models/Any-inpainting", controlnet=controlnet, torch_dtype=torch.float16, cache_dir='./models'
    )

    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to('cuda')

    # Random seed so repeated calls produce different images.
    seed = random.randint(10000, 90000)
    generator = torch.manual_seed(seed)
    # (A two-ControlNet variant using make_inpaint_condition is sketched
    # after this function.)

    image = pipe(
        prompt=prompt,
        image=original_image,
        control_image=openpose_image,
        mask_image=mask_image,
        num_inference_steps=20,
        generator=generator,
        negative_prompt=negative_prompt,
    ).images[0]

    return image
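

# The commented-out lines in the original generate_image (a second control
# image from make_inpaint_condition and controlnet_conditioning_scale=[1.0, 0.8])
# hint at a two-ControlNet variant: pose guidance plus an inpaint condition.
# A minimal sketch of that path follows. It assumes the community pipeline
# accepts, like diffusers' MultiControlNet support, a list of ControlNets with
# matching lists of control images and conditioning scales; treat the call
# signature and the generate_image_multicontrol name as assumptions, not a
# tested implementation.
def generate_image_multicontrol(prompt: str, negative_prompt: str, openpose_image, original_image, mask_image):
    controlnet_pose = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16, cache_dir='./models')
    controlnet_inpaint = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16, cache_dir='./models')
    pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        "./models/Any-inpainting", controlnet=[controlnet_pose, controlnet_inpaint],
        torch_dtype=torch.float16, cache_dir='./models'
    )
    pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
    pipe.to('cuda')

    # Inpaint condition: source image with masked pixels set to -1.
    control_image = make_inpaint_condition(original_image, mask_image)
    generator = torch.manual_seed(random.randint(10000, 90000))

    return pipe(
        prompt=prompt,
        image=original_image,
        control_image=[openpose_image, control_image],  # one control image per ControlNet
        mask_image=mask_image,
        num_inference_steps=20,
        generator=generator,
        negative_prompt=negative_prompt,
        controlnet_conditioning_scale=[1.0, 0.8],  # weights from the commented-out code
    ).images[0]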


def generate_image_sketch(prompt: str, negative_prompt: str, openpose_image, original_image, mask_image):
    seed = random.randint(10000, 90000)
    generator = torch.manual_seed(seed)

    # Sketch-guided inpainting: same base model, but conditioned on the
    # scribble ControlNet instead of openpose.
    controlnet1 = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_scribble", torch_dtype=torch.float16,
                                                  cache_dir='./models')

    pipe1 = StableDiffusionControlNetInpaintPipeline.from_pretrained(
        "./models/Any-inpainting", controlnet=controlnet1, torch_dtype=torch.float16, cache_dir='./models'
    )

    pipe1.scheduler = UniPCMultistepScheduler.from_config(pipe1.scheduler.config)
    pipe1.to('cuda')

    image = pipe1(
        prompt=prompt,
        image=original_image,
        control_image=openpose_image,  # here the control image is the sketch/scribble
        mask_image=mask_image,
        num_inference_steps=20,
        generator=generator,
        negative_prompt=negative_prompt,
    ).images[0]

    # Returned as a single-element list (e.g. for a gallery-style consumer).
    return [image]
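

# Example invocation, as a sketch: the paths below are hypothetical. The
# functions expect PIL images of the same size: an RGB source image, a
# grayscale-convertible mask (white = region to repaint), and a pre-rendered
# control image (openpose map or scribble).
if __name__ == "__main__":
    from PIL import Image

    original_image = Image.open("inputs/person.png")       # hypothetical path
    mask_image = Image.open("inputs/person_mask.png")      # hypothetical path
    openpose_image = Image.open("inputs/person_pose.png")  # hypothetical path

    result = generate_image(
        prompt="a person in a red jacket, best quality",
        negative_prompt="lowres, bad anatomy",
        openpose_image=openpose_image,
        original_image=original_image,
        mask_image=mask_image,
    )
    result.save("output.png")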