from typing import Any, Dict

import base64
from io import BytesIO

import numpy as np
import torch
# only the classes used by the active pipeline are imported; the
# commented-out experiments below relied on additional diffusers pipelines
# (DPMSolverMultistepScheduler, AutoPipelineForInpainting, ...)
from diffusers import (
    ControlNetModel,
    EulerDiscreteScheduler,
    StableDiffusionControlNetInpaintPipeline,
)
from PIL import Image

# select device; this handler only supports GPU execution
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

if device.type != 'cuda':
    raise ValueError("This handler requires a CUDA-capable GPU.")

class EndpointHandler:
    def __init__(self, path: str = ""):

        #self.fast_pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
        #self.generator = torch.Generator(device="cuda").manual_seed(0)


        # self.smooth_pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained(
        #     "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True
        # )
        # self.smooth_pipe.to("cuda")

        
        # ControlNet conditioned for inpainting (pairs with Stable Diffusion 1.5)
        self.controlnet = ControlNetModel.from_pretrained(
            "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
        )

        # Stable Diffusion 1.5 inpainting pipeline driven by the ControlNet above
        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=self.controlnet, torch_dtype=torch.float16
        )

        # Euler scheduler, CPU offload between steps, and xFormers attention
        # to reduce VRAM usage
        self.pipe.scheduler = EulerDiscreteScheduler.from_config(self.pipe.scheduler.config)
        self.pipe.enable_model_cpu_offload()
        self.pipe.enable_xformers_memory_efficient_attention()
        
        """
        # load StableDiffusionInpaintPipeline pipeline
        self.pipe = AutoPipelineForInpainting.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="fp16",
            torch_dtype=torch.float16,
        )
        # use DPMSolverMultistepScheduler
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)

        self.pipe.enable_model_cpu_offload()

        self.pipe.enable_xformers_memory_efficient_attention()
        
        # move to device
        #self.pipe = self.pipe.to(device)

        self.pipe2 = AutoPipelineForInpainting.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
        #self.pipe2.enable_model_cpu_offload()
        self.pipe2.enable_xformers_memory_efficient_attention()
        
        self.pipe2.to("cuda")

        self.pipe3 = AutoPipelineForImage2Image.from_pipe(self.pipe2)
        #self.pipe3.enable_model_cpu_offload()
        self.pipe3.enable_xformers_memory_efficient_attention()
        """


    def __call__(self, data: Dict[str, Any]) -> Image.Image:
        """
        :param data: A dictionary with a `prompt` field and, for inpainting,
            base64-encoded `image` and `mask_image` fields; optional fields:
            `negative_prompt`, `method`, `strength`, `guidance_scale`,
            `num_inference_steps`.
        :return: The generated image as a PIL `Image`.
        """
        encoded_image = data.pop("image", None)
        encoded_mask_image = data.pop("mask_image", None)

        prompt = data.pop("prompt", "")
        negative_prompt = data.pop("negative_prompt", "")

        method = data.pop("method", "slow")  # only used by the commented-out variants below
        strength = data.pop("strength", 0.2)
        guidance_scale = data.pop("guidance_scale", 8.0)
        num_inference_steps = data.pop("num_inference_steps", 20)
        """
        if(method == "smooth"):
            if encoded_image is not None:
                image = self.decode_base64_image(encoded_image)
                out = self.smooth_pipe(prompt, image=image).images[0]
    
                return out
        """
        
        # decode the input image and mask; both are required by the
        # ControlNet inpainting pipeline below
        if encoded_image is not None and encoded_mask_image is not None:
            image = self.decode_base64_image(encoded_image).convert("RGB")
            mask_image = self.decode_base64_image(encoded_mask_image).convert("RGB")
        else:
            raise ValueError("Both `image` and `mask_image` must be provided.")

        
        """
        if(method == "fast"):
            image = self.fast_pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                image=image,
                mask_image=mask_image,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,  # steps between 15 and 30 work well for us
                strength=strength,  # make sure to use `strength` below 1.0
                generator=self.generator,
            ).images[0]
    
            return image
        """

        #pipe = AutoPipelineForInpainting.from_pretrained("diffusers/stable-diffusion-xl-1.0-inpainting-0.1", torch_dtype=torch.float16, variant="fp16").to("cuda")
        """
        # run inference pipeline
        out = self.pipe(prompt=prompt, negative_prompt=negative_prompt, image=image, mask_image=mask_image, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale)

        print("1st pipeline part successful!")

        image = out.images[0].resize((1024, 1024))

        print("image resizing successful!")
        
        image = self.pipe2(
            prompt=prompt,
            negative_prompt=negative_prompt,
            image=image,
            mask_image=mask_image,
            guidance_scale=guidance_scale, #8.0
            num_inference_steps=int(num_inference_steps/10), #100
            strength=strength, #0.2
            output_type="latent",  # let's keep in latent to save some VRAM
        ).images[0]

        print("2nd pipeline part successful!")
        
        image2 = self.pipe3(
            prompt=prompt,
            image=image,
            guidance_scale=guidance_scale, #8.0
            num_inference_steps=int(num_inference_steps/10), #100
            strength=strength, #0.2
        ).images[0]

        print("3rd pipeline part successful!")
        
            
        # return first generate PIL image
        return image2
        """
        
        
        # build the ControlNet conditioning tensor (masked pixels set to -1.0)
        control_image = self.make_inpaint_condition(image, mask_image)

        # run the ControlNet inpainting pipeline
        image = self.pipe(
            prompt=prompt,
            negative_prompt=negative_prompt,
            num_inference_steps=num_inference_steps,
            eta=1.0,
            image=image,
            mask_image=mask_image,
            control_image=control_image,
            guidance_scale=guidance_scale,
            strength=strength
        ).images[0]

        return image
        
        
    
    # helper to decode a base64-encoded image string into a PIL image
    def decode_base64_image(self, image_string):
        base64_image = base64.b64decode(image_string)
        buffer = BytesIO(base64_image)
        image = Image.open(buffer)
        return image
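
    # counterpart sketch (not called anywhere in this handler): encode a PIL
    # image back to base64, e.g. if `__call__` were changed to return a
    # JSON-serializable payload instead of a raw PIL image
    def encode_base64_image(self, image: Image.Image) -> str:
        buffer = BytesIO()
        image.save(buffer, format="PNG")
        return base64.b64encode(buffer.getvalue()).decode("utf-8")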

    # build the ControlNet inpainting condition: the image scaled to [0, 1]
    # with masked pixels set to -1.0, as expected by control_v11p_sd15_inpaint
    def make_inpaint_condition(self, image, image_mask):
        image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
        image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0

        # both height and width must match between image and mask
        assert image.shape[0:2] == image_mask.shape[0:2], "image and image_mask must have the same image size"
        image[image_mask > 0.5] = -1.0  # mark masked pixels
        image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW
        image = torch.from_numpy(image)
        return image
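

# ---------------------------------------------------------------------------
# Minimal local smoke test: a sketch for debugging only, not part of the
# Inference Endpoints contract (the serving runtime builds the payload and
# calls the handler itself). `input.png` and `mask.png` are hypothetical
# local files.
if __name__ == "__main__":
    def _encode_file(path: str) -> str:
        with open(path, "rb") as f:
            return base64.b64encode(f.read()).decode("utf-8")

    handler = EndpointHandler()
    result = handler({
        "prompt": "a wooden table in a sunlit room",
        "image": _encode_file("input.png"),       # hypothetical input
        "mask_image": _encode_file("mask.png"),   # hypothetical mask
        "strength": 0.2,
        "guidance_scale": 8.0,
        "num_inference_steps": 20,
    })
    result.save("output.png")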