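"""Garment try-on generation pipeline.

Crops a garment from an OpenPose-conditioned render, composites it onto a
segmented human photo, and blends the result with an IP-Adapter inpainting
model guided by an auto-generated caption of the clothes image.
"""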
from PIL import Image
import torch


from segmentation import get_cropped, get_blurred_mask, get_cropped_face, init as init_seg
from img2txt import derive_caption, init as init_img2txt
from utils import overlay_on_white_background
from adapter_model import MODEL

# Load the segmentation and captioning models once at import time.
init_seg()
init_img2txt()

ip_model = MODEL("inpaint")
negative_prompt = "(deformed iris, deformed pupils, semi-realistic, cgi, 3d, render, sketch, cartoon, drawing, anime:1.4), text, close up, cropped, out of frame, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck"

def generate(img_openpose_gen: Image.Image, img_human: Image.Image,
             img_clothes: Image.Image, segment_id: int) -> Image.Image:
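    """Composite the garment crop from ``img_openpose_gen`` over the body
    crop from ``img_human``, then inpaint the masked garment region with
    the IP-Adapter model, conditioned on ``img_clothes``.
    """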
    # Garment crop from the pose-conditioned render; body crop from the
    # original photo. Both resized to the model's working resolution.
    cropped_clothes = get_cropped(img_openpose_gen, segment_id, False).resize((512, 768))
    cropped_body = get_cropped(img_human, segment_id, True).resize((512, 768))

    # Layer the clothes over the body, then flatten onto a white canvas.
    composite = Image.alpha_composite(cropped_body.convert("RGBA"),
                                      cropped_clothes.convert("RGBA"))
    composite = overlay_on_white_background(composite)

    # Blurred inpainting mask for the garment segment, taken from the
    # original photo (alternative: derive it from the composite instead):
    # mask = get_blurred_mask(composite, segment_id)
    mask = get_blurred_mask(img_human, segment_id)
    # The text prompt is auto-captioned from the clothes image.
    prompt = derive_caption(img_clothes)

    ip_gen = ip_model.model.generate(
        prompt=prompt,
        negative_prompt=negative_prompt,
        pil_image=img_clothes,  # IP-Adapter image prompt
        num_samples=1,
        num_inference_steps=50,
        seed=123,
        image=composite,        # inpainting base image
        mask_image=mask,
        strength=0.8,
        guidance_scale=7,
        scale=0.8,              # IP-Adapter conditioning weight
    )[0]

    # Paste the original face back over the generation to preserve identity.
    cropped_head = get_cropped_face(composite)

    ip_gen_final = Image.alpha_composite(ip_gen.convert("RGBA"),
                                         cropped_head.convert("RGBA"))
    torch.cuda.empty_cache()  # release VRAM held by the diffusion pass
    return ip_gen_final.resize(img_human.size)
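

# Minimal usage sketch (not part of the module's API). The image paths and
# the segment id below are illustrative assumptions; valid segment ids depend
# on the segmentation model initialised in segmentation.py.
if __name__ == "__main__":
    pose_render = Image.open("openpose_gen.png")  # hypothetical input
    person = Image.open("person.jpg")             # hypothetical input
    clothes = Image.open("clothes.png")           # hypothetical input
    result = generate(pose_render, person, clothes, segment_id=4)  # placeholder id
    result.save("result.png")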