# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang (jianwyan@microsoft.com)
# --------------------------------------------------------
import os
import openai
import torch
import numpy as np
from scipy import ndimage
from PIL import Image
from utils.inpainting import pad_image, crop_image
from torchvision import transforms
from utils.visualizer import Visualizer
from diffusers import StableDiffusionInpaintPipeline
from detectron2.utils.colormap import random_color
from detectron2.data import MetadataCatalog


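# Global setup: resize the shorter image side to 512 for X-Decoder, and use
# ADE20K panoptic metadata for mask visualization.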
t = []
t.append(transforms.Resize(512, interpolation=Image.BICUBIC))
transform = transforms.Compose(t)
metadata = MetadataCatalog.get('ade20k_panoptic_train')

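# Stable Diffusion inpainting pipeline in fp16 on the GPU; the commented-out
# checkpoint is the SD-2 inpainting alternative.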
pipe = StableDiffusionInpaintPipeline.from_pretrained(
    # "stabilityai/stable-diffusion-2-inpainting",
    "runwayml/stable-diffusion-inpainting",
    revision="fp16", 
    torch_dtype=torch.float16,
).to("cuda")

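# Few-shot exemplars for GPT-3: each one maps a free-form editing instruction
# to a grounding phrase in [brackets] (what to segment) and an editing target
# in <angle brackets> (what to inpaint).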
prompts = []
prompts.append("instruction: remove the person, task: (referring editing), source: [person], target:<clean and empty scene>.")
prompts.append("instruction: remove the person in the middle, task: (referring editing), source: [person in the middle], target:<clean and empty scene>.")
prompts.append("instruction: remove the dog on the left side, task: (referring editing), source: [dog on the left side], target:<clean and empty scene>.")
prompts.append("instruction: change the apple to a pear, task: (referring editing), source: [apple], target: <pear>.")
prompts.append("instruction: change the red apple to a green one, task: (referring editing), source: [red apple], target: <green apple>.")
prompts.append("instruction: change the color of bird's feathers from white to blue, task: (referring editing), source: [white bird], target: <blue bird>.")
prompts.append("instruction: replace the dog with a cat, task: (referring editing), source: [dot], target: <cat>.")
prompts.append("instruction: replace the red apple with a green one, task: (referring editing), source: [red apple], target: <green apple>.")

#openai.api_type = "azure"
#openai.api_base = "https://xdecoder.openai.azure.com/"
#openai.api_version = "2022-12-01"
openai.organization = os.environ["OPENAI_ORG"]
openai.api_key = os.environ["OPENAI_API_KEY"]

def get_gpt3_response(prompt):
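    """Send the few-shot prompt to GPT-3 (text-davinci-003) and return the raw completion response."""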
    response = openai.Completion.create(
      model="text-davinci-003",
      prompt=prompt,
      temperature=0.7,
      max_tokens=512,
      top_p=1,
      frequency_penalty=0,
      presence_penalty=0,
    )
    
    return response

def referring_inpainting_gpt3(model, image, instruction, *args, **kwargs):    
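    """Ground the region referred to by `instruction`, then inpaint it with Stable Diffusion.

    GPT-3 parses the instruction into a source phrase (segmented by X-Decoder)
    and a target phrase (rendered by the inpainting pipeline). If no target is
    produced, the grounding visualization is returned instead.
    """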
    # convert instruction to source and target
    instruction = instruction.replace('.', '')
    print(instruction)
    resp = get_gpt3_response(' '.join(prompts) + ' instruction: ' + instruction + ',')
    resp_text = resp['choices'][0]['text']
    print(resp_text)
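    # The completion mirrors the exemplars: grounding phrase in [...], inpainting target in <...>.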
    ref_text = resp_text[resp_text.find('[')+1:resp_text.find(']')]
    inp_text = resp_text[resp_text.find('<')+1:resp_text.find('>')]

    model.model.metadata = metadata
    texts = [[ref_text if ref_text.strip().endswith('.') else (ref_text.strip() + '.')]]
    image_ori = crop_image(transform(image))

    with torch.no_grad():
        width = image_ori.size[0]
        height = image_ori.size[1]
        image_ori_np = np.asarray(image_ori)
        images = torch.from_numpy(image_ori_np.copy()).permute(2, 0, 1).cuda()

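        # Run X-Decoder grounding to segment the region referred to by ref_text.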
        batch_inputs = [{'image': images, 'height': height, 'width': width, 'groundings': {'texts': texts}}]        
        outputs = model.model.evaluate_grounding(batch_inputs, None)
        visual = Visualizer(image_ori_np, metadata=metadata)

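        # Overlay each predicted grounding mask (random color + label) on the image.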
        grd_mask = (outputs[0]['grounding_mask'] > 0).float().cpu().numpy()
        for idx, mask in enumerate(grd_mask):
            color = random_color(rgb=True, maximum=1).astype(np.int32).tolist()
            demo = visual.draw_binary_mask(mask, color=color, text=texts[0][idx])
        res = demo.get_image()
    
    if inp_text not in ['no', '']:
        image_crop = image_ori
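        # Dilate the grounding mask a few pixels so inpainting covers the object's edges.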
        struct2 = ndimage.generate_binary_structure(2, 2)
        mask_dilated = ndimage.binary_dilation(grd_mask[0], structure=struct2, iterations=3).astype(grd_mask[0].dtype)
        mask = Image.fromarray((mask_dilated * 255).astype(np.uint8)).convert('RGB')
        image_and_mask = {
            "image": image_crop,
            "mask": mask,
        }
        # images_inpainting = inpainting(inpainting_model, image_and_mask, inp_text, ddim_steps, num_samples, scale, seed)
        images_inpainting = pipe(prompt = inp_text.strip(), image=image_and_mask['image'], mask_image=image_and_mask['mask'], height=height, width=width).images
        torch.cuda.empty_cache()
        return images_inpainting[0]
    else:
        torch.cuda.empty_cache()
        return Image.fromarray(res)
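

# ------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original pipeline):
# `load_xdecoder_model` below is a hypothetical helper standing in for
# however the caller obtains an X-Decoder model exposing
# `model.model.evaluate_grounding`; only this file's entry point is real.
# ------------------------------------------------------------------
# if __name__ == "__main__":
#     model = load_xdecoder_model("xdecoder_focalt_last.pt")  # hypothetical loader
#     image = Image.open("input.jpg").convert("RGB")
#     edited = referring_inpainting_gpt3(model, image, "remove the person")
#     edited.save("edited.png")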