fffiloni committed on
Commit
2dc6006
1 Parent(s): 2c924a5

Create ref_in_gp3.py

Files changed (1)
  1. tasks/ref_in_gp3.py +108 -0
tasks/ref_in_gp3.py ADDED
@@ -0,0 +1,108 @@
+ # --------------------------------------------------------
+ # X-Decoder -- Generalized Decoding for Pixel, Image, and Language
+ # Copyright (c) 2022 Microsoft
+ # Licensed under The MIT License [see LICENSE for details]
+ # Written by Jianwei Yang (jianwyan@microsoft.com)
+ # --------------------------------------------------------
+ import os
+ import openai
+ import torch
+ import numpy as np
+ from scipy import ndimage
+ from PIL import Image
+ from utils.inpainting import pad_image, crop_image
+ from torchvision import transforms
+ from utils.visualizer import Visualizer
+ from diffusers import StableDiffusionInpaintPipeline
+ from detectron2.utils.colormap import random_color
+ from detectron2.data import MetadataCatalog
+
+
+ # Resize the shorter image side to 512 px before running the model.
+ t = []
+ t.append(transforms.Resize(512, interpolation=Image.BICUBIC))
+ transform = transforms.Compose(t)
+ metadata = MetadataCatalog.get('ade20k_panoptic_train')
+
+ # Stable Diffusion inpainting pipeline, loaded once in fp16 on the GPU.
+ pipe = StableDiffusionInpaintPipeline.from_pretrained(
+     # "stabilityai/stable-diffusion-2-inpainting",
+     "runwayml/stable-diffusion-inpainting",
+     revision="fp16",
+     torch_dtype=torch.float16,
+ ).to("cuda")
+
+ # Few-shot examples that show GPT-3 how to rewrite an editing instruction
+ # into a referring phrase [source] and an inpainting prompt <target>.
+ prompts = []
+ prompts.append("instruction: remove the person, task: (referring editing), source: [person], target:<clean and empty scene>.")
+ prompts.append("instruction: remove the person in the middle, task: (referring editing), source: [person in the middle], target:<clean and empty scene>.")
+ prompts.append("instruction: remove the dog on the left side, task: (referring editing), source: [dog on the left side], target:<clean and empty scene>.")
+ prompts.append("instruction: change the apple to a pear, task: (referring editing), source: [apple], target: <pear>.")
+ prompts.append("instruction: change the red apple to a green one, task: (referring editing), source: [red apple], target: <green apple>.")
+ prompts.append("instruction: change the color of bird's feathers from white to blue, task: (referring editing), source: [white bird], target: <blue bird>.")
+ prompts.append("instruction: replace the dog with a cat, task: (referring editing), source: [dog], target: <cat>.")
+ prompts.append("instruction: replace the red apple with a green one, task: (referring editing), source: [red apple], target: <green apple>.")
+
+ # Azure OpenAI endpoint; the key is read from the environment.
+ openai.api_type = "azure"
+ openai.api_base = "https://xdecoder.openai.azure.com/"
+ openai.api_version = "2022-12-01"
+ openai.api_key = os.environ["OPENAI_API_KEY"]
+
+ def get_gpt3_response(prompt):
+     # Query the GPT-3 completion endpoint (legacy openai<1.0 SDK).
+     response = openai.Completion.create(
+         engine="text001",
+         prompt=prompt,
+         temperature=0.7,
+         max_tokens=512,
+         top_p=1,
+         frequency_penalty=0,
+         presence_penalty=0,
+     )
+     return response
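+
+ # For "replace the dog with a cat" the completion is expected to look like
+ # "task: (referring editing), source: [dog], target: <cat>." -- the function
+ # below slices out the [...] span as the grounding query and the <...> span
+ # as the inpainting prompt.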
+
+ def referring_inpainting_gpt3(model, image, instruction, *args, **kwargs):
+     # convert instruction to source and target
+     instruction = instruction.replace('.', '')
+     print(instruction)
+     resp = get_gpt3_response(' '.join(prompts) + ' instruction: ' + instruction + ',')
+     resp_text = resp['choices'][0]['text']
+     print(resp_text)
+     ref_text = resp_text[resp_text.find('[')+1:resp_text.find(']')]
+     inp_text = resp_text[resp_text.find('<')+1:resp_text.find('>')]
+
+     model.model.metadata = metadata
+     texts = [[ref_text if ref_text.strip().endswith('.') else (ref_text.strip() + '.')]]
+     image_ori = crop_image(transform(image))
+
+     with torch.no_grad():
+         width = image_ori.size[0]
+         height = image_ori.size[1]
+         image = np.asarray(image_ori)
+         image_ori_np = np.asarray(image_ori)
+         images = torch.from_numpy(image.copy()).permute(2, 0, 1).cuda()
+
+         # ground the referring phrase to a binary mask with X-Decoder
+         batch_inputs = [{'image': images, 'height': height, 'width': width, 'groundings': {'texts': texts}}]
+         outputs = model.model.evaluate_grounding(batch_inputs, None)
+         visual = Visualizer(image_ori_np, metadata=metadata)
+
+         grd_mask = (outputs[0]['grounding_mask'] > 0).float().cpu().numpy()
+         for idx, mask in enumerate(grd_mask):
+             color = random_color(rgb=True, maximum=1).astype(np.int32).tolist()
+             demo = visual.draw_binary_mask(mask, color=color, text=texts[idx])
+         res = demo.get_image()
+
+         if inp_text not in ['no', '']:
+             # dilate the mask so the inpainting covers the object boundary
+             image_crop = image_ori
+             struct2 = ndimage.generate_binary_structure(2, 2)
+             mask_dilated = ndimage.binary_dilation(grd_mask[0], structure=struct2, iterations=3).astype(grd_mask[0].dtype)
+             mask = Image.fromarray(mask_dilated * 255).convert('RGB')
+             image_and_mask = {
+                 "image": image_crop,
+                 "mask": mask,
+             }
+             # images_inpainting = inpainting(inpainting_model, image_and_mask, inp_text, ddim_steps, num_samples, scale, seed)
+             width = image_ori.size[0]
+             height = image_ori.size[1]
+             images_inpainting = pipe(prompt=inp_text.strip(), image=image_and_mask['image'], mask_image=image_and_mask['mask'], height=height, width=width).images
+             torch.cuda.empty_cache()
+             return images_inpainting[0]
+         else:
+             # no <target> produced: return the grounding visualization instead
+             torch.cuda.empty_cache()
+             return Image.fromarray(res)
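
For reference, a minimal usage sketch under stated assumptions: OPENAI_API_KEY is set, a CUDA GPU is available, and `model` is an X-Decoder wrapper exposing `model.model.evaluate_grounding(...)` as used above. The `build_model` loader, checkpoint name, and file paths below are hypothetical placeholders, not part of this commit.

from PIL import Image
from tasks.ref_in_gp3 import referring_inpainting_gpt3

# Hypothetical loader standing in for however the surrounding repo
# constructs its X-Decoder model; it is not defined in this file.
model = build_model("xdecoder_focalt_last.pt")

image = Image.open("input.jpg").convert("RGB")
# Returns the inpainted PIL image; if GPT-3 yields no <target> phrase,
# the grounding visualization is returned instead.
result = referring_inpainting_gpt3(model, image, "replace the dog with a cat")
result.save("output.jpg")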