merve (HF staff) committed
Commit 59c202d
1 Parent(s): 1c5e07f

Create app.py

Files changed (1): app.py (+309, -0)

app.py ADDED
@@ -0,0 +1,309 @@
+ import gradio as gr
+ import random
+ import numpy as np
+ import os
+ import requests
+ import torch
+ import torchvision.transforms as T
+ from PIL import Image
+ from transformers import AutoProcessor, AutoModelForVision2Seq
+ import cv2
+ import spaces
+ import ast
+
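+ # BGR colors for the boxes drawn with OpenCV below. `color_map` converts each
+ # tuple to a "#rrggbb" hex string; in this app it mainly caps how many
+ # entities get drawn (via len(color_map)).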
+ colors = [
+     (0, 255, 0),
+     (0, 0, 255),
+     (255, 255, 0),
+     (255, 0, 255),
+     (0, 255, 255),
+     (114, 128, 250),
+     (0, 165, 255),
+     (0, 128, 0),
+     (144, 238, 144),
+     (238, 238, 175),
+     (255, 191, 0),
+     (0, 128, 0),
+     (226, 43, 138),
+     (255, 0, 255),
+     (0, 215, 255),
+     (255, 0, 0),
+ ]
+
+ color_map = {
+     f"{color_id}": f"#{hex(color[2])[2:].zfill(2)}{hex(color[1])[2:].zfill(2)}{hex(color[0])[2:].zfill(2)}"
+     for color_id, color in enumerate(colors)
+ }
+
+
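+ # Axis-aligned rectangle intersection test, used to keep the text-label
+ # backgrounds from stacking on top of each other.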
+ def is_overlapping(rect1, rect2):
+     x1, y1, x2, y2 = rect1
+     x3, y3, x4, y4 = rect2
+     return not (x2 < x3 or x1 > x4 or y2 < y3 or y1 > y4)
+
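+ # Draw one colored box per grounded entity plus a matching text label.
+ # `entity_index >= 0` restricts drawing to a single entity (used when the
+ # user clicks a span in the highlighted description).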
+ @spaces.GPU
+ def draw_entity_boxes_on_image(image, entities, show=False, save_path=None, entity_index=-1):
+     """Draw the bounding boxes of `entities` onto `image`.
+
+     Args:
+         image: a PIL image, an image path, or a CLIP-normalized torch tensor
+         entities: list of (entity_name, (start, end), bboxes) tuples
+     """
+     if isinstance(image, Image.Image):
+         image_h = image.height
+         image_w = image.width
+         image = np.array(image)[:, :, [2, 1, 0]]
+     elif isinstance(image, str):
+         if os.path.exists(image):
+             pil_img = Image.open(image).convert("RGB")
+             image = np.array(pil_img)[:, :, [2, 1, 0]]
+             image_h = pil_img.height
+             image_w = pil_img.width
+         else:
+             raise ValueError(f"invalid image path, {image}")
+     elif isinstance(image, torch.Tensor):
+         # undo the CLIP-style normalization before converting back to PIL
+         image_tensor = image.cpu()
+         reverse_norm_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073])[:, None, None]
+         reverse_norm_std = torch.tensor([0.26862954, 0.26130258, 0.27577711])[:, None, None]
+         image_tensor = image_tensor * reverse_norm_std + reverse_norm_mean
+         pil_img = T.ToPILImage()(image_tensor)
+         image_h = pil_img.height
+         image_w = pil_img.width
+         image = np.array(pil_img)[:, :, [2, 1, 0]]
+     else:
+         raise ValueError(f"invalid image format, {type(image)} for {image}")
+
+     if len(entities) == 0:
+         return image
+
+     indices = list(range(len(entities)))
+     if entity_index >= 0:
+         indices = [entity_index]
+
+     # do not show too many bboxes
+     entities = entities[:len(color_map)]
+
+     new_image = image.copy()
+     previous_bboxes = []
+     # size of text
+     text_size = 1
+     # thickness of text
+     text_line = 1  # int(max(1 * min(image_h, image_w) / 512, 1))
+     box_line = 3
+     (c_width, text_height), _ = cv2.getTextSize("F", cv2.FONT_HERSHEY_COMPLEX, text_size, text_line)
+     base_height = int(text_height * 0.675)
+     text_offset_original = text_height - base_height
+     text_spaces = 3
+
+     used_colors = colors
+
+     color_id = -1
+     for entity_idx, (entity_name, (start, end), bboxes) in enumerate(entities):
+         color_id += 1
+         if entity_idx not in indices:
+             continue
+         for bbox_id, (x1_norm, y1_norm, x2_norm, y2_norm) in enumerate(bboxes):
+             # coordinates are normalized to [0, 1]; scale them to pixels
+             orig_x1, orig_y1, orig_x2, orig_y2 = int(x1_norm * image_w), int(y1_norm * image_h), int(x2_norm * image_w), int(y2_norm * image_h)
+
+             # draw bbox
+             color = used_colors[color_id]
+             new_image = cv2.rectangle(new_image, (orig_x1, orig_y1), (orig_x2, orig_y2), color, box_line)
+
+             l_o, r_o = box_line // 2 + box_line % 2, box_line // 2 + box_line % 2 + 1
+
+             x1 = orig_x1 - l_o
+             y1 = orig_y1 - l_o
+
+             if y1 < text_height + text_offset_original + 2 * text_spaces:
+                 y1 = orig_y1 + r_o + text_height + text_offset_original + 2 * text_spaces
+                 x1 = orig_x1 + r_o
+
+             # add text background
+             (text_width, text_height), _ = cv2.getTextSize(f" {entity_name}", cv2.FONT_HERSHEY_COMPLEX, text_size, text_line)
+             text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2 = x1, y1 - (text_height + text_offset_original + 2 * text_spaces), x1 + text_width, y1
+
+             # shift the label down until it no longer overlaps a previous label
+             for prev_bbox in previous_bboxes:
+                 while is_overlapping((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2), prev_bbox):
+                     text_bg_y1 += (text_height + text_offset_original + 2 * text_spaces)
+                     text_bg_y2 += (text_height + text_offset_original + 2 * text_spaces)
+                     y1 += (text_height + text_offset_original + 2 * text_spaces)
+
+                     if text_bg_y2 >= image_h:
+                         text_bg_y1 = max(0, image_h - (text_height + text_offset_original + 2 * text_spaces))
+                         text_bg_y2 = image_h
+                         y1 = image_h
+                         break
+
+             # alpha-blend the label background onto the image
+             alpha = 0.5
+             for i in range(text_bg_y1, text_bg_y2):
+                 for j in range(text_bg_x1, text_bg_x2):
+                     if i < image_h and j < image_w:
+                         if j < text_bg_x1 + 1.35 * c_width:
+                             # original color
+                             bg_color = color
+                         else:
+                             # white
+                             bg_color = [255, 255, 255]
+                         new_image[i, j] = (alpha * new_image[i, j] + (1 - alpha) * np.array(bg_color)).astype(np.uint8)
+
+             cv2.putText(
+                 new_image, f" {entity_name}", (x1, y1 - text_offset_original - 1 * text_spaces), cv2.FONT_HERSHEY_COMPLEX, text_size, (0, 0, 0), text_line, cv2.LINE_AA
+             )
+             previous_bboxes.append((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2))
+
+     pil_image = Image.fromarray(new_image[:, :, [2, 1, 0]])
+     if save_path:
+         pil_image.save(save_path)
+     if show:
+         pil_image.show()
+
+     return pil_image
+
+
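+ # Load the Kosmos-2 checkpoint and its processor once at startup.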
+ ckpt = "microsoft/kosmos-2-patch14-224"
+
+ model = AutoModelForVision2Seq.from_pretrained(ckpt).to("cuda")
+ processor = AutoProcessor.from_pretrained(ckpt)
+
+ @spaces.GPU
+ def generate_predictions(image_input, text_input, question=None):
+
+     # Save the image and load it again to match the original Kosmos-2 demo.
+     # (https://github.com/microsoft/unilm/blob/f4695ed0244a275201fff00bee495f76670fbe70/kosmos-2/demo/gradio_app.py#L345-L346)
+     user_image_path = "/tmp/user_input_test_image.jpg"
+     image_input.save(user_image_path)
+     # This might give different results from the original argument `image_input`
+     image_input = Image.open(user_image_path)
+
+     # The <grounding> token asks the model to ground noun phrases to boxes.
+     if text_input == "Brief":
+         text_input = "<grounding>An image of"
+     elif text_input == "Detailed":
+         text_input = "<grounding>Describe this image in detail:"
+     if question:
+         text_input = f"<grounding>{question}"
+     print(text_input)
+     inputs = processor(text=text_input, images=image_input, return_tensors="pt").to("cuda")
+
+     generated_ids = model.generate(
+         pixel_values=inputs["pixel_values"],
+         input_ids=inputs["input_ids"],
+         attention_mask=inputs["attention_mask"],
+         image_embeds=None,
+         image_embeds_position_mask=inputs["image_embeds_position_mask"],
+         use_cache=True,
+         max_new_tokens=128,
+     )
+
+     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+
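+     # `post_process_generation` returns entities as
+     # (entity_name, (start, end), [(x1, y1, x2, y2), ...]) tuples: character
+     # offsets into the processed text plus boxes normalized to [0, 1].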
+     # By default, the generated text is cleaned up and the entities are extracted.
+     processed_text, entities = processor.post_process_generation(generated_text)
+
+     annotated_image = draw_entity_boxes_on_image(image_input, entities, show=False)
+
+     color_id = -1
+     entity_info = []
+     filtered_entities = []
+     for entity in entities:
+         entity_name, (start, end), bboxes = entity
+         if start == end:
+             # skip bounding boxes without a `phrase` associated
+             continue
+         color_id += 1
+         entity_info.append(((start, end), color_id))
+         filtered_entities.append(entity)
+
+     # Build (text, label) pairs for gr.HighlightedText: ungrounded gaps get
+     # None, grounded spans get their color id as the label.
+     colored_text = []
+     prev_start = 0
+     end = 0
+     for idx, ((start, end), color_id) in enumerate(entity_info):
+         if start > prev_start:
+             colored_text.append((processed_text[prev_start:start], None))
+         colored_text.append((processed_text[start:end], f"{color_id}"))
+         prev_start = end
+
+     if end < len(processed_text):
+         colored_text.append((processed_text[end:], None))
+
+     return annotated_image, colored_text, str(filtered_entities)
+
+
+ term_of_use = """
+ ### Terms of use
+ By using this model, users agree to the following terms:
+ - The model is intended for academic and research purposes.
+ - Using the model to create unsuitable material is strictly forbidden and not endorsed by this work.
+ - Accountability for any improper or unacceptable use of the model rests exclusively with the individuals who generated such content.
+
+ ### License
+ This project is licensed under the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct).
+ """
+
+ with gr.Blocks(title="Kosmos-2", theme=gr.themes.Base()).queue() as demo:
+     gr.Markdown("""
+     # Kosmos-2: Grounding Multimodal Large Language Models to the World
+     [[Paper]](https://arxiv.org/abs/2306.14824) [[Code]](https://github.com/microsoft/unilm/blob/master/kosmos-2)
+     ### This model can answer visual questions, localize objects in a given image, and even caption the image without hallucination!
+     ### To get started, simply pick one of the images. Pick "Brief" or "Detailed" for captioning. For visual question answering, pick "None" and enter your question.
+     """)
+     with gr.Row():
+         with gr.Column():
+             image_input = gr.Image(type="pil", label="Test Image")
+             text_input = gr.Radio(["Brief", "Detailed", "None"], label="Captioning Detail", value="Brief")
+             question = gr.Textbox(label="Visual Question Answering")
+             run_button = gr.Button(value="Run", visible=True)
+
+         with gr.Column():
+             image_output = gr.Image(type="pil")
+             text_output1 = gr.HighlightedText(
+                 label="Generated Description",
+                 combine_adjacent=False,
+                 show_legend=True,
+             )
+
+     with gr.Row():
+         with gr.Column():
+             gr.Examples(examples=[
+                 ["/content/IMG_4509.jpg", "Detailed", None],
+                 ["/content/IMG_4509.jpg", "Brief", None],
+                 ["/content/IMG_4509.jpg", "None", "What is in this image?"],
+             ], inputs=[image_input, text_input, question])
+
+     gr.Markdown(term_of_use)
+
+     # record which text span (label) is selected
+     selected = gr.Number(-1, show_label=False, visible=False)
+
+     # record the current `entities`
+     entity_output = gr.Textbox(visible=False)
+
+     # get the currently selected span's label
+     def get_text_span_label(evt: gr.SelectData):
+         if evt.value[-1] is None:
+             return -1
+         return int(evt.value[-1])
+     # and store it in `selected`
+     text_output1.select(get_text_span_label, None, selected)
+
+     # redraw the output image when the span (entity) selection changes
+     def update_output_image(img_input, image_output, entities, idx):
+         entities = ast.literal_eval(entities)
+         updated_image = draw_entity_boxes_on_image(img_input, entities, entity_index=idx)
+         return updated_image
+     selected.change(update_output_image, [image_input, image_output, entity_output, selected], [image_output])
+
+     run_button.click(fn=generate_predictions,
+                      inputs=[image_input, text_input, question],
+                      outputs=[image_output, text_output1, entity_output],
+                      show_progress=True, queue=True)
+
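+ # Hypothetical programmatic use of the prediction function, for reference only
+ # ("sample.jpg" is an assumed local file, not shipped with this app):
+ #   img = Image.open("sample.jpg")
+ #   annotated_img, highlighted_spans, entities_str = generate_predictions(img, "Brief")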
+ demo.launch(debug=True)
+