krina1234 committed on
Commit
3820509
1 Parent(s): 8b66002

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +360 -0
app.py ADDED
@@ -0,0 +1,360 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import random
3
+ import numpy as np
4
+ import os
5
+ import requests
6
+ import torch
7
+ import torchvision.transforms as T
8
+ from PIL import Image
9
+ from transformers import AutoProcessor, AutoModelForVision2Seq
10
+ import cv2
11
+ import spaces
12
+ import ast
13
+
14
# BGR color tuples (OpenCV channel order) used when drawing bounding boxes.
colors = [
    (0, 255, 0),
    (0, 0, 255),
    (255, 255, 0),
    (255, 0, 255),
    (0, 255, 255),
    (114, 128, 250),
    (0, 165, 255),
    (0, 128, 0),
    (144, 238, 144),
    (238, 238, 175),
    (255, 191, 0),
    (0, 128, 0),
    (226, 43, 138),
    (255, 0, 255),
    (0, 215, 255),
    (255, 0, 0),
]

# Map each color index (as a string key) to an "#rrggbb" hex string for the
# HighlightedText legend. The tuples above are BGR, so reverse to RGB here.
color_map = {
    str(idx): "#{:02x}{:02x}{:02x}".format(bgr[2], bgr[1], bgr[0])
    for idx, bgr in enumerate(colors)
}
36
+
37
+
38
def is_overlapping(rect1, rect2):
    """Return True if two axis-aligned rectangles (x1, y1, x2, y2) intersect.

    Touching edges count as overlapping.
    """
    ax1, ay1, ax2, ay2 = rect1
    bx1, by1, bx2, by2 = rect2
    # The rectangles are disjoint iff one lies entirely to the left/right of,
    # or entirely above/below, the other; overlap is the negation of that.
    separated = ax2 < bx1 or bx2 < ax1 or ay2 < by1 or by2 < ay1
    return not separated
42
+
43
@spaces.GPU
def draw_entity_boxes_on_image(image, entities, show=False, save_path=None, entity_index=-1):
    """Render entity bounding boxes and name labels onto an image.

    Args:
        image: a PIL.Image, a filesystem path to an image, or a torch.Tensor
            in CHW layout normalized with the mean/std constants below
            (presumably CLIP preprocessing stats — confirm against the
            processor config).
        entities: list of (entity_name, (start, end), bboxes) triples, where
            each bbox is (x1, y1, x2, y2) normalized to [0, 1].
        show: when True, display the annotated image via PIL.
        save_path: optional path; when set, the annotated image is saved there.
        entity_index: when >= 0, draw only the entity at that index while
            keeping its color consistent with the all-entities rendering.

    Returns:
        An RGB PIL.Image with boxes and labels drawn.

    Raises:
        ValueError: for a non-existent image path or unsupported image type.
    """
    # Normalize every accepted input form into a BGR numpy array for OpenCV.
    if isinstance(image, Image.Image):
        image_h = image.height
        image_w = image.width
        image = np.array(image)[:, :, [2, 1, 0]]  # RGB -> BGR
    elif isinstance(image, str):
        if os.path.exists(image):
            pil_img = Image.open(image).convert("RGB")
            image = np.array(pil_img)[:, :, [2, 1, 0]]
            image_h = pil_img.height
            image_w = pil_img.width
        else:
            raise ValueError(f"invaild image path, {image}")
    elif isinstance(image, torch.Tensor):
        image_tensor = image.cpu()
        # Undo the dataset normalization so pixel values return to [0, 1].
        reverse_norm_mean = torch.tensor([0.48145466, 0.4578275, 0.40821073])[:, None, None]
        reverse_norm_std = torch.tensor([0.26862954, 0.26130258, 0.27577711])[:, None, None]
        image_tensor = image_tensor * reverse_norm_std + reverse_norm_mean
        pil_img = T.ToPILImage()(image_tensor)
        image_h = pil_img.height
        image_w = pil_img.width
        image = np.array(pil_img)[:, :, [2, 1, 0]]
    else:
        raise ValueError(f"invaild image format, {type(image)} for {image}")

    if len(entities) == 0:
        # Bug fix: this path previously returned the raw BGR numpy array while
        # every other path returns an RGB PIL image, so consumers such as
        # gr.Image rendered swapped channels. Convert back to RGB PIL for a
        # consistent return type.
        return Image.fromarray(image[:, :, [2, 1, 0]])

    # Entity indices to actually draw: all of them, or a single selection.
    indices = list(range(len(entities)))
    if entity_index >= 0:
        indices = [entity_index]

    # Not to show too many bboxes: cap at the number of distinct colors.
    entities = entities[:len(color_map)]

    new_image = image.copy()
    previous_bboxes = []  # label backgrounds already placed, to dodge overlaps
    text_size = 1   # cv2 font scale
    text_line = 1   # label text stroke thickness
    box_line = 3    # bounding-box stroke thickness
    (c_width, text_height), _ = cv2.getTextSize("F", cv2.FONT_HERSHEY_COMPLEX, text_size, text_line)
    base_height = int(text_height * 0.675)
    text_offset_original = text_height - base_height
    text_spaces = 3  # vertical padding around the label, in pixels

    used_colors = colors

    color_id = -1
    for entity_idx, (entity_name, (start, end), bboxes) in enumerate(entities):
        # Advance the color even for skipped entities so a single-entity
        # rendering reuses the same color as the all-entities rendering.
        color_id += 1
        if entity_idx not in indices:
            continue
        for bbox_id, (x1_norm, y1_norm, x2_norm, y2_norm) in enumerate(bboxes):
            # Scale normalized coordinates into pixel space.
            orig_x1, orig_y1, orig_x2, orig_y2 = int(x1_norm * image_w), int(y1_norm * image_h), int(x2_norm * image_w), int(y2_norm * image_h)

            # Draw the bounding box.
            color = used_colors[color_id]
            new_image = cv2.rectangle(new_image, (orig_x1, orig_y1), (orig_x2, orig_y2), color, box_line)

            # Offsets that place the label just outside the box stroke.
            l_o, r_o = box_line // 2 + box_line % 2, box_line // 2 + box_line % 2 + 1

            x1 = orig_x1 - l_o
            y1 = orig_y1 - l_o

            # If the label would run off the top edge, drop it inside the box.
            if y1 < text_height + text_offset_original + 2 * text_spaces:
                y1 = orig_y1 + r_o + text_height + text_offset_original + 2 * text_spaces
                x1 = orig_x1 + r_o

            (text_width, text_height), _ = cv2.getTextSize(f"  {entity_name}", cv2.FONT_HERSHEY_COMPLEX, text_size, text_line)
            text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2 = x1, y1 - (text_height + text_offset_original + 2 * text_spaces), x1 + text_width, y1

            # Slide the label downward until it stops colliding with labels
            # that were already placed.
            for prev_bbox in previous_bboxes:
                while is_overlapping((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2), prev_bbox):
                    text_bg_y1 += (text_height + text_offset_original + 2 * text_spaces)
                    text_bg_y2 += (text_height + text_offset_original + 2 * text_spaces)
                    y1 += (text_height + text_offset_original + 2 * text_spaces)

                    if text_bg_y2 >= image_h:
                        # Ran off the bottom; clamp to the image edge and stop.
                        text_bg_y1 = max(0, image_h - (text_height + text_offset_original + 2 * text_spaces))
                        text_bg_y2 = image_h
                        y1 = image_h
                        break

            # Alpha-blend the label background: a colored swatch on the left,
            # then a white strip behind the text.
            alpha = 0.5
            for i in range(text_bg_y1, text_bg_y2):
                for j in range(text_bg_x1, text_bg_x2):
                    if i < image_h and j < image_w:
                        if j < text_bg_x1 + 1.35 * c_width:
                            bg_color = color
                        else:
                            bg_color = [255, 255, 255]
                        new_image[i, j] = (alpha * new_image[i, j] + (1 - alpha) * np.array(bg_color)).astype(np.uint8)

            cv2.putText(
                new_image, f"  {entity_name}", (x1, y1 - text_offset_original - 1 * text_spaces), cv2.FONT_HERSHEY_COMPLEX, text_size, (0, 0, 0), text_line, cv2.LINE_AA
            )
            previous_bboxes.append((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2))

    # Convert BGR back to RGB for PIL.
    pil_image = Image.fromarray(new_image[:, :, [2, 1, 0]])
    if save_path:
        pil_image.save(save_path)
    if show:
        pil_image.show()

    return pil_image
158
+
159
+
160
+ ckpt = "microsoft/kosmos-2-patch14-224"
161
+
162
+ model = AutoModelForVision2Seq.from_pretrained(ckpt)
163
+ processor = AutoProcessor.from_pretrained(ckpt)
164
+
165
@spaces.GPU
def generate_predictions(image_input, text_input, question=None):
    """Run Kosmos-2 on an image and build the three outputs the UI displays.

    Args:
        image_input: PIL image from the Gradio image component.
        text_input: captioning mode — "Brief" or "Detailed" select a fixed
            grounding prompt; any other value (the UI passes "None") leaves
            the prompt to the question branch below.
        question: optional free-form question; when non-empty it overrides
            the captioning prompt entirely.

    Returns:
        (annotated_image, colored_text, entities_str):
        annotated_image — PIL image with entity boxes drawn;
        colored_text — list of (segment, color-id-or-None) pairs for
            gr.HighlightedText;
        entities_str — str() of the filtered entity list, parsed back with
            ast.literal_eval by update_output_image.
    """

    # Round-trip the image through a JPEG on disk — presumably to normalize
    # the input format. NOTE(review): looks unnecessary; confirm before
    # removing. Also not safe for concurrent requests (fixed /tmp path).
    user_image_path = "/tmp/user_input_test_image.jpg"
    image_input.save(user_image_path)
    image_input = Image.open(user_image_path)

    # Select the grounding prompt.
    if text_input == "Brief":
        text_input = "<grounding>An image of"
    elif text_input == "Detailed":
        text_input = "<grounding>Describe this image in detail:"
    if question:
        text_input = f"<grounding>{question}"

    inputs = processor(text=text_input, images=image_input, return_tensors="pt")

    generated_ids = model.generate(
        pixel_values=inputs["pixel_values"],
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        image_embeds=None,
        image_embeds_position_mask=inputs["image_embeds_position_mask"],
        use_cache=True,
        max_new_tokens=128,
    )

    generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # Split the raw generation into cleaned text plus grounded entities:
    # (name, (start, end) character span, list of normalized bboxes).
    processed_text, entities = processor.post_process_generation(generated_text)

    annotated_image = draw_entity_boxes_on_image(image_input, entities, show=False)

    # Assign a sequential color id to every entity that has a non-empty
    # character span in the processed text.
    color_id = -1
    entity_info = []
    filtered_entities = []
    for entity in entities:
        entity_name, (start, end), bboxes = entity
        if start == end:
            # Entity has no visible span in the text; skip it.
            continue
        color_id += 1
        entity_info.append(((start, end), color_id))
        filtered_entities.append(entity)

    # Build (segment, label) pairs for gr.HighlightedText; plain text
    # between entity spans gets a None label.
    colored_text = []
    prev_start = 0
    end = 0
    for idx, ((start, end), color_id) in enumerate(entity_info):
        if start > prev_start:
            colored_text.append((processed_text[prev_start:start], None))
        colored_text.append((processed_text[start:end], f"{color_id}"))
        prev_start = end

    # Trailing plain text after the last entity span.
    if end < len(processed_text):
        colored_text.append((processed_text[end:len(processed_text)], None))

    return annotated_image, colored_text, str(filtered_entities)
221
+
222
+ term_of_use = """
223
+ ### Terms of use
224
+ By using this model, users are required to agree to the following terms:
225
+ The model is intended for academic and research purposes.
226
+ The utilization of the model to create unsuitable material is strictly forbidden and not endorsed by this work.
227
+ The accountability for any improper or unacceptable application of the model rests exclusively with the individuals who generated such content.
228
+ """
229
+
230
# Custom CSS injected into the Gradio app (dropdowns, radio buttons, sliders).
custom_css = """
/* Add your custom CSS styles here */
.gradio-root {
    font-family: Arial, sans-serif;
}

.gradio-dropdown select {
    padding: 8px 10px;
    border-radius: 5px;
    border: 1px solid #ccc;
    background-color: #f9f9f9;
}

.gradio-radio input[type="radio"]:checked+label {
    background-color: #007bff;
    color: #fff;
}

.gradio-radio input[type="radio"]:not(:checked)+label {
    background-color: #fff;
    color: #555;
}

.gradio-radio input[type="radio"]:focus+label {
    outline: none;
    border-color: #007bff;
}

.gradio-radio label {
    border-radius: 5px;
    padding: 8px 12px;
    margin: 0;
    cursor: pointer;
}

.gradio-radio label:hover {
    background-color: #f0f0f0;
}

.gradio-slider-container {
    padding: 10px 0;
}

.gradio-slider {
    -webkit-appearance: none;
    width: 100%;
    height: 8px;
    border-radius: 5px;
    background-color: #f9f9f9;
    outline: none;
    opacity: 0.7;
    -webkit-transition: .2s;
    transition: opacity .2s;
}

.gradio-slider::-webkit-slider-thumb {
    -webkit-appearance: none;
    appearance: none;
    width: 16px;
    height: 16px;
    border-radius: 50%;
    background-color: #007bff;
    cursor: pointer;
}

.gradio-slider::-moz-range-thumb {
    width: 16px;
    height: 16px;
    border-radius: 50%;
    background-color: #007bff;
    cursor: pointer;
}
"""
304
+
305
# Create Gradio interface. .queue() enables request queuing so long-running
# generations don't block concurrent users.
with gr.Blocks(title="Kosmos-2", theme=gr.themes.Base(), css=custom_css).queue() as demo:
    gr.Markdown(("""
# Kosmos-2: Grounding Multimodal Large Language Models to the World
### This model can answer visual questions, does localize objects in a given image, and even caption the image without hallucination!
### To get started, simply pick one of the images. Pick "Brief" or "Detailed" input for captioning. For visual question answering, pick "None" and enter your question.
"""))
    with gr.Row():
        # Left column: inputs.
        with gr.Column():
            image_input = gr.Image(type="pil", label="Test Image")
            text_input = gr.Radio(["Brief", "Detailed", "None"], label="Captioning Detail", value="Brief")
            question = gr.Textbox(label="Visual Question Answering")
            run_button = gr.Button(value="Run", visible=True)

        # Right column: annotated image and clickable highlighted caption.
        with gr.Column():
            image_output = gr.Image(type="pil")
            text_output1 = gr.HighlightedText(
                label="Generated Description",
                combine_adjacent=False,
                show_legend=True,
            )

    with gr.Row():
        with gr.Column():
            gr.Examples(examples=[
                ["/content/krina2.png", "Detailed", None],
                ["/content/krina.png", "Brief", None],
                ["/content/krina3.png", "None", "What is in this image?"],
            ], inputs=[image_input, text_input, question])

    gr.Markdown(term_of_use)

    # Hidden state: index of the entity span the user clicked in the
    # highlighted text (-1 means no selection).
    selected = gr.Number(-1, show_label=False, visible=False)

    # Hidden state: str() of the filtered entity list from the last run,
    # parsed back with ast.literal_eval in update_output_image below.
    entity_output = gr.Textbox(visible=False)

    def get_text_span_label(evt: gr.SelectData):
        # The label on a highlighted span is its color id (a stringified
        # int) or None for plain text segments.
        if evt.value[-1] is None:
            return -1
        return int(evt.value[-1])
    text_output1.select(get_text_span_label, None, selected)

    def update_output_image(img_input, image_output, entities, idx):
        # entities arrives as a string from the hidden textbox;
        # literal_eval (not eval) safely reconstructs the list structure.
        entities = ast.literal_eval(entities)
        updated_image = draw_entity_boxes_on_image(img_input, entities, entity_index=idx)
        return updated_image
    # Re-render the annotated image whenever the selected entity changes.
    selected.change(update_output_image, [image_input, image_output, entity_output, selected], [image_output])

    run_button.click(fn=generate_predictions,
                     inputs=[image_input, text_input, question],
                     outputs=[image_output, text_output1, entity_output],
                     show_progress=True, queue=True)

demo.launch(debug=True)