VictorSanh committed on
Commit
7073167
1 Parent(s): 87ce3a7

make it compatible with the new gr.ChatInterface

Files changed (1)
playground.py +422 -0
playground.py ADDED
@@ -0,0 +1,422 @@
import copy
import hashlib
import os
import re
# import spaces
import subprocess
import torch
import PIL

from pathlib import Path
from threading import Thread
from typing import List, Optional, Tuple
from urllib.parse import urlparse
from PIL import Image

import gradio as gr
from gradio import processing_utils
from gradio_client.client import DEFAULT_TEMP_DIR
from transformers import AutoProcessor, AutoModelForCausalLM, TextIteratorStreamer, logging
from transformers.image_utils import to_numpy_array, PILImageResampling, ChannelDimension
from transformers.image_transforms import resize, to_channel_dimension_format

# subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

DEVICE = torch.device("cuda")
MODELS = {
    "284 - neftune - opt 18'500": AutoModelForCausalLM.from_pretrained(
        "HuggingFaceM4/idefics2",
        trust_remote_code=True,
        torch_dtype=torch.bfloat16,
        token=os.environ["HF_AUTH_TOKEN"],
        revision="1e05755c1c5cb2077a0f60b83ea1368c22a17282",
    ).to(DEVICE),
    # "279bis - baseline - opt 18'500": AutoModelForCausalLM.from_pretrained(
    #     "HuggingFaceM4/idefics2",
    #     trust_remote_code=True,
    #     torch_dtype=torch.bfloat16,
    #     token=os.environ["HF_AUTH_TOKEN"],
    #     revision="5cd3c3a3eb5e0ea664f5ac09e73c9ef42da93a86",
    # ).to(DEVICE),
    # "286 - mix6 tables - opt 20'000": AutoModelForCausalLM.from_pretrained(
    #     "HuggingFaceM4/idefics2",
    #     trust_remote_code=True,
    #     torch_dtype=torch.bfloat16,
    #     token=os.environ["HF_AUTH_TOKEN"],
    #     revision="b473d49caa964991b40b79fe7cb27d51d4d023f6",
    # ).to(DEVICE),
    # "285 - continued pretraining on text sft - opt 2'000": AutoModelForCausalLM.from_pretrained(
    #     "HuggingFaceM4/idefics2",
    #     trust_remote_code=True,
    #     torch_dtype=torch.bfloat16,
    #     token=os.environ["HF_AUTH_TOKEN"],
    #     revision="b0a2a564e5dc311591886bb375e8d5a1aeaade83",
    # ).to(DEVICE),
}
PROCESSOR = AutoProcessor.from_pretrained(
    "HuggingFaceM4/idefics2",
    token=os.environ["HF_AUTH_TOKEN"],
)
FAKE_TOK_AROUND_IMAGE = "<fake_token_around_image>"
BOS_TOKEN = PROCESSOR.tokenizer.bos_token
BAD_WORDS_IDS = PROCESSOR.tokenizer(["<image>", "<fake_token_around_image>"], add_special_tokens=False).input_ids
EOS_WORDS_IDS = PROCESSOR.tokenizer("<end_of_utterance>", add_special_tokens=False).input_ids + [PROCESSOR.tokenizer.eos_token_id]
IMAGE_SEQ_LEN = 64  # list(MODELS.values())[0].config.perceiver_config.resampler_n_latents

SYSTEM_PROMPT = [
    # """The following is a conversation between a highly knowledgeable and intelligent visual AI assistant, called Assistant, and a human user, called User. In the following interactions, User and Assistant will converse in natural language, and Assistant will do its best to answer User’s questions. Assistant has the ability to perceive images and reason about the content of visual inputs. Assistant was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. When prompted with an image, it does not make up facts.

    # The conversation begins:""",
    # """\nUser:""",
    # "https://i1.pickpik.com/photos/515/906/355/kittens-cats-pet-cute-preview.jpg",
    # "Describe this image.<end_of_utterance>",
    # """\nAssistant: Five kittens are standing together in the center of the photograph. From left to right, there is one orange kitten, two kittens with white and black stripes, and two brown kittens with orange heads. They are in bright green grass and it looks like they are moving forward.<end_of_utterance>""",
    # "\nUser: How about this image?",
    # "https://cdn.pixabay.com/photo/2017/09/25/13/12/puppy-2785074_1280.jpg",
    # "Can you describe it too?<end_of_utterance>",
    # """\nAssistant: A dog is lying on the floor, looking at the camera. It is looking directly at you. The dog has a white body and brown patches on its face and ears. Its eyes are dark. Its nose is black, and it has long, floppy ears, white paws, long fur, big eyes.<end_of_utterance>""",
    # "\nUser: What kind of breed is it?<end_of_utterance>",
    # """\nAssistant: It looks like a Cavalier King Charles Spaniel. These breeds are known for their gentle, affectionate nature. Their long ears and big, brown eyes give them a sweet, expressive face. Cavaliers are small dogs weighing between 13 and 20 pounds. They have a silky, medium-length coat that comes in various colors, including black and tan, tricolor, red and white, or black.<end_of_utterance>""",
    # "\nUser: What can you tell me about this breed of dogs?<end_of_utterance>",
    # """\nAssistant: One specific characteristic of the Cavalier King Charles Spaniel is its friendly and affectionate nature. This breed is known for being extremely sociable and forming strong bonds with their owners. They are often described as "velcro dogs" because they love to be close to their human companions, whether it's sitting on their laps, cuddling on the couch, or simply following them around the house.<end_of_utterance>""",
    # "\nUser: ghjkhjabnufs<end_of_utterance>",
    # """\nAssistant: That doesn’t seem to be a word. Could you ask me another way?<end_of_utterance>""",
    # "\nUser: Do you like Cavalier King Charles Spaniel?<end_of_utterance>",
    # """\nAssistant: I do not have personal opinions as I’m just a computer program. However, cavaliers are known for being excellent family pets due to their gentle and patient demeanor, making them great with children and other pets. Their affectionate nature also makes them ideal therapy dogs, as they can provide comfort and emotional support to those in need.<end_of_utterance>""",
    # "\nUser: How many dogs do you see in this image?",
    # "https://i.dailymail.co.uk/i/pix/2011/07/01/article-2010308-0CD22A8300000578-496_634x414.jpg",
    # "<end_of_utterance>",
    # """\nAssistant: There are no dogs in this image. The picture shows a tennis player jumping to volley the ball.<end_of_utterance>""",
]

API_TOKEN = os.getenv("HF_AUTH_TOKEN")
# IDEFICS_LOGO = "https://huggingface.co/spaces/HuggingFaceM4/idefics_playground/resolve/main/IDEFICS_logo.png"
BOT_AVATAR = "IDEFICS_logo.png"


# Model processing utils - these will be handled in the model processor directly ultimately
def convert_to_rgb(image):
    # `image.convert("RGB")` would only work for .jpg images, as it creates a wrong background
    # for transparent images. The call to `alpha_composite` handles this case
    if image.mode == "RGB":
        return image

    image_rgba = image.convert("RGBA")
    background = Image.new("RGBA", image_rgba.size, (255, 255, 255))
    alpha_composite = Image.alpha_composite(background, image_rgba)
    alpha_composite = alpha_composite.convert("RGB")
    return alpha_composite

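# A minimal sketch of what `convert_to_rgb` guards against: naively converting a
# transparent image to RGB keeps the hidden foreground color, while compositing
# onto white gives a sensible background. `_demo_convert_to_rgb` is an
# illustrative helper, not part of the app flow.
def _demo_convert_to_rgb():
    transparent_red = Image.new("RGBA", (2, 2), (255, 0, 0, 0))  # fully transparent
    flattened = convert_to_rgb(transparent_red)
    assert flattened.mode == "RGB"
    assert flattened.getpixel((0, 0)) == (255, 255, 255)  # composited onto white

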
def custom_transform(x):
    x = convert_to_rgb(x)
    x = to_numpy_array(x)

    height, width = x.shape[:2]
    aspect_ratio = width / height
    if width >= height and width > 980:
        width = 980
        height = int(width / aspect_ratio)
    elif height > width and height > 980:
        height = 980
        width = int(height * aspect_ratio)
    width = max(width, 378)
    height = max(height, 378)

    x = resize(x, (height, width), resample=PILImageResampling.BILINEAR)
    x = PROCESSOR.image_processor.rescale(x, scale=1 / 255)
    x = PROCESSOR.image_processor.normalize(
        x,
        mean=PROCESSOR.image_processor.image_mean,
        std=PROCESSOR.image_processor.image_std,
    )
    x = to_channel_dimension_format(x, ChannelDimension.FIRST)
    x = torch.tensor(x)
    return x

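# A minimal sketch of the resizing rule in `custom_transform`: the long side is
# capped at 980px and each side is floored at 378px, which can change the aspect
# ratio of very elongated images. `_demo_custom_transform` is an illustrative
# helper (it needs PROCESSOR loaded), not part of the app flow.
def _demo_custom_transform():
    wide = Image.new("RGB", (2000, 500))
    out = custom_transform(wide)
    # 2000x500 -> capped to 980 wide (245 high), then the height is floored to 378.
    assert tuple(out.shape) == (3, 378, 980)

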
def create_model_inputs(
    input_texts: List[str],
    image_lists: List[List[Image.Image]],
):
    """
    All this logic will eventually be handled inside the model processor.
    """
    inputs = PROCESSOR.tokenizer(
        input_texts,
        return_tensors="pt",
        add_special_tokens=False,
        padding=True,
    )

    output_images = [
        [PROCESSOR.image_processor(img, transform=custom_transform) for img in im_list]
        for im_list in image_lists
    ]
    total_batch_size = len(output_images)
    max_num_images = max(len(img_l) for img_l in output_images)
    if max_num_images > 0:
        max_height = max(i.size(2) for img_l in output_images for i in img_l)
        max_width = max(i.size(3) for img_l in output_images for i in img_l)
        padded_image_tensor = torch.zeros(total_batch_size, max_num_images, 3, max_height, max_width)
        padded_pixel_attention_masks = torch.zeros(
            total_batch_size, max_num_images, max_height, max_width, dtype=torch.bool
        )
        for batch_idx, img_l in enumerate(output_images):
            for img_idx, img in enumerate(img_l):
                im_height, im_width = img.size()[2:]
                padded_image_tensor[batch_idx, img_idx, :, :im_height, :im_width] = img
                padded_pixel_attention_masks[batch_idx, img_idx, :im_height, :im_width] = True

        inputs["pixel_values"] = padded_image_tensor
        inputs["pixel_attention_mask"] = padded_pixel_attention_masks

    return inputs

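# A minimal sketch of the padding behavior of `create_model_inputs`: images of
# different sizes in one batch are zero-padded up to the batch-wide max height
# and width, with `pixel_attention_mask` marking the valid pixels.
# `_demo_create_model_inputs` is an illustrative helper (it needs PROCESSOR
# loaded), not part of the app flow.
def _demo_create_model_inputs():
    image_part = f"{FAKE_TOK_AROUND_IMAGE}{'<image>' * IMAGE_SEQ_LEN}{FAKE_TOK_AROUND_IMAGE}"
    small, large = Image.new("RGB", (400, 400)), Image.new("RGB", (600, 500))
    inputs = create_model_inputs([image_part + "Describe.", image_part + "Describe."], [[small], [large]])
    print(inputs["pixel_values"].shape)          # expected: torch.Size([2, 1, 3, H_max, W_max])
    print(inputs["pixel_attention_mask"].shape)  # expected: torch.Size([2, 1, H_max, W_max])

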
# Chatbot utils
def is_image(string: str) -> bool:
    """
    Images can be referenced in two ways: a local image path or a URL.
    """
    return is_url(string) or string.startswith(DEFAULT_TEMP_DIR)


def is_url(string: str) -> bool:
    """
    Checks that the passed string contains a valid URL and nothing else. For instance, if a space is
    included, the URL is immediately invalidated.
    """
    if " " in string:
        return False
    result = urlparse(string)
    return all([result.scheme, result.netloc])

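# A quick sanity sketch of `is_url`: both a scheme and a network location are
# required, and any space rejects the string outright. `_demo_is_url` is an
# illustrative helper, not part of the app flow.
def _demo_is_url():
    assert is_url("https://example.com/cat.png")
    assert not is_url("example.com/cat.png")  # missing scheme
    assert not is_url("https://example.com/a cat.png")  # contains a space

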
def prompt_list_to_model_input(prompt_list: List[str]) -> Tuple[str, List[Image.Image]]:
    """
    Create the final input string and image list to feed to the model.
    """
    images = []
    for idx, part in enumerate(prompt_list):
        if is_image(part):
            images.append(Image.open(part))
            prompt_list[idx] = f"{FAKE_TOK_AROUND_IMAGE}{'<image>' * IMAGE_SEQ_LEN}{FAKE_TOK_AROUND_IMAGE}"
    input_text = "".join(prompt_list)
    input_text = input_text.replace(FAKE_TOK_AROUND_IMAGE * 2, FAKE_TOK_AROUND_IMAGE)
    input_text = BOS_TOKEN + input_text.strip()
    return input_text, images

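# A minimal sketch of the placeholder logic in `prompt_list_to_model_input`:
# every image is expanded into IMAGE_SEQ_LEN `<image>` tokens wrapped in fake
# tokens, and the doubled fake token between two consecutive images collapses
# into a single separator. `_demo_image_placeholder_collapse` is an illustrative
# helper, not part of the app flow.
def _demo_image_placeholder_collapse():
    image_part = f"{FAKE_TOK_AROUND_IMAGE}{'<image>' * IMAGE_SEQ_LEN}{FAKE_TOK_AROUND_IMAGE}"
    two_images = image_part + image_part
    collapsed = two_images.replace(FAKE_TOK_AROUND_IMAGE * 2, FAKE_TOK_AROUND_IMAGE)
    assert collapsed.count(FAKE_TOK_AROUND_IMAGE) == 3  # start, separator, end

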
def turn_is_pure_media(turn):
    return turn[1] is None


def format_user_prompt_with_im_history_and_system_conditioning(
    user_prompt, chat_history
) -> List[str]:
    """
    Produces the resulting list that needs to go inside the processor.
    It handles the potential image(s), the history and the system conditioning.
    """
    resulting_list = copy.deepcopy(SYSTEM_PROMPT)

    # Format history
    for turn in chat_history:
        if turn_is_pure_media(turn):
            media = turn[0][0]
            if not resulting_list or resulting_list[-1].endswith("<end_of_utterance>"):
                resulting_list.append("\nUser:")
            resulting_list.append(media)
        else:
            user_utterance, assistant_utterance = turn
            if resulting_list and is_image(resulting_list[-1]):  # the previous `turn` in `chat_history` was pure media
                resulting_list.append(f"{user_utterance.strip()}<end_of_utterance>\nAssistant: {assistant_utterance}<end_of_utterance>")
            else:
                resulting_list.append(f"\nUser: {user_utterance.strip()}<end_of_utterance>\nAssistant: {assistant_utterance}<end_of_utterance>")

    # Format current input
    if not user_prompt["files"]:
        resulting_list.append("\nUser: ")
    else:
        # Choosing to put the image first when the image is provided through the UI, but this is an arbitrary choice.
        resulting_list.append("\nUser:")
        resulting_list.extend([im["path"] for im in user_prompt["files"]])
    resulting_list.append(f"{user_prompt['text']}<end_of_utterance>\nAssistant:")

    return resulting_list

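# A minimal sketch of the formatter on the simplest input: empty history, no
# files. With SYSTEM_PROMPT commented out above, the result is just the new User
# turn followed by the Assistant cue that generation continues from.
# `_demo_format_prompt` is an illustrative helper, not part of the app flow.
def _demo_format_prompt():
    prompt = {"text": "What is Idefics2?", "files": []}
    parts = format_user_prompt_with_im_history_and_system_conditioning(prompt, chat_history=[])
    assert parts == ["\nUser: ", "What is Idefics2?<end_of_utterance>\nAssistant:"]

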
# @spaces.GPU(duration=180)
def model_inference(
    user_prompt,
    chat_history,
    decoding_strategy,
    temperature,
    max_new_tokens,
    repetition_penalty,
    top_p,
    model_selector,
):
    if user_prompt["text"].strip() == "" and not user_prompt["files"]:
        raise gr.Error("Please input a query and optionally image(s).")

    if user_prompt["text"].strip() == "" and user_prompt["files"]:
        raise gr.Error("Please input a text query along with the image(s).")

    for file in user_prompt["files"]:
        if not file["mime_type"].startswith("image/"):
            raise gr.Error("Idefics2 only supports images. Please input a valid image.")

    formatted_prompt_list = format_user_prompt_with_im_history_and_system_conditioning(
        user_prompt=user_prompt,
        chat_history=chat_history,
    )

    streamer = TextIteratorStreamer(
        PROCESSOR.tokenizer,
        skip_prompt=True,
    )

    # Common parameters to all decoding strategies
    # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies
    generation_args = {
        "max_new_tokens": max_new_tokens,
        "repetition_penalty": repetition_penalty,
        "bad_words_ids": BAD_WORDS_IDS,
        "eos_token_id": EOS_WORDS_IDS,
        "streamer": streamer,
    }

    assert decoding_strategy in [
        "Greedy",
        "Top P Sampling",
    ]
    if decoding_strategy == "Greedy":
        generation_args["do_sample"] = False
    elif decoding_strategy == "Top P Sampling":
        generation_args["temperature"] = temperature
        generation_args["do_sample"] = True
        generation_args["top_p"] = top_p

    # Creating model inputs
    input_text, images = prompt_list_to_model_input(formatted_prompt_list)
    print(input_text)
    inputs = create_model_inputs([input_text], [images])
    inputs = {k: v.to(DEVICE) for k, v in inputs.items()}
    generation_args.update(inputs)

    print("1")
    thread = Thread(
        target=MODELS[model_selector].generate,
        kwargs=generation_args,
    )
    print("2")
    thread.start()
    acc_text = ""
    print("start generating")

    for text_token in streamer:
        acc_text += text_token
        yield acc_text
        # last_turn = chat_history.pop(-1)
        # last_turn[-1] += acc_text
        # if last_turn[-1].endswith("\nUser"):
        #     # Safeguard: sometimes (rarely), the model won't generate the token `<end_of_utterance>` and will go directly to generating `\nUser:`
        #     # It will thus stop the generation on `\nUser:`. But when it exits, it will have already generated `\nUser`
        #     # This post-processing ensures that we don't have an additional `\nUser` wandering around.
        #     last_turn[-1] = last_turn[-1][:-5]
        # chat_history.append(last_turn)
        # yield "", None, chat_history
        # acc_text = ""


with gr.Blocks() as demo:
    with gr.Row(elem_id="model_selector_row"):
        model_selector = gr.Dropdown(
            choices=list(MODELS.keys()),
            value="284 - neftune - opt 18'500",
            interactive=True,
            show_label=False,
            container=False,
            label="Model",
            visible=True,
        )

    # Hyper-parameters for generation
    max_new_tokens = gr.Slider(
        minimum=8,
        maximum=1024,
        value=512,
        step=1,
        interactive=True,
        label="Maximum number of new tokens to generate",
        visible=False,
    )
    repetition_penalty = gr.Slider(
        minimum=0.01,
        maximum=5.0,
        value=1.0,
        step=0.01,
        interactive=True,
        label="Repetition penalty",
        info="1.0 is equivalent to no penalty",
        visible=False,
    )
    decoding_strategy = gr.Radio(
        [
            "Greedy",
            "Top P Sampling",
        ],
        value="Greedy",
        label="Decoding strategy",
        interactive=True,
        info="Higher values are equivalent to sampling more low-probability tokens.",
        visible=False,
    )
    temperature = gr.Slider(
        minimum=0.0,
        maximum=5.0,
        value=0.4,
        step=0.1,
        interactive=True,
        visible=False,
        label="Sampling temperature",
        info="Higher values will produce more diverse outputs.",
    )
    top_p = gr.Slider(
        minimum=0.01,
        maximum=0.99,
        value=0.8,
        step=0.01,
        interactive=True,
        visible=False,
        label="Top P",
        info="Higher values are equivalent to sampling more low-probability tokens.",
    )
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(
            visible=(
                selection in ["contrastive_sampling", "beam_sampling", "Top P Sampling", "sampling_top_k"]
            )
        ),
        inputs=decoding_strategy,
        outputs=temperature,
    )
    decoding_strategy.change(
        fn=lambda selection: gr.Slider(visible=(selection in ["Top P Sampling"])),
        inputs=decoding_strategy,
        outputs=top_p,
    )

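    # Note: `additional_inputs` below must stay in the same order as the extra
    # parameters of `model_inference` (after `user_prompt` and `chat_history`),
    # because gr.ChatInterface passes them to the function positionally.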
    gr.ChatInterface(
        fn=model_inference,
        # examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}],
        title="Echo Bot",
        multimodal=True,
        additional_inputs=[decoding_strategy, temperature, max_new_tokens, repetition_penalty, top_p, model_selector],
    )


demo.launch()