import logging
import os
import re
import time
from io import BytesIO

import gradio as gr
import requests
import torch
import transformers
from accelerate.utils import get_max_memory
from joblib import Parallel, delayed
from PIL import Image
from transformers import AutoTokenizer

from m4.models.vbloom import configuration_vbloom, modeling_vbloom
from m4.models.vgpt2 import configuration_vgpt2, modeling_vgpt2
from m4.models.vgpt_neo import configuration_vgpt_neo, modeling_vgpt_neo
from m4.models.vllama import configuration_vllama, modeling_vllama
from m4.models.vopt import configuration_vopt, modeling_vopt
from m4.training.packing import image_attention_mask_for_packed_input_ids, incremental_to_binary_attention_mask
from m4.training.utils import build_image_transform

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()

CURRENT_MODEL = ""
# CURRENT_MODEL = "tr_177_4datasets_alpha_baseline_opt_step-2000"

MAX_TRIES = 3
TOKENIZER_FAST = True
MAX_SEQ_LEN = 1024
model, tokenizer = None, None

MODEL_TO_DISPLAY_NAME = {
    "tr_199_w_xattn_opt_step-65000": "VLlama - tr_199_w_xattn_opt_step-65000",
    # "tr_201_sft_on_lrv_opt_step-15000": "VLlama - tr_201_sft_on_lrv_opt_step-15000",
    # "tr_202bis_ift_llava_all_unfrozen_opt_step-14128": "VLlama - tr_202bis_ift_llava_all_unfrozen_opt_step-14128",
    # "tr_203_ift_m3it_opt_step-50000": "VLlama - tr_203_ift_m3it_opt_step-50000",
    # "tr_205_sft_ultrachat_opt_step-20000": "VLlama - tr_205_sft_ultrachat_opt_step-20000",
    # "tr_207_ift_svit_opt_step-14627": "VLlama - tr_207_ift_svit_opt_step-14627",
    "tr_209_ift_mixture_opt_step-14000": "VLlama - tr_209_ift_mixture_opt_step-14000",
}
MODEL_TO_MODEL_CLASS = {
    "tr_199_w_xattn_opt_step-65000": "VLlamaForCausalLM",
    # "tr_201_sft_on_lrv_opt_step-15000": "VLlamaForCausalLM",
    # "tr_202bis_ift_llava_all_unfrozen_opt_step-14128": "VLlamaForCausalLM",
    # "tr_203_ift_m3it_opt_step-50000": "VLlamaForCausalLM",
    # "tr_205_sft_ultrachat_opt_step-20000": "VLlamaForCausalLM",
    # "tr_207_ift_svit_opt_step-14627": "VLlamaForCausalLM",
    "tr_209_ift_mixture_opt_step-14000": "VLlamaForCausalLM",
}
MODEL_TO_CONFIG_CLASS = {
    "tr_199_w_xattn_opt_step-65000": "VLlamaConfig",
    # "tr_201_sft_on_lrv_opt_step-15000": "VLlamaConfig",
    # "tr_202bis_ift_llava_all_unfrozen_opt_step-14128": "VLlamaConfig",
    # "tr_203_ift_m3it_opt_step-50000": "VLlamaConfig",
    # "tr_205_sft_ultrachat_opt_step-20000": "VLlamaConfig",
    # "tr_207_ift_svit_opt_step-14627": "VLlamaConfig",
    "tr_209_ift_mixture_opt_step-14000": "VLlamaConfig",
}


def load_tokenizer_model(model_name, model_class):
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        use_fast=TOKENIZER_FAST,
        use_auth_token=os.getenv("HF_AUTH_TOKEN", True),  # `use_fast=False` for 1B3 OPT, True for all the other models
    )
    tokenizer.padding_side = "left"
    config_class = MODEL_TO_CONFIG_CLASS[model_name.split("/")[-1]]
    # assert tokenizer.is_fast

    supported_custom_modules = {
        "vgpt2": modeling_vgpt2,
        "vbloom": modeling_vbloom,
        "vgptneo": modeling_vgpt_neo,
        "vopt": modeling_vopt,
        "vllama": modeling_vllama,
    }
    supported_custom_configs = {
        "vgpt2": configuration_vgpt2,
        "vbloom": configuration_vbloom,
        "vgptneo": configuration_vgpt_neo,
        "vopt": configuration_vopt,
        "vllama": configuration_vllama,
    }
    parent_config_class = (
        [v for k, v in supported_custom_configs.items() if k in model_class.lower()] + [transformers]
    )[0]
    parent_model_class = (
        [v for k, v in supported_custom_modules.items() if k in model_class.lower()] + [transformers]
    )[0]
    config_class = getattr(parent_config_class, config_class)
    model_class = getattr(parent_model_class, model_class)
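    # Note: the two lookups above pick the first custom m4 implementation whose key appears in
    # the requested class name (e.g. "vllama" in "VLlamaForCausalLM"), and fall back to the
    # plain `transformers` namespace when no custom module matches.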

    config = config_class.from_pretrained(model_name, use_auth_token=os.getenv("HF_AUTH_TOKEN", True))

    max_memory_map = get_max_memory()
    for key in max_memory_map.keys():
        if key != "cpu":
            # Get this in GiB
            max_memory_map[key] = max_memory_map[key] // (1024 * 1024 * 1024)
            # Decrease by 2 for PyTorch overhead and by 2 for the forward pass, to be safe
            max_memory_map[key] = f"{max_memory_map[key] - 4} GiB"

    model = model_class.from_pretrained(
        model_name,
        use_auth_token=os.getenv("HF_AUTH_TOKEN", True),
        device_map="auto",
        offload_folder="./offload",
        torch_dtype=config.torch_dtype,
        max_memory=max_memory_map,
    )
    model.eval()
    print("Current device map:", model.hf_device_map)
    print("Model default generation config:", model.generation_config)
    # TODO: the device_map looks very inefficient right now; that could be improved.
    # It typically looks like this:
    # {
    #     'model.embed_tokens': 0,
    #     'model.vision_model': 0,
    #     'model.layers.0': 0,
    #     'model.layers.1': 0,
    #     'model.layers.2': 0,
    #     'model.layers.3': 0,
    #     'model.layers.4': 0,
    #     'model.layers.5': 0,
    #     'model.layers.6': 1,
    #     'model.layers.7': 1,
    #     'model.layers.8': 1,
    #     'model.layers.9': 1,
    #     'model.layers.10': 1,
    #     'model.layers.11': 1,
    #     'model.layers.12': 1,
    #     'model.layers.13': 1,
    #     'model.layers.14': 1,
    #     'model.layers.15': 1,
    #     'model.layers.16': 1,
    #     'model.layers.17': 2,
    #     'model.layers.18': 2,
    #     'model.layers.19': 2,
    #     'model.layers.20': 2,
    #     'model.layers.21': 2,
    #     'model.layers.22': 2,
    #     'model.layers.23': 2,
    #     'model.layers.24': 2,
    #     'model.layers.25': 2,
    #     'model.layers.26': 2,
    #     'model.layers.27': 2,
    #     'model.layers.28': 3,
    #     'model.layers.29': 3,
    #     'model.layers.30': 3,
    #     'model.layers.31': 3,
    #     'model.gated_cross_attn_layers.0': 3,
    #     'model.gated_cross_attn_layers.1': 3,
    #     'model.gated_cross_attn_layers.2': 3,
    #     'model.gated_cross_attn_layers.3': 3,
    #     'model.gated_cross_attn_layers.4': 3,
    #     'model.gated_cross_attn_layers.5': 3,
    #     'model.gated_cross_attn_layers.6': 3,
    #     'model.gated_cross_attn_layers.7': 3,
    #     'model.gated_cross_attn_layers.8': 4,
    #     'model.gated_cross_attn_layers.9': 4,
    #     'model.gated_cross_attn_layers.10': 4,
    #     'model.gated_cross_attn_layers.11': 4,
    #     'model.gated_cross_attn_layers.12': 4,
    #     'model.gated_cross_attn_layers.13': 4,
    #     'model.gated_cross_attn_layers.14': 4,
    #     'model.gated_cross_attn_layers.15': 4,
    #     'model.norm': 4,
    #     'lm_head': 4
    # }
    # which means there is a lot of data moving back and forth between the gated cross-attention
    # layers and the LM layers...
    return tokenizer, model
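

# The demo runs in one of two modes: as the "main" Space, which fans requests out to one hosted
# Space per checkpoint listed in MODEL_TO_MODEL_CLASS, or as a single-model Space, which loads
# the checkpoint locally with `load_tokenizer_model`.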
MODEL_TO_SPACE_MAPPING = {}

IS_MAIN_SPACE = CURRENT_MODEL not in MODEL_TO_MODEL_CLASS
if IS_MAIN_SPACE:
    for model in MODEL_TO_MODEL_CLASS:
        MODEL_TO_SPACE_MAPPING[model] = gr.Blocks.load(
            name=f"spaces/HuggingFaceM4/{model}", api_key=os.getenv("HF_AUTH_TOKEN", True)
        )
else:
    model_path = f"HuggingFaceM4/{CURRENT_MODEL}"
    tokenizer, model = load_tokenizer_model(model_path, MODEL_TO_MODEL_CLASS[CURRENT_MODEL])


def fetch_images(url_images):
    images = []
    for url in url_images:
        if isinstance(url, str):
            images.append(Image.open(BytesIO(requests.get(url, stream=True).content)))
        else:
            images.append(url)
    return images


def model_generation(
    prompt,
    images,
    tokenizer,
    model,
    temperature,
    no_repeat_ngram_size,
    max_new_tokens,
    min_length,
    ban_tokens,
    eos_tokens,
    force_words,
    repetition_penalty,
    hide_special_tokens,
    decoding_strategy,
    num_beams,
    length_penalty,
    top_k,
    top_p,
    penalty_alpha,
):
    # Preparing inputs
    tokens = tokenizer(
        [prompt],
        truncation=True,
        max_length=MAX_SEQ_LEN,
        padding=True,
        add_special_tokens=False,
    )
    input_ids = torch.tensor([[tokenizer.bos_token_id] + tokens.input_ids[0]])
    attention_mask = torch.tensor([[1] + tokens.attention_mask[0]])

    image_attention_mask = [
        incremental_to_binary_attention_mask(
            image_attention_mask_for_packed_input_ids(input_ids[0].unsqueeze(0), tokenizer)[0], num_classes=len(images)
        )
    ]

    image_transform = build_image_transform(eval=True)
    pixel_values = [torch.stack([image_transform(img) for img in images])]

    input_ids = input_ids.to(0)
    attention_mask = attention_mask.to(0)
    pixel_values = torch.stack(pixel_values).to(0)
    image_attention_mask = torch.cat(image_attention_mask, 0).to(0)
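
    # All inputs are placed on GPU 0: with `device_map="auto"` the token embeddings and the
    # vision encoder typically end up on the first device (see the example map printed in
    # `load_tokenizer_model`), and accelerate's hooks move activations across the other shards.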

    # Excluding some words from the generation
    bad_words_ids = None
    ban_tokens = ban_tokens.replace("\\n", "\n")
    bad_words = ban_tokens.split(";")
    if len(bad_words) > 0:
        bad_words_ids = tokenizer(bad_words, add_special_tokens=False).input_ids

    # Forcing some words in the generation
    force_words_ids = None
    if force_words != "":
        force_words = force_words.replace("\\n", "\n")
        force_words = force_words.split(";")
        if len(force_words) > 0:
            force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids

    eos_token_ids = None
    if eos_tokens != "":
        eos_tokens = eos_tokens.replace("\\n", "\n")
        eos_tokens = eos_tokens.split(";")
        if len(eos_tokens) > 0:
            eos_token_ids = []
            for eos_token in eos_tokens:
                tokenized_eos_token = tokenizer(eos_token, add_special_tokens=False).input_ids
                if len(tokenized_eos_token) > 1:
                    raise ValueError(
                        f"eos_tokens should be one token, here {eos_token} is {len(tokenized_eos_token)} tokens:"
                        f" {tokenized_eos_token}"
                    )
                eos_token_ids += tokenized_eos_token

    # Inputs
    input_args = {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "pixel_values": pixel_values,
        "image_attention_mask": image_attention_mask,
    }
    # Common parameters to all decoding strategies
    # This documentation is useful to read: https://huggingface.co/docs/transformers/main/en/generation_strategies
    generation_args = {
        "temperature": temperature,
        "no_repeat_ngram_size": no_repeat_ngram_size,
        "max_new_tokens": max_new_tokens,
        "min_length": min_length,
        "bad_words_ids": bad_words_ids,
        "force_words_ids": force_words_ids,
        "repetition_penalty": repetition_penalty,
        "eos_token_id": eos_token_ids,
    }

    assert decoding_strategy in [
        "greedy",
        "beam_search",
        "beam_sampling",
        "sampling_top_k",
        "sampling_top_p",
        "contrastive_sampling",
    ]
    if decoding_strategy == "greedy":
        pass
    elif decoding_strategy == "beam_search":
        generation_args["num_beams"] = num_beams
        generation_args["length_penalty"] = length_penalty
        assert generation_args["num_beams"] > 1
    elif decoding_strategy == "beam_sampling":
        generation_args["num_beams"] = num_beams
        generation_args["length_penalty"] = length_penalty
        generation_args["do_sample"] = True
        assert generation_args["num_beams"] > 1
    elif decoding_strategy == "sampling_top_k":
        generation_args["do_sample"] = True
        generation_args["top_k"] = top_k
    elif decoding_strategy == "sampling_top_p":
        generation_args["do_sample"] = True
        generation_args["top_p"] = top_p
    elif decoding_strategy == "contrastive_sampling":
        generation_args["do_sample"] = True
        generation_args["penalty_alpha"] = penalty_alpha
        generation_args["top_k"] = top_k

    generated_tokens = model.generate(
        **input_args,
        **generation_args,
    )
    tokens = tokenizer.convert_ids_to_tokens(generated_tokens[0])
    decoded_skip_special_tokens = repr(
        tokenizer.batch_decode(generated_tokens, skip_special_tokens=hide_special_tokens)[0]
    )
    decoded = repr(tokenizer.batch_decode(generated_tokens)[0])
    logger.info(
        "Result: \n"
        f"Prompt: `{prompt}`\n"
        f"Tokens ids from prompt + generation: `{generated_tokens[0].tolist()}`\n"
        f"Tokens (converted) from prompt + generation: `{tokens}`\n"
        f"String decoded with skipped special tokens: `{decoded_skip_special_tokens}`\n"
        f"String decoded: `{decoded}`\n"
        f"Generation mode: `{decoding_strategy}`\n"
        f"Generation parameters: `{generation_args}`\n"
    )

    original_prompt = generated_tokens[:, : input_ids.shape[-1]]
    actual_generated_tokens = generated_tokens[:, input_ids.shape[-1] :]
    first_end_token = len(actual_generated_tokens[0])
    actual_generated_tokens = actual_generated_tokens[:, :first_end_token]
    displayed_tokens = torch.cat([original_prompt, actual_generated_tokens], dim=-1)
    generated_text = tokenizer.batch_decode(displayed_tokens, skip_special_tokens=hide_special_tokens)[0]
    return generated_text


def model_inference(
    files,
    prompt,
    temperature,
    no_repeat_ngram_size,
    max_new_tokens,
    min_length,
    ban_tokens,
    eos_tokens,
    force_words,
    repetition_penalty,
    hide_special_tokens,
    decoding_strategy,
    num_beams,
    length_penalty,
    top_k,
    top_p,
    penalty_alpha,
):
    if isinstance(files, str) and len(files) == 0:
        files = None

    prompt = prompt.strip()
    prompt = prompt.replace("\\n", "\n")
    file_idx = 0
    # Inline image references: a bare `<image>` consumes the next uploaded file, while
    # `<image:URL>` points to an image to download (pattern assumed from the substitution logic below).
    url_images = re.findall(r"<image(.*?)>", prompt)
    for idx, url_image in enumerate(url_images):
        if len(url_image) == 0:
            url_images[idx] = Image.open(files[file_idx].name if hasattr(files[file_idx], "name") else files[file_idx])
            file_idx += 1
        else:
            prompt = prompt.replace(url_image, "")
            url_images[idx] = url_images[idx][1:]
    images = fetch_images(url_images)

    global model, tokenizer

    generated_text = model_generation(
        prompt=prompt,
        images=images,
        tokenizer=tokenizer,
        model=model,
        temperature=temperature,
        no_repeat_ngram_size=no_repeat_ngram_size,
        max_new_tokens=max_new_tokens,
        min_length=min_length,
        ban_tokens=ban_tokens,
        eos_tokens=eos_tokens,
        force_words=force_words,
        repetition_penalty=repetition_penalty,
        hide_special_tokens=hide_special_tokens,
        decoding_strategy=decoding_strategy,
        num_beams=num_beams,
        length_penalty=length_penalty,
        top_k=top_k,
        top_p=top_p,
        penalty_alpha=penalty_alpha,
    )
    return generated_text.strip()
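

# Minimal usage sketch of `model_inference` for a single-model (non-main) Space, kept commented
# out so nothing runs on import. The prompt text and the `<image>` token syntax are assumptions
# for illustration; the remaining arguments roughly follow the UI defaults defined below.
# print(
#     model_inference(
#         files=[os.path.join(os.path.dirname(__file__), "images", "bear.jpg")],
#         prompt="User:<image>Describe this image.\\nAssistant:",
#         temperature=1.0,
#         no_repeat_ngram_size=0,
#         max_new_tokens=256,
#         min_length=16,
#         ban_tokens="<image>;<fake_token_around_image>",
#         eos_tokens="",
#         force_words="",
#         repetition_penalty=1.0,
#         hide_special_tokens=False,
#         decoding_strategy="greedy",
#         num_beams=1,
#         length_penalty=1.0,
#         top_k=50,
#         top_p=0.95,
#         penalty_alpha=0.95,
#     )
# )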


def try_model_inference(
    model,
    files,
    prompt,
    temperature,
    no_repeat_ngram_size,
    max_new_tokens,
    min_length,
    ban_tokens,
    eos_tokens,
    force_words,
    repetition_penalty,
    hide_special_tokens,
    decoding_strategy,
    num_beams,
    length_penalty,
    top_k,
    top_p,
    penalty_alpha,
):
    count = 0
    while count < MAX_TRIES:
        try:
            return MODEL_TO_SPACE_MAPPING[model](
                files,
                prompt,
                temperature,
                no_repeat_ngram_size,
                max_new_tokens,
                min_length,
                ban_tokens,
                eos_tokens,
                force_words,
                repetition_penalty,
                hide_special_tokens,
                decoding_strategy,
                num_beams,
                length_penalty,
                top_k,
                top_p,
                penalty_alpha,
                api_name="model_inference",
            )
        except KeyError:
            # Gradio sometimes returns {'error': None}; wait and retry.
            time.sleep(3)
            count += 1


def all_model_inference(
    prompt,
    temperature,
    no_repeat_ngram_size,
    max_new_tokens,
    min_length,
    ban_tokens,
    eos_tokens,
    force_words,
    repetition_penalty,
    hide_special_tokens,
    decoding_strategy,
    num_beams,
    length_penalty,
    top_k,
    top_p,
    penalty_alpha,
):
    outputs = []
    print(
        prompt,
        temperature,
        no_repeat_ngram_size,
        max_new_tokens,
        min_length,
        ban_tokens,
        eos_tokens,
        force_words,
        repetition_penalty,
        hide_special_tokens,
        decoding_strategy,
        num_beams,
        length_penalty,
        top_k,
        top_p,
        penalty_alpha,
    )
    outputs = Parallel(n_jobs=len(MODEL_TO_SPACE_MAPPING), backend="threading")(
        delayed(try_model_inference)(
            model,
            os.path.join(os.path.dirname(__file__), "images", "bear.jpg"),
            prompt,
            temperature,
            no_repeat_ngram_size,
            max_new_tokens,
            min_length,
            ban_tokens,
            eos_tokens,
            force_words,
            repetition_penalty,
            hide_special_tokens,
            decoding_strategy,
            num_beams,
            length_penalty,
            top_k,
            top_p,
            penalty_alpha,
        )
        for model in MODEL_TO_SPACE_MAPPING
    )
    if len(outputs) == 1:
        outputs = outputs[0]
    return outputs
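

# Each `examples` entry mirrors the `inputs` list built in the UI below: an optional file entry
# first, then the prompt and the generation parameters in the same order as the widgets. In the
# main Space the leading file entry is stripped out (`examples = [example[1:] for example in examples]`).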


examples = [
    [
        None,
        """The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Assistant, and a human user, called User. In the following interactions, User and Assistant will converse in natural language, and Assistant will do its best to answer User’s questions. Assistant was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. When prompted with an image, it does not make up facts.
The conversation begins:
User:Describe this image.
Assistant: Five kittens are standing together in the center of the photograph. From the left to right, there is one orange kitten, two kittens white and black stripes, and two brown kittens with an orange head. They are in bright green grass and it looks like they are moving forward.
User:How about this image? Can you describe it too?
Assistant: A dog is lying on the floor, looking at the camera. It is looking directly at you. The dog has a white body and brown patches on its face and ears. Its eyes are dark. Its nose is black, and it has long, floppy ears, white paws, long fur, big eyes.
User: What kind of breed is it?
Assistant: It looks like a Cavalier King Charles Spaniel. These breeds are known for their gentle, affectionate nature. Their long ears and big, brown eyes give them a sweet, expressive face. Cavaliers are small dogs weighing between 13 and 20 pounds. They have a silky, medium-length coat that comes in various colors, including black and tan, tricolor, red and white, or black.
User: What can you tell me about this breed of dogs?
Assistant: One specific characteristic of the Cavalier King Charles Spaniel is its friendly and affectionate nature. This breed is known for being extremely sociable and forming strong bonds with their owners. They are often described as "velcro dogs" because they love to be close to their human companions, whether it's sitting on their laps, cuddling on the couch, or simply following them around the house.
User: ghjkhjabnufs
Assistant: That doesn’t seem to be a word. Could you ask me another way?
User: Do you like Cavalier King Charles Spaniel?
Assistant: I do not have personal opinions as I’m just a computer program. However, cavaliers are known for being excellent family pets due to their gentle and patient demeanor, making them great with children and other pets. Their affectionate nature also makes them ideal therapy dogs, as they can provide comfort and emotional support to those in need.
User: How many dogs do you see in this image?
Assistant: There are no dogs in this image. The picture shows a tennis player jumping to volley the ball.
User:Describe all of the parts of this image.
Assistant:""",
        1.0,
        0,
        256,
        10,
        "<image>;<fake_token_around_image>",
        "</s>;User",
        "",
        1.0,
        False,
        "greedy",
        1,
        1.0,
        50,
        0.5,
        0.95,
    ],
    # [
    #     None,
    #     """This is a conversation between a human, User, and an intelligent visual AI, Bot. User sends images, and Bot answer the questions from the user.
    # User:
    # Describe this image.
    # Bot:""",
    #     1,
    #     2,
    #     64,
    #     10,
    #     ";;User;user;Bot;bot;Question;question;Answer;answer;\n",
    #     False,
    #     False,
    #     True,
    # ],
    # [
    #     None,
    #     """This is a conversation between a human, User, and an intelligent visual AI, Bot. User sends images, and Bot answer the questions from the user.
    # User:
    # Why do people find this image funny?
    # Bot:""",
    #     1,
    #     2,
    #     64,
    #     10,
    #     ";;User;user;Bot;bot;Question;question;Answer;answer;\n",
    #     False,
    #     False,
    #     True,
    # ],
    # [
    #     None,
    #     """This is a conversation between a human, User, and an intelligent visual AI, Bot. User sends images, and Bot answer the questions from the user.
    # User:
    # Describe what's in this image.
    # Bot:""",
    #     1,
    #     2,
    #     64,
    #     10,
    #     ";;User;user;Bot;bot;Question;question;Answer;answer;\n",
    #     False,
    #     False,
    #     True,
    # ],
    # [
    #     None,
    #     """This is a conversation between a human, User, and an intelligent visual AI, Bot. User sends images, and Bot answer the questions from the user.
    # User:
    # What's the correct answer? A, B, C or D?
    # Bot:""",
    #     1,
    #     2,
    #     64,
    #     10,
    #     ";;User;user;Bot;bot;Question;question;Answer;answer;\n",
    #     False,
    #     False,
    #     True,
    # ],
]

MSG_MAIN = """
# Text generation with VLlama models

### Help to write prompts:

Put the URLs to the images inside the image tokens; they will be converted into the real image tokens. Put `<fake_token_around_image>` before and after each image token WITHOUT space. The text `\\n` will be converted into real newline characters. See examples and additional details below.
"""

# MSG_DETAILS = """
# ### Additional details
# - if the model was trained with the template 1 (`\\n\\n<image>\\n\\n`), then `<image>` will be replaced with `\\n\\n<image>\\n\\n`. This is particularly useful if you are comparing the performance of different models trained with different templates.
# - special tokens are not automatically added to the prompt, so add them manually.
# - with the first template `\\n\\n<image>\\n\\n`, the sequence isn't necessarily tokenized as `["\\n\\n", "<image>", "\\n\\n"]`; to enforce this behavior, you can use the "Integrate image sequence as ids" parameter.
# """
# if ~IS_MAIN_SPACE:
#     MSG_DETAILS += (
#         "- alternatively, you can upload images and then directly specify them via \ tag in the prompt."
#     )


with gr.Blocks() as demo:
    gr.Markdown(MSG_MAIN)
    with gr.Row():
        with gr.Column():
            gr.Markdown("## Input")
            if not IS_MAIN_SPACE:
                images = gr.File(label="Images", file_count="multiple")
            prompt = gr.Textbox(label="Prompt", placeholder="Enter the prompt here")

            gr.Markdown("## Common parameters to all decoding strategies")
            temperature = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=1.0, label="Softmax temperature")
            no_repeat_ngram_size = gr.Slider(
                minimum=0,
                maximum=10,
                step=1,
                value=0,
                label="The size of an n-gram that cannot occur more than once (0=infinity)",
            )
            max_new_tokens = gr.Slider(
                minimum=0, maximum=512, step=1, value=256, label="Maximum number of new tokens to generate"
            )
            min_length = gr.Slider(
                minimum=0, maximum=512, step=1, value=16, label="Minimum length of the sequence to be generated"
            )
            ban_tokens = gr.Textbox(
                label='Tokens to prevent from being generated (separated by ";")',
                value="<image>;<fake_token_around_image>",
            )
            eos_tokens = gr.Textbox(label="EOS tokens", value="")
            force_words = gr.Textbox(label='Force words to be generated (separated by ";")', value="")
            repetition_penalty = gr.Slider(
                minimum=0, maximum=10, step=0.01, value=1, label="repetition_penalty. CTRL paper suggests 1.2."
            )
            hide_special_tokens = gr.Checkbox(label="Hide special tokens in the text", value=False)

            gr.Markdown("## Decoding strategy and its specific parameters")
            decoding_strategy = gr.Dropdown(
                ["greedy", "beam_search", "beam_sampling", "sampling_top_k", "sampling_top_p", "contrastive_sampling"],
                label="Decoding strategy",
                value="greedy",
            )
            num_beams = gr.Slider(
                minimum=0,
                maximum=10,
                step=1,
                value=3,
                label="Beam size",
                info="Only used if `decoding_strategy` is `beam_search` or `beam_sampling`",
            )
            length_penalty = gr.Slider(
                minimum=-1000,
                maximum=1000,
                step=0.1,
                value=1,
                label=(
                    "length_penalty > 0.0 promotes longer sequences, while length_penalty < 0.0 encourages shorter"
                    " sequences. Only used if `decoding_strategy` is `beam_search` or `beam_sampling`"
                ),
            )
            top_k = gr.Slider(
                minimum=0,
                maximum=500,
                step=1,
                value=50,
                label="Top k",
                info="Only used if `decoding_strategy` is `sampling_top_k` or `contrastive_sampling`",
            )
            top_p = gr.Slider(
                minimum=0,
                maximum=1,
                step=0.01,
                value=0.95,
                label="Top p",
                info="Only used if `decoding_strategy` is `sampling_top_p`",
            )
            penalty_alpha = gr.Slider(
                minimum=0,
                maximum=1,
                step=0.01,
                value=0.95,
                label="Penalty alpha",
                info="Only used if `decoding_strategy` is `contrastive_sampling`",
            )

            submit = gr.Button(label="Generate")

        with gr.Column():
            if IS_MAIN_SPACE:
                outputs = [
                    gr.Textbox(label=MODEL_TO_DISPLAY_NAME[model], multiline=True, readonly=True)
                    for model in MODEL_TO_MODEL_CLASS
                ]
                inference_func = all_model_inference
                inputs = [
                    prompt,
                    temperature,
                    no_repeat_ngram_size,
                    max_new_tokens,
                    min_length,
                    ban_tokens,
                    eos_tokens,
                    force_words,
                    repetition_penalty,
                    hide_special_tokens,
                    decoding_strategy,
                    num_beams,
                    length_penalty,
                    top_k,
                    top_p,
                    penalty_alpha,
                ]
                examples = [example[1:] for example in examples]
            else:
                outputs = gr.Textbox(label="Generated text", interactive=False)
                inference_func = model_inference
                inputs = [
                    images,
                    prompt,
                    temperature,
                    no_repeat_ngram_size,
                    max_new_tokens,
                    min_length,
                    ban_tokens,
                    eos_tokens,
                    force_words,
                    repetition_penalty,
                    hide_special_tokens,
                    decoding_strategy,
                    num_beams,
                    length_penalty,
                    top_k,
                    top_p,
                    penalty_alpha,
                ]

    with gr.Row():
        gr.Examples(inputs=inputs, examples=examples)
        # gr.Markdown(MSG_DETAILS)

    submit.click(inference_func, inputs=inputs, outputs=outputs, api_name="model_inference")

demo.queue()
demo.launch()
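

# Illustrative sketch (commented out): the `api_name="model_inference"` endpoint registered above
# can also be called from outside the Space, e.g. with the `gradio_client` package if it is
# available for the Gradio version in use. The Space name is a placeholder, the positional
# arguments follow the `inputs` list defined above, and the `<image>` token syntax is an assumption.
# from gradio_client import Client
#
# client = Client("HuggingFaceM4/<space-name>")
# result = client.predict(
#     "./images/bear.jpg",  # files
#     "User:<image>Describe this image.\\nAssistant:",  # prompt
#     1.0,  # temperature
#     0,  # no_repeat_ngram_size
#     256,  # max_new_tokens
#     16,  # min_length
#     "<image>;<fake_token_around_image>",  # ban_tokens
#     "",  # eos_tokens
#     "",  # force_words
#     1.0,  # repetition_penalty
#     False,  # hide_special_tokens
#     "greedy",  # decoding_strategy
#     1,  # num_beams
#     1.0,  # length_penalty
#     50,  # top_k
#     0.95,  # top_p
#     0.95,  # penalty_alpha
#     api_name="/model_inference",
# )
# print(result)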