import gradio as gr
from huggingface_hub import InferenceClient
from llava.model.builder import load_pretrained_model
from llava.mm_utils import process_images, tokenizer_image_token
from llava.constants import IMAGE_TOKEN_INDEX, DEFAULT_IMAGE_TOKEN
from llava.conversation import conv_templates
from PIL import Image
import copy
import torch
import warnings

warnings.filterwarnings("ignore")

pretrained = "AI-Safeguard/Ivy-VL-llava"
model_name = "llava_qwen"
device = "cuda"
device_map = "auto"

# Load model, tokenizer, and image processor
tokenizer, model, image_processor, max_length = load_pretrained_model(
    pretrained, None, model_name, device_map=device_map
)
model.eval()

# Text-only turns are streamed from a hosted chat endpoint. The model ID below
# is a placeholder assumption -- point it at whichever endpoint you actually use.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")


def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    image=None,
):
    messages = [{"role": "system", "content": system_message}]
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    if image:
        # Image path: run the local Ivy-VL model. This builds a fresh
        # single-turn prompt; earlier chat history is not included.
        image = Image.open(image).convert("RGB")
        image_tensor = process_images([image], image_processor, model.config)
        image_tensor = [_image.to(dtype=torch.float16, device=device) for _image in image_tensor]

        conv_template = "qwen_1_5"
        question = DEFAULT_IMAGE_TOKEN + "\n" + message
        conv = copy.deepcopy(conv_templates[conv_template])
        conv.append_message(conv.roles[0], question)
        conv.append_message(conv.roles[1], None)
        prompt_question = conv.get_prompt()

        input_ids = (
            tokenizer_image_token(prompt_question, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt")
            .unsqueeze(0)
            .to(device)
        )
        image_sizes = [image.size]

        # Greedy decoding; the temperature/top_p sliders only affect the text-only path.
        cont = model.generate(
            input_ids,
            images=image_tensor,
            image_sizes=image_sizes,
            do_sample=False,
            max_new_tokens=max_tokens,
        )
        yield tokenizer.batch_decode(cont, skip_special_tokens=True)[0]
    else:
        # Text-only path: stream tokens from the remote endpoint.
        messages.append({"role": "user", "content": message})

        response = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            stream=True,
            temperature=temperature,
            top_p=top_p,
        ):
            token = chunk.choices[0].delta.content
            if token:
                response += token
            yield response


demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
        gr.Image(type="filepath", label="Input Image (optional)"),
    ],
)

if __name__ == "__main__":
    demo.launch()