import spaces
import argparse
from ast import parse
import datetime
import json
import os
import time
import hashlib
import re
import torch
import gradio as gr
import requests
import random
from filelock import FileLock
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
from models import load_image
from constants import LOGDIR, DEFAULT_IMAGE_TOKEN
from utils import (
    build_logger,
    server_error_msg,
    violates_moderation,
    moderation_msg,
    load_image_from_base64,
    get_log_filename,
)
from threading import Thread
import traceback
from conversation import Conversation
from transformers import AutoModel, AutoTokenizer, TextIteratorStreamer
import subprocess

# Install flash-attn at startup (HF Spaces has no prebuilt wheel for every image);
# skipping the CUDA build keeps install time short on ZeroGPU hardware.
subprocess.run(
    'pip install flash-attn --no-build-isolation',
    env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"},
    shell=True,
)

torch.set_default_device('cuda')

logger = build_logger("gradio_web_server", "gradio_web_server.log")

headers = {"User-Agent": "Vintern-1B-3.5-Demo Client"}

# Sentinel button states returned from callbacks to toggle UI interactivity.
no_change_btn = gr.Button()
enable_btn = gr.Button(interactive=True)
disable_btn = gr.Button(interactive=False)


@spaces.GPU(duration=10)
def make_zerogpu_happy():
    """No-op GPU task so the ZeroGPU scheduler provisions a device for this Space."""
    pass


def write2file(path, content):
    """Append *content* to *path*, serialized across processes with a file lock."""
    lock = FileLock(f"{path}.lock")
    with lock:
        with open(path, "a") as fout:
            fout.write(content)


# JS snippet run by Gradio to expose the page's URL query params to callbacks.
get_window_url_params = """
function() {
    const params = new URLSearchParams(window.location.search);
    url_params = Object.fromEntries(params);
    console.log(url_params);
    return url_params;
}
"""


def init_state(state=None):
    """Return a fresh Conversation, explicitly dropping the old *state* if given."""
    if state is not None:
        del state
    return Conversation()


def vote_last_response(state, liked, request: gr.Request):
    """Log a like/dislike/flag event for the last response to the vote log file."""
    conv_data = {
        "tstamp": round(time.time(), 4),
        "like": liked,
        "model": 'Vintern-1B-v3_5',
        "state": state.dict(),
        "ip": request.client.host,
    }
    write2file(get_log_filename(), json.dumps(conv_data) + "\n")


def upvote_last_response(state, request: gr.Request):
    """Record an upvote and disable the vote buttons until the next turn."""
    logger.info(f"upvote. ip: {request.client.host}")
    vote_last_response(state, True, request)
    textbox = gr.MultimodalTextbox(value=None, interactive=True)
    return (textbox,) + (disable_btn,) * 3


def downvote_last_response(state, request: gr.Request):
    """Record a downvote and disable the vote buttons until the next turn."""
    logger.info(f"downvote. ip: {request.client.host}")
    vote_last_response(state, False, request)
    textbox = gr.MultimodalTextbox(value=None, interactive=True)
    return (textbox,) + (disable_btn,) * 3


def vote_selected_response(state, request: gr.Request, data: gr.LikeData):
    """Log a like/dislike on a specific chatbot message (gr.LikeData carries the index)."""
    logger.info(
        f"Vote: {data.liked}, index: {data.index}, value: {data.value} , ip: {request.client.host}"
    )
    conv_data = {
        "tstamp": round(time.time(), 4),
        "like": data.liked,
        "index": data.index,
        "model": 'Vintern-1B-v3_5',
        "state": state.dict(),
        "ip": request.client.host,
    }
    write2file(get_log_filename(), json.dumps(conv_data) + "\n")
    return


def flag_last_response(state, request: gr.Request):
    """Record a flag event and disable the vote buttons until the next turn."""
    logger.info(f"flag. ip: {request.client.host}")
    vote_last_response(state, "flag", request)
    textbox = gr.MultimodalTextbox(value=None, interactive=True)
    return (textbox,) + (disable_btn,) * 3


def regenerate(state, image_process_mode, request: gr.Request):
    """Clear the last assistant message so the bot re-answers the previous user turn."""
    logger.info(f"regenerate. ip: {request.client.host}")
    state.update_message(Conversation.ASSISTANT, content='', image=None, idx=-1)
    prev_human_msg = state.messages[-2]
    # If the previous user turn carried an image tuple, refresh its process mode.
    if type(prev_human_msg[1]) in (tuple, list):
        prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode)
    state.skip_next = False
    textbox = gr.MultimodalTextbox(value=None, interactive=True)
    return (state, state.to_gradio_chatbot(), textbox) + (disable_btn,) * 5


def clear_history(request: gr.Request):
    """Reset the conversation to a brand-new state and clear the input box."""
    logger.info(f"clear_history. ip: {request.client.host}")
    state = init_state()
    textbox = gr.MultimodalTextbox(value=None, interactive=True)
    return (state, state.to_gradio_chatbot(), textbox) + (disable_btn,) * 5


def add_text(state, message, system_prompt, request: gr.Request):
    """Append the user's multimodal input (text and/or images) to the conversation.

    Handles: empty input (skip turn), moderation (when --moderate is set), a second
    image upload (restarts the conversation), and injecting DEFAULT_IMAGE_TOKEN and
    the system prompt into the first message.
    """
    if not state:
        state = init_state()
    images = message.get("files", [])
    text = message.get("text", "").strip()
    textbox = gr.MultimodalTextbox(value=None, interactive=False)

    # Nothing to send: mark the turn as skippable and leave buttons untouched.
    if len(text) <= 0 and len(images) == 0:
        state.skip_next = True
        return (state, state.to_gradio_chatbot(), textbox) + (no_change_btn,) * 5

    if args.moderate:
        flagged = violates_moderation(text)
        if flagged:
            state.skip_next = True
            textbox = gr.MultimodalTextbox(
                value={"text": moderation_msg}, interactive=True
            )
            return (state, state.to_gradio_chatbot(), textbox) + (no_change_btn,) * 5

    images = [Image.open(path).convert("RGB") for path in images]

    # The model handles one image per conversation: a second upload restarts it.
    if len(images) > 0 and len(state.get_images(source=state.USER)) > 0:
        state = init_state(state)

    # Uploading the first image: prepend the image token (and system prompt when
    # this is also the very first message).
    if len(images) > 0 and len(state.get_images(source=state.USER)) == 0:
        if len(state.messages) == 0:
            # The first message is an image.
            text = DEFAULT_IMAGE_TOKEN + "\n" + system_prompt + "\n" + text
        else:
            # The image arrives after some text messages: retrofit the token
            # onto the first user message.
            first_user_message = state.messages[0]['content']
            state.update_message(
                Conversation.USER,
                DEFAULT_IMAGE_TOKEN + "\n" + first_user_message,
                None,
                0,
            )

    # Pure-text conversation opener: prepend the system prompt.
    if len(images) == 0 and len(state.get_images(source=state.USER)) == 0 and len(state.messages) == 0:
        text = system_prompt + "\n" + text

    state.set_system_message(system_prompt)
    state.append_message(Conversation.USER, text, images)
    state.skip_next = False

    return (state, state.to_gradio_chatbot(), textbox) + (disable_btn,) * 5


model_name = "5CD-AI/Vintern-1B-v3_5"
model = AutoModel.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, use_fast=False)


@spaces.GPU
def predict(state, image_path, max_input_tiles=6, temperature=1.0, max_output_tokens=700,
            top_p=0.7, repetition_penalty=2.5, do_sample=False):
    """Run one chat turn against the model and return (response, conv_history).

    When *image_path* is given, the image is tiled (up to *max_input_tiles*) and
    cast to bfloat16 on the GPU before being passed to ``model.chat``.
    """
    generation_config = dict(
        temperature=temperature,
        max_new_tokens=max_output_tokens,
        top_p=top_p,
        do_sample=do_sample,
        num_beams=3,
        repetition_penalty=repetition_penalty,
    )

    pixel_values = None
    if image_path is not None:
        pixel_values = load_image(image_path, max_num=max_input_tiles).to(torch.bfloat16).cuda()

    if pixel_values is not None:
        logger.info(f"==== Length Pixel values ====\n{len(pixel_values)}")
        # Ensure the first user message carries the image placeholder token;
        # NOTE(review): nesting this under the pixel_values check is the safe
        # reading of the original — the token is only meaningful with pixels.
        index, first_user_message = state.get_user_message(source=state.USER, position='first')
        if first_user_message is not None and \
                DEFAULT_IMAGE_TOKEN not in first_user_message:
            state.update_message(state.USER, DEFAULT_IMAGE_TOKEN + "\n" + first_user_message, None, index)

    history = state.get_history()
    logger.info(f"==== History ====\n{history}")

    _, message = state.get_user_message(source=state.USER, position='last')
    response, conv_history = model.chat(
        tokenizer,
        pixel_values,
        message,
        generation_config,
        history=history,
        return_history=True,
    )
    logger.info(f"==== Conv History ====\n{conv_history}")
    return response, conv_history


def ai_bot(
    state,
    temperature,
    do_sample,
    top_p,
    repetition_penalty,
    max_new_tokens,
    max_input_tiles,
    request: gr.Request,
):
    """Generator callback: stream the assistant's answer into the chatbot UI.

    Yields (state, chatbot, textbox, *button_states) tuples; logs the finished
    turn to the conversation log file.
    """
    logger.info(f"ai_bot. ip: {request.client.host}")
    start_tstamp = time.time()

    # Skip generation entirely when add_text flagged the turn as invalid.
    if hasattr(state, "skip_next") and state.skip_next:
        yield (
            state,
            state.to_gradio_chatbot(),
            gr.MultimodalTextbox(interactive=False),
        ) + (no_change_btn,) * 5
        return

    if model is None:
        state.update_message(Conversation.ASSISTANT, server_error_msg)
        yield (
            state,
            state.to_gradio_chatbot(),
            gr.MultimodalTextbox(interactive=False),
            disable_btn,
            disable_btn,
            disable_btn,
            enable_btn,
            enable_btn,
        )
        return

    all_images = state.get_images(source=state.USER)
    all_image_paths = [state.save_image(image) for image in all_images]

    # Show the streaming placeholder while generation is in flight.
    state.append_message(Conversation.ASSISTANT, state.streaming_placeholder)
    yield (
        state,
        state.to_gradio_chatbot(),
        gr.MultimodalTextbox(interactive=False),
    ) + (disable_btn,) * 5

    try:
        logger.info(f"==== Image paths ====\n{all_image_paths}")
        response, _ = predict(
            state,
            all_image_paths[0] if len(all_image_paths) > 0 else None,
            max_input_tiles,
            temperature,
            max_new_tokens,
            top_p,
            repetition_penalty,
            do_sample,
        )
        buffer = ""
        for new_text in response:
            buffer += new_text
            state.update_message(Conversation.ASSISTANT, buffer + state.streaming_placeholder, None)
            yield (
                state,
                state.to_gradio_chatbot(),
                gr.MultimodalTextbox(interactive=False),
            ) + (disable_btn,) * 5
    except Exception as e:
        logger.error(f"Error in ai_bot: {e} \n{traceback.format_exc()}")
        state.update_message(Conversation.ASSISTANT, server_error_msg, None)
        yield (
            state,
            state.to_gradio_chatbot(),
            gr.MultimodalTextbox(interactive=True),
        ) + (
            disable_btn,
            disable_btn,
            disable_btn,
            enable_btn,
            enable_btn,
        )
        return

    ai_response = state.return_last_message()
    logger.info(f"==== AI response ====\n{ai_response}")
    state.end_of_current_turn()

    yield (
        state,
        state.to_gradio_chatbot(),
        gr.MultimodalTextbox(interactive=True),
    ) + (enable_btn,) * 5

    finish_tstamp = time.time()
    logger.info(f"{buffer}")

    data = {
        "tstamp": round(finish_tstamp, 4),
        "like": None,
        "model": model_name,
        "start": round(start_tstamp, 4),
        # BUG FIX: original logged start_tstamp here, so the finish time was
        # never recorded and turn duration could not be computed from the log.
        "finish": round(finish_tstamp, 4),
        "state": state.dict(),
        "images": all_image_paths,
        "ip": request.client.host,
    }
    write2file(get_log_filename(), json.dumps(data) + "\n")


#
❄️Vintern-1B-v3_5❄️
An Efficient Multimodal Large Language Model for Vietnamese🇻🇳
[📖 Vintern Paper] [🤗 Huggingface]Vintern-1B-v3.5 is the latest in the Vintern series, bringing major improvements over v2 across all benchmarks. This continuously fine-tuned version enhances Vietnamese capabilities while retaining strong English performance. It excels in OCR, text recognition, and Vietnam-specific document understanding.