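# Install flash-attn at Space startup; FLASH_ATTENTION_SKIP_CUDA_BUILD=TRUE keeps pip
# from compiling the CUDA extension from source during the install.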
import subprocess
subprocess.run('pip install flash-attn==2.7.0.post2 --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

import spaces
import os
import re
import logging
from typing import List, Any
from threading import Thread

import torch
import gradio as gr
from transformers import AutoModelForCausalLM, TextIteratorStreamer

model_name = 'AIDC-AI/Ovis2-4B'
use_thread = True

# load the Ovis2 model in bfloat16 onto the GPU, then set up its tokenizers and a streaming decoder
model = AutoModelForCausalLM.from_pretrained(model_name,
                                             torch_dtype=torch.bfloat16,
                                             multimodal_max_length=8192,
                                             trust_remote_code=True).to(device='cuda')
text_tokenizer = model.get_text_tokenizer()
visual_tokenizer = model.get_visual_tokenizer()
streamer = TextIteratorStreamer(text_tokenizer, skip_prompt=True, skip_special_tokens=True)
image_placeholder = '<image>'
cur_dir = os.path.dirname(os.path.abspath(__file__))

logging.getLogger("httpx").setLevel(logging.WARNING)
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

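# Build fresh generation kwargs for each request: greedy decoding with a mild
# repetition penalty, capped at 1536 new tokens.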
def initialize_gen_kwargs():
    return {
        "max_new_tokens": 1536,
        "do_sample": False,
        "top_p": None,
        "top_k": None,
        "temperature": None,
        "repetition_penalty": 1.05,
        "eos_token_id": model.generation_config.eos_token_id,
        "pad_token_id": text_tokenizer.pad_token_id,
        "use_cache": True
    }

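# Record the user's message with an empty assistant slot and clear the input box;
# ovis_chat fills that slot as the response streams in.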
def submit_chat(chatbot, text_input):
    response = ''
    chatbot.append([text_input, response])
    return chatbot, ''

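# Generate on the Space GPU. Tokens arrive through the shared TextIteratorStreamer;
# with use_thread=True, generate() runs in a background thread while this generator
# yields the partially completed chat history to the Chatbot for live updates.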
@spaces.GPU
def ovis_chat(chatbot: List[List[str]], image_input: Any):
    conversations, model_inputs = prepare_inputs(chatbot, image_input)
    gen_kwargs = initialize_gen_kwargs()

    with torch.inference_mode():
        generate_func = lambda: model.generate(**model_inputs, **gen_kwargs, streamer=streamer)
        
        if use_thread:
            thread = Thread(target=generate_func)
            thread.start()
        else:
            generate_func()

        response = ""
        for new_text in streamer:
            response += new_text
            chatbot[-1][1] = response
            yield chatbot

        if use_thread:
            thread.join()

    log_conversation(chatbot)

    
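# Convert the Gradio chat history into the Ovis conversation format and, when an
# image is supplied, prepend the <image> placeholder to the first human turn.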
def prepare_inputs(chatbot: List[List[str]], image_input: Any):
    # conversations = [{
    #     "from": "system",
    #     "value": "You are a helpful assistant, and your task is to provide reliable and structured responses to users."
    # }]
    conversations = []

    for query, response in chatbot[:-1]:
        conversations.extend([
            {"from": "human", "value": query},
            {"from": "gpt", "value": response}
        ])
    
    last_query = chatbot[-1][0].replace(image_placeholder, '')
    conversations.append({"from": "human", "value": last_query})

    if image_input is not None:
        for conv in conversations:
            if conv["from"] == "human":
                conv["value"] = f'{image_placeholder}\n{conv["value"]}'
                break

    logger.info(conversations)
    
    prompt, input_ids, pixel_values = model.preprocess_inputs(conversations, [image_input], max_partition=16)
    attention_mask = torch.ne(input_ids, text_tokenizer.pad_token_id)
    
    model_inputs = {
        "inputs": input_ids.unsqueeze(0).to(device=model.device),
        "attention_mask": attention_mask.unsqueeze(0).to(device=model.device),
        "pixel_values": [pixel_values.to(dtype=visual_tokenizer.dtype, device=visual_tokenizer.device)] if image_input is not None else [None]
    }
    
    return conversations, model_inputs

def log_conversation(chatbot):
    logger.info("[OVIS_CONV_START]")
    # log each turn through the logger rather than printing to stdout
    for i, (request, answer) in enumerate(chatbot, 1):
        logger.info(f'Q{i}:\n {request}\nA{i}:\n {answer}')
    logger.info("[OVIS_CONV_END]")

def clear_chat():
    return [], None, ""

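# Page header: embed the logo SVG inline and scale it to sit beside the model name.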
with open(f"{cur_dir}/resource/logo.svg", "r", encoding="utf-8") as svg_file:
    svg_content = svg_file.read()
font_size = "2.5em"
svg_content = re.sub(r'(<svg[^>]*)(>)', rf'\1 height="{font_size}" style="vertical-align: middle; display: inline-block;"\2', svg_content)
html = f"""
<p align="center" style="font-size: {font_size}; line-height: 1;">
    <span style="display: inline-block; vertical-align: middle;">{svg_content}</span>
    <span style="display: inline-block; vertical-align: middle;">{model_name.split('/')[-1]}</span>
</p>
<center><font size=3><b>Ovis</b> has been open-sourced on <a href='https://huggingface.co/{model_name}'>😊 Huggingface</a> and <a href='https://github.com/AIDC-AI/Ovis'>🌟 GitHub</a>. If you find Ovis useful, a like❤️ or a star🌟 would be appreciated.</font></center>
"""

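# Delimiters the Chatbot should render as LaTeX: inline \( \) plus common display environments.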
latex_delimiters_set = [{
        "left": "\\(",
        "right": "\\)",
        "display": False 
    }, {
        "left": "\\begin{equation}",
        "right": "\\end{equation}",
        "display": True 
    }, {
        "left": "\\begin{align}",
        "right": "\\end{align}",
        "display": True
    }, {
        "left": "\\begin{alignat}",
        "right": "\\end{alignat}",
        "display": True
    }, {
        "left": "\\begin{gather}",
        "right": "\\end{gather}",
        "display": True
    }, {
        "left": "\\begin{CD}",
        "right": "\\end{CD}",
        "display": True
    }, {
        "left": "\\[",
        "right": "\\]",
        "display": True
    }]

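# UI layout: image upload and example prompts on the left; chatbot, prompt box,
# and Send/Clear buttons on the right.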
text_input = gr.Textbox(label="prompt", placeholder="Enter your text here...", lines=1, container=False)
with gr.Blocks(title=model_name.split('/')[-1], theme=gr.themes.Ocean()) as demo:
    gr.HTML(html)
    with gr.Row():
        with gr.Column(scale=3):
            image_input = gr.Image(label="image", height=350, type="pil")
            gr.Examples(
                examples=[
                    [f"{cur_dir}/examples/ovis2_math2.png", "Find the area of the shaded region."],
                    [f"{cur_dir}/examples/ovis2_figure2.png", "What is net profit margin as a percentage of total revenue?"],
                    [f"{cur_dir}/examples/ovis2_table0.png", "Convert the table to markdown."],
                    [f"{cur_dir}/examples/ovis2_ocr0.jpeg", "OCR:"],
                ],
                inputs=[image_input, text_input]
            )
        with gr.Column(scale=7):
            chatbot = gr.Chatbot(label="Ovis", layout="panel", height=600, show_copy_button=True, latex_delimiters=latex_delimiters_set)
            text_input.render()
            with gr.Row():
                send_btn = gr.Button("Send", variant="primary")
                clear_btn = gr.Button("Clear", variant="secondary")

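    # Event wiring: the Send button and Enter both record the user turn (clearing the
    # textbox), then stream the model response into the chat.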
    send_click_event = send_btn.click(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat, [chatbot, image_input], chatbot)
    submit_event = text_input.submit(submit_chat, [chatbot, text_input], [chatbot, text_input]).then(ovis_chat, [chatbot, image_input], chatbot)
    clear_btn.click(clear_chat, outputs=[chatbot, image_input, text_input])

demo.launch()