import gradio as gr
import json
from transformers import AutoTokenizer, AutoModelForCausalLM, AutoProcessor, AutoConfig
from PIL import Image
from GeoCLIP import GeoCLIP
import torch
from torch.nn import functional as F
from configs.config import ModelConfig, TrainConfig, PathConfig

# Inference device; hard-coded to the second GPU on this host.
device = "cuda:1"
# Image preprocessor for the SigLIP2 vision backbone (local checkpoint path).
processor = AutoProcessor.from_pretrained('/data/xiaoyj2025/GeoVLM/models/siglip2-so400m-patch14-384')
# Tokenizer saved alongside the instruction-tuned checkpoint.
tokenizer = AutoTokenizer.from_pretrained('/data/xiaoyj2025/GeoVLM/src/save/instruct_en/instruct_en')
# Register the custom config/model pair so AutoModelForCausalLM can resolve
# the "vlm_model" model_type stored in the checkpoint's config.json.
AutoConfig.register("vlm_model", ModelConfig)
AutoModelForCausalLM.register(ModelConfig, GeoCLIP)

pretrain_model = AutoModelForCausalLM.from_pretrained('/data/xiaoyj2025/GeoVLM/src/save/instruct_en/instruct_en')
pretrain_model.to(device)

# NOTE(review): an SFT/instruct model was previously loaded here as well;
# the commented-out lines are kept for reference.
#instruct_model = AutoModelForCausalLM.from_pretrained('/home/user/wyf/train_multimodal_from_scratch/save/sft')
#instruct_model.to(device)

# Inference only: disable dropout/batch-norm training behavior.
pretrain_model.eval()
#instruct_model.eval()


# Conversation-history helper: turns the Gradio chat state into a model prompt.
def format_history(chat_history, new_input, image_updated):
    """Build the chat-template prompt string including all previous turns.

    Args:
        chat_history: list of (user_text, assistant_text) tuples from Gradio.
        new_input: the user's new message text.
        image_updated: True when the uploaded image changed since the last
            turn; in that case an ``<image>`` marker is appended to the new
            message.

    Returns:
        The prompt produced by the tokenizer's chat template, with every
        ``<image>`` marker expanded to 81 ``<|image_pad|>`` placeholders.
    """
    messages = [{"role": "system", "content": "You are an AI visual assistant, and you are seeing a single image.What you see are provided with sentences, describing the same image you are looking at. The sentences describe various objects present in the scene, their colors, relative sizes as well as relative positions on the image.Answer all questions as you are seeing the image. Design a conversation between you and a person asking about this photo. The answers should be in a tone that a visual AI assistant is seeing the image and answering the question. Ask diverse questions and give corresponding answers. Only give definite answers.Include questions asking about the visual content of the image, including the object types, counting the objects, object actions, object locations, relative positions between objects, the size of objects, color of objects, etc.(1) one can see the content in the image that the question asks about and can answer confidently.(2) one can determine confidently from the image that it is not in the image. Do not ask any question that cannot be answered confidently. Also include complex questions that are relevant to the content in the image, for example, asking about background knowledge of the objects in the image, asking to discuss about events happening in the image, etc. Again, do not ask about uncertain details.Provide detailed answers when answering complex questions. For example, give detailed examples or reasoning steps to make the content more convincing and well-organized. You can include multiple paragraphs if necessary. Do not output anything else other than the question answer pairs."}]

    # Replay the history; the <image> marker is attached only to the very
    # first user turn (replaces the original manual `i` counter with
    # enumerate and collapses the duplicated append branches).
    for turn_idx, (user_text, assistant_text) in enumerate(chat_history):
        user_content = "<image>\n" + user_text if turn_idx == 0 else user_text
        messages.append({"role": "user", "content": user_content})
        messages.append({"role": "assistant", "content": assistant_text})

    # NOTE(review): when the history is non-empty AND image_updated is True,
    # the prompt contains two <image> markers (2 * 81 pad tokens) while only
    # one image tensor is fed to the model -- confirm this is intended.
    current_input = new_input
    if image_updated:
        current_input += "\n<image>"
    messages.append({"role": "user", "content": current_input})

    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    ).replace('<image>', '<|image_pad|>' * 81)

def generate(mode, image_input, text_input, chat_history, image_state):
    """Run one chat turn: build the prompt, sample tokens, update UI state.

    Args:
        mode: model selection from the (hidden) radio; currently unused since
            only the pretrain model is loaded.
        image_input: PIL image from the Gradio image widget, or None.
        text_input: the user's message.
        chat_history: list of (user, assistant) tuples accumulated so far.
        image_state: the previously seen image, used to detect a new upload.

    Returns:
        ("", new_history, new_history, image_input) -- clears the textbox,
        updates both the state and the visible Chatbot, and remembers the
        current image for the next turn.
    """
    max_new_tokens = 200
    temperature = 0.1

    # A fresh <image> marker is only added when the upload actually changed.
    image_updated = image_input != image_state

    # Full prompt (system + history + current turn) with image pads expanded.
    prompt = format_history(chat_history, text_input, image_updated)
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)

    pixel_values = None
    if image_input is not None:
        pixel_values = processor(images=image_input, return_tensors="pt").pixel_values.to(device)

    # Token-by-token decoding loop. NOTE(review): the model re-processes the
    # full sequence every step (no KV cache), which is quadratic in sequence
    # length -- acceptable for a demo, worth fixing if latency matters.
    eos = tokenizer.eos_token_id
    generated_ids = input_ids.clone()
    with torch.no_grad():  # hoisted out of the loop; no step needs gradients
        for _ in range(max_new_tokens):
            outputs = pretrain_model(
                input_ids=generated_ids,
                labels=None,
                pixel_values=pixel_values
            )

            # Logits for the next-token position only.
            logits = outputs.logits[:, -1, :]

            if temperature > 0:
                # Temperature sampling: scale logits, then draw from softmax.
                probs = F.softmax(logits / temperature, dim=-1)
                next_token = torch.multinomial(probs, num_samples=1)
            else:
                # temperature == 0 means greedy decoding.
                next_token = torch.argmax(logits, dim=-1, keepdim=True)

            # Compare as a Python scalar; `tensor == int` yields a tensor and
            # only works here by 1-element truthiness.
            if next_token.item() == eos:
                break

            generated_ids = torch.cat([generated_ids, next_token], dim=-1)

    # Decode only the newly generated suffix (everything after the prompt).
    response = tokenizer.decode(generated_ids[0][input_ids.shape[1]:], skip_special_tokens=True)
    new_history = chat_history + [(text_input, response)]

    # Clear the textbox, keep the image so follow-up questions can reuse it.
    return "", new_history, new_history, image_input

# Build the Gradio UI: image + text input on the left, chat transcript on
# the right, with per-session state for history and the last-seen image.
with gr.Blocks() as demo:
    # Session state: accumulated (user, assistant) tuples and the image from
    # the previous turn (used by generate() to detect a fresh upload).
    chat_history = gr.State([])
    image_state = gr.State(None)
    
    # Model selector -- hidden for now (only "pretrain" is loaded) but the
    # component reference is still needed for the click handler's inputs.
    mode = gr.Radio(["pretrain"], label="选择模型", visible=False)

    with gr.Row():
        # Input column: image upload, message box, send button.
        with gr.Column(scale=1):
            image_input = gr.Image(type="pil", label="上传图片")
            text_input = gr.Textbox(label="输入消息")
            submit_btn = gr.Button("发送", variant="primary")
        
        # Output column: chat transcript plus a clear button.
        with gr.Column(scale=2):
            chatbot = gr.Chatbot(height=500)
            clear_btn = gr.Button("清空对话")

    # Send button: generate() clears the textbox and updates the history
    # state, the visible chatbot, and the remembered image.
    submit_btn.click(
        generate,
        inputs=[mode, image_input, text_input, chat_history, image_state],  # component objects, not values
        outputs=[text_input, chat_history, chatbot, image_state]
    )
    
    # Clearing runs in two chained steps: first reset the state objects,
    # then blank the visible chatbot.
    clear_btn.click(
        lambda: ([], None),
        outputs=[chat_history, image_state],
        queue=False
    ).then(
        lambda: None,
        None,
        chatbot,
        queue=False
    )

if __name__ == "__main__":
    # Serve on all interfaces, port 7891.
    demo.launch(server_name="0.0.0.0", server_port=7891)