import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3"
import gradio as gr
from transformers import AutoTokenizer, Qwen2_5_VLForConditionalGeneration, AutoProcessor
from peft import PeftModel
import torch
from qwen_vl_utils import process_vision_info

# Model paths: the base checkpoint and the LoRA adapter output directory.
original_model_path = "/root/autodl-tmp/Qwen/Qwen2.5-VL-7B-Instruct/"
peft_model_path = "./output/Qwen2.5-VL-7B/"
# Pin the whole model onto one GPU chosen by LOCAL_RANK (defaults to 0).
device_map = {"": int(os.environ.get("LOCAL_RANK") or 0)}

# Module-level state shared by the Gradio callbacks below.
model = None       # loaded Qwen2.5-VL model; stays None until load_model() runs
tokenizer = None   # tokenizer used to decode generated ids
processor = None   # AutoProcessor handling the chat template and tensor inputs
is_finetuned = False  # True while the LoRA fine-tuned mode is active

# Load the base model, tokenizer and processor into the module-level state.
def load_model():
    """Load the original Qwen2.5-VL model plus its tokenizer and processor.

    Returns a status string for the Gradio status textbox.
    """
    global model, tokenizer, is_finetuned, processor

    tokenizer = AutoTokenizer.from_pretrained(original_model_path, use_fast=True)

    # Bound the per-image visual token budget handed to the processor.
    pixel_floor = 256 * 28 * 28
    pixel_ceiling = 1280 * 28 * 28
    processor = AutoProcessor.from_pretrained(
        original_model_path,
        min_pixels=pixel_floor,
        max_pixels=pixel_ceiling,
        use_fast=True,
    )

    model = Qwen2_5_VLForConditionalGeneration.from_pretrained(
        original_model_path, torch_dtype=torch.bfloat16, device_map=device_map
    )

    # A fresh load always starts in base-model mode.
    is_finetuned = False
    return "原始模型加载成功！"

# Switch between base-model mode and LoRA fine-tuned mode.
def toggle_model_mode():
    """Toggle the LoRA adapter on/off and report the resulting mode.

    Returns:
        A status string for the Gradio status textbox.
    """
    global model, is_finetuned
    if model is None:
        return "请先加载模型！"

    if is_finetuned:
        # transformers' PEFT integration exposes disable_adapters() (plural).
        # The original called disable_adapter(), which is PeftModel's context
        # manager and does not persistently disable the adapter here.
        model.disable_adapters()
        is_finetuned = False
        return "切换到原始模型模式！"
    else:
        # Load the adapter only the first time: calling load_adapter() again
        # with the same adapter_name raises. On later toggles just re-enable
        # the adapter that is already attached.
        if "lora" in getattr(model, "peft_config", {}):
            model.enable_adapters()
        else:
            model.load_adapter(peft_model_path, adapter_name="lora")
        is_finetuned = True
        return "切换到微调模型模式！"

# Build a chat prompt and run text-only generation with the active model.
def generate_response(prompt):
    """Generate an answer for *prompt* using the currently active model mode.

    Args:
        prompt: The user's question as plain text (no images/videos here).

    Returns:
        The decoded model answer, or a status string if no model is loaded.
    """
    # Guard every global this function uses (the original forgot processor,
    # which is dereferenced unconditionally below).
    if model is None or tokenizer is None or processor is None:
        return "请先加载模型！"

    # The fine-tuned adapter was trained with a medical system prompt,
    # so the system message depends on the current mode.
    if is_finetuned:
        messages = [
            {
                "role": "system",
                "content": "Answer the question truthfully, you are a medical professional.",
            },
            {
                "role": "user",
                "content": prompt,
            }
        ]
    else:
        messages = [
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ]

    text = processor.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    # Text-only chat: no vision inputs are supplied.
    image_inputs, video_inputs = None, None

    inputs = processor(
        text=[text],  # the processor expects a batch, hence the list
        images=image_inputs,
        videos=video_inputs,
        padding=True,
        return_tensors="pt",
    )

    # Move the whole batch onto whichever device the model lives on.
    # BatchFeature.to() replaces the manual per-key loop.
    device = next(model.parameters()).device
    inputs = inputs.to(device)

    generated_ids = model.generate(**inputs, max_new_tokens=512, do_sample=True)
    # Strip the prompt tokens so only the newly generated answer is decoded.
    generated_ids_trimmed = [
        out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs["input_ids"], generated_ids)
    ]
    output_text = tokenizer.batch_decode(
        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )

    return output_text[0]

# ---- Gradio UI ----
with gr.Blocks() as demo:
    gr.Markdown("# Qwen2.5-VL 模型测试")

    # Top row: model management buttons.
    with gr.Row():
        btn_load = gr.Button("加载模型")
        btn_toggle = gr.Button("切换模型模式")

    # Status line fed by the two buttons above.
    with gr.Row():
        status_box = gr.Textbox(label="模型状态", interactive=False)

    # Question input plus the generate trigger.
    with gr.Row():
        question_box = gr.Textbox(label="输入问题", placeholder="请输入你的问题...")
        btn_generate = gr.Button("生成回答")

    # Read-only answer display.
    with gr.Row():
        answer_box = gr.Textbox(label="模型回答", interactive=False)

    # Wire callbacks to their widgets.
    btn_load.click(load_model, outputs=status_box)
    btn_toggle.click(toggle_model_mode, outputs=status_box)
    btn_generate.click(generate_response, inputs=question_box, outputs=answer_box)

# Launch the app with a public share link.
demo.launch(share=True)