import gradio as gr
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
from config import des

# 初始化模型和分词器
# Initialize the multimodal model and its tokenizer.
# trust_remote_code=True is required: MiniCPM-V ships custom model code with the weights.
# float16 halves memory vs float32; the model is then moved to the GPU.
model = AutoModel.from_pretrained('hf-models/MiniCPM-Llama3-V-2_5', trust_remote_code=True, torch_dtype=torch.float16)
model = model.to(device='cuda')
tokenizer = AutoTokenizer.from_pretrained('hf-models/MiniCPM-Llama3-V-2_5', trust_remote_code=True)
# Inference-only: disable dropout/batch-norm training behavior.
model.eval()
# Default system prompt (Chinese): positions the model as a medical AI assistant
# that gives detailed answers and recommends professional help when unsure.
system_message = "你是一名医疗AI助手。请详细且提供信息丰富的答案。如果你不知道某个具体医疗问题的答案，请建议寻求专业帮助。"
def predict(image, question):
    """Answer a (medical) question about an optionally uploaded image.

    Args:
        image: PIL image from the Gradio image widget, or None if nothing
            was uploaded.
        question: the user's question text.

    Returns:
        The model's generated text response.
    """
    # Normalize to RGB — the model expects 3-channel input. When no image
    # was uploaded, leave it as None (text-only question).
    # (The original code had a redundant `else: image = None` branch that
    # reassigned None to an already-None variable; removed.)
    if image is not None:
        image = image.convert('RGB')

    # Single-turn conversation payload in the chat format MiniCPM-V expects.
    msgs = [{'role': 'user', 'content': question}]

    # sampling=True with a moderate temperature yields varied but coherent
    # answers; the system prompt frames the model as a medical assistant.
    res = model.chat(
        image=image,
        msgs=msgs,
        tokenizer=tokenizer,
        sampling=True,
        temperature=0.7,
        system_message=system_message
    )

    return res



# 创建 Gradio 接口
# Build the Gradio UI: an image upload plus a free-text question box,
# wired to predict(); output is the model's plain-text answer.
demo = gr.Interface(
    fn=predict,
    inputs=[gr.Image(type="pil"), gr.Textbox(placeholder="上传检查图片，提问")],
    outputs="text",
    title="多模态个人健康助理",
    # des() comes from the local config module — presumably returns the
    # description markdown/text for the page; verify against config.py.
    description=des()
)

# Launch the web server. Binding to 0.0.0.0 exposes the app on all network
# interfaces (reachable from other machines), not just localhost.
demo.launch(server_name='0.0.0.0' )
