import gradio as gr
import requests
from PIL import Image
import base64
from io import BytesIO

# Ollama API 地址
OLLAMA_API = "http://localhost:11434/api/chat"

# Serialize a PIL image to a base64 text payload
def image_to_base64(img):
    """Encode *img* as base64 text after serializing it to PNG bytes.

    Args:
        img: a PIL.Image.Image (any object exposing ``save(buffer, format=...)`` works).

    Returns:
        str: the PNG-serialized image bytes, base64-encoded, decoded as UTF-8.
    """
    raw = BytesIO()
    img.save(raw, format='png')
    encoded = base64.b64encode(raw.getvalue())
    return encoded.decode("utf-8")

# Send one image + prompt turn to the local Ollama chat endpoint
def query_ollama(image, prompt):
    """Ask the local Ollama vision model about an image.

    Args:
        image: PIL image to analyze; ``None`` yields a user-facing error string.
        prompt: the user's question about the image.

    Returns:
        str: the model's reply text, or a human-readable error message
        (this function never raises — failures are reported as strings
        so Gradio can display them directly).
    """
    if image is None:
        return "请上传一张图片。"

    # Embed the image in the JSON payload as base64 text.
    image_b64 = image_to_base64(image)

    # Ollama's /api/chat message format: "content" is a plain string and
    # images go in a separate "images" list of base64 strings — NOT the
    # OpenAI-style list-of-typed-parts content array.
    payload = {
        "model": "bsahane/Qwen2.5-VL:7b",  # model must already be pulled into Ollama
        "messages": [
            {
                "role": "user",
                "content": prompt,
                "images": [image_b64],
            }
        ],
        "stream": False,
    }

    try:
        # Vision inference can be slow; a generous timeout still keeps the
        # UI from hanging forever if the server is unreachable.
        response = requests.post(OLLAMA_API, json=payload, timeout=300)
        response.raise_for_status()
        data = response.json()

        if "message" in data and "content" in data["message"]:
            return data["message"]["content"]
        return f"错误响应：{data}"
    except Exception as e:
        return f"调用失败：{str(e)}"

# Gradio callback: forward the inputs to the model with a default prompt
def respond(image, text):
    """Bridge the Gradio inputs to ``query_ollama``.

    Args:
        image: uploaded PIL image (may be ``None``).
        text: the user's question; a default prompt is substituted when
            it is empty, whitespace-only, or ``None``.

    Returns:
        str: whatever ``query_ollama`` returns (answer or error text).
    """
    # Guard None as well as "" — a cleared Gradio textbox can deliver None,
    # which would otherwise crash on .strip().
    if not text or not text.strip():
        text = "请描述这张图片的内容。"
    return query_ollama(image, text)

# Assemble the Gradio interface: image + question in, answer text out
with gr.Blocks(title="Qwen-VL 图像识别 Demo") as demo:
    gr.Markdown("# 🖼️ Qwen-VL 图像理解助手\n上传图片并输入问题，我将基于图像内容回答您。")

    with gr.Row():
        uploaded_image = gr.Image(type="pil", label="上传图片")
        question_box = gr.Textbox(label="您的问题", placeholder="例如：图中有什么？描述一下这个场景。")

    answer_box = gr.Textbox(label="模型的回答")

    ask_button = gr.Button("提交")
    ask_button.click(fn=respond, inputs=[uploaded_image, question_box], outputs=answer_box)

# Launch the app only when executed as a script
if __name__ == '__main__':
    demo.launch()