# coding=utf-8

import base64
import requests
from openai import OpenAI


# SYSTEM_PROMPT = \
# """
# You are a helpful and harmless assistant. You should think step-by-step.
# """
# System prompt (in Chinese) telling the VL model to act as a rigorous image
# analyst and describe the picture in full detail. Runtime text — do not translate.
SYSTEM_PROMPT = \
"""
你是一位严谨专业的图片分析助手，你需要基于以下要求描述图片内容。
1. 详细描述图片内容，包含所有细节
"""

# Maps each provider's OpenAI-compatible base URL to the concrete
# vision-language (VL) model name to request from that provider.
# TODO: could be exposed as a configurable tool option.
VL_MODEL: dict[str, str] = {
    "https://ai.gitee.com/v1": "Qwen3-VL-30B-A3B-Instruct",
    "https://api-inference.modelscope.cn/v1": "Qwen/Qwen3-VL-235B-A22B-Instruct",
}

def _url_to_base64(image_url):
    """Download the image at *image_url* and return its base64-encoded body.

    Failures (network errors, non-2xx responses, encoding problems) are
    logged to stdout and reported as ``None`` instead of being raised.
    """
    try:
        resp = requests.get(image_url, timeout=10)
        # Treat non-2xx status codes as download failures.
        resp.raise_for_status()
        # Encode the raw bytes as base64 text.
        return base64.b64encode(resp.content).decode('utf-8')
    except requests.exceptions.RequestException as e:
        print(f"[DocImageAlt] 获取图片失败: {e}")
    except Exception as e:
        print(f"[DocImageAlt] 图片编码失败: {e}")
    return None


def call_vl_model(image_url: str, base_url: str, access_token: str) -> str:
    """Describe the image at *image_url* via the provider's VL model.

    Args:
        image_url: Publicly reachable URL of the image to describe.
        base_url: OpenAI-compatible API endpoint; must be a key of VL_MODEL.
        access_token: API key for that endpoint.

    Returns:
        The model's textual description, or "" when the image could not be
        downloaded (the failure is already logged by _url_to_base64).

    Raises:
        KeyError: if *base_url* is not configured in VL_MODEL.
    """
    # Download and base64-encode the image so it can be inlined as a data URL.
    base64_image = _url_to_base64(image_url)
    if base64_image is None:
        # Bug fix: previously execution fell through here, so the literal
        # string "None" was sent as the image payload. Bail out early instead.
        print("[DocImageAlt] Base64Image为空，无法获取图片")
        return ""

    client = OpenAI(
        base_url=base_url,
        api_key=access_token,
    )
    response = client.chat.completions.create(
        messages=[
            {
                "role": "system",
                "content": SYSTEM_PROMPT,
            },
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            # NOTE(review): MIME type is hard-coded to JPEG even for
                            # PNG sources; most providers only inspect the bytes, but
                            # confirm the target APIs tolerate the mismatch.
                            "url": f"data:image/jpeg;base64,{base64_image}"
                        }
                    },
                    {
                        "type": "text",
                        "text": "请描述图片内容"
                    }
                ]
            }
        ],
        # Raises KeyError for unknown providers — intentional, surfaces misconfiguration.
        model=VL_MODEL[base_url],
        stream=False,
        max_tokens=8192,
        temperature=0.7,
        top_p=1,
        extra_body={
            "top_k": 1,
        },
        frequency_penalty=0,
    )

    # Non-streaming call: the full answer is in the first choice.
    return response.choices[0].message.content
