from openai import OpenAI
import os
import base64
def encode_image(image_path):
    """Read the file at *image_path* and return its base64 encoding as str."""
    with open(image_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode("utf-8")

def inference_with_api(image_path, prompt,
                       model_id="qwen3-vl-plus",  # officially confirmed model ID
                       min_pixels=1024*32*32,
                       max_pixels=4096*32*32):
    """Send a local image plus a text prompt to the DashScope
    OpenAI-compatible chat API and return the model's text reply.

    Args:
        image_path: Path to a local image file; its bytes are sent as a
            base64 ``data:image/png`` URL (assumes the file is a PNG —
            TODO confirm, or derive the MIME type from the extension).
        prompt: Text prompt sent alongside the image.
        model_id: DashScope model identifier.
        min_pixels: Lower bound on image resolution forwarded to the API
            (DashScope-specific field on the image content item).
        max_pixels: Upper bound on image resolution, forwarded likewise.

    Returns:
        The content string of the first completion choice.

    Raises:
        RuntimeError: If the DASHSCOPE_API_KEY environment variable is unset,
            so the failure is explicit instead of an opaque client error.
    """
    api_key = os.getenv("DASHSCOPE_API_KEY")  # read API key from environment
    if not api_key:
        raise RuntimeError("DASHSCOPE_API_KEY environment variable is not set")

    base64_image = encode_image(image_path)
    client = OpenAI(
        api_key=api_key,
        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # production endpoint
    )

    messages = [
        {
            "role": "user",
            "content": [
                {
                    "type": "image_url",
                    # BUGFIX: min_pixels/max_pixels were accepted but never
                    # used. DashScope's Qwen-VL compatible mode reads them
                    # from the image content item — forward them here.
                    "min_pixels": min_pixels,
                    "max_pixels": max_pixels,
                    "image_url": {"url": f"data:image/png;base64,{base64_image}"},
                },
                {"type": "text", "text": prompt},
            ],
        }
    ]
    completion = client.chat.completions.create(
        model=model_id,
        messages=messages,
        response_format={"type": "text"},
    )
    return completion.choices[0].message.content
if __name__ == "__main__":
    # Guard the demo so importing this module does not trigger a paid
    # API call as an import-time side effect.
    image_path = "C:\\Users\\x\\Desktop\\output.png"
    prompt = "请详细描述这张图片的内容，包括画面元素、颜色、场景等信息"
    result = inference_with_api(image_path, prompt)
    print("模型返回结果：")
    print(result)