from openai import OpenAI
import os
import base64

# Credentials and endpoint for the DashScope (Aliyun) OpenAI-compatible API.
# NOTE: raises KeyError at import time if QWEN_API_KEY is not set in the
# environment — deliberate fail-fast, since nothing below works without it.
API_KEY = os.environ['QWEN_API_KEY']
BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"


# System prompt shared by both callVLM and callLLM. The text is Chinese and
# is part of runtime behavior (it is sent to the model verbatim): it sets the
# persona "Buddy", an electronic assistant, and asks for short, friendly
# replies of at most 100 characters.
systemPrompt = """当前应用：
你正在扮演Buddy，一个智能电子助理。

你的回复风格：
请你总是给出一个简短、友好的回答，模仿一只电子助理的回答风格，不要超过100字。"""


def callVLM(imgData, question, mime_type="image/jpeg", model="qwen-vl-max-latest"):
    """Ask the vision-language model a question about an image.

    Args:
        imgData: Raw image bytes (e.g. the contents of a JPEG file).
        question: The user's question about the image, sent as text.
        mime_type: MIME type embedded in the data URL; defaults to
            "image/jpeg" to preserve the original behavior.
        model: DashScope model identifier; defaults to the original
            "qwen-vl-max-latest".

    Returns:
        The model's reply text (str), capped at 100 tokens by the request.
    """
    # Inline the image as a base64 data URL — the OpenAI-compatible endpoint
    # accepts images this way without a separate upload step.
    base64_image = base64.b64encode(imgData).decode("utf-8")
    client = OpenAI(
        api_key=API_KEY,
        base_url=BASE_URL,
    )
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": [{"type": "text", "text": systemPrompt}]},
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {"url": f"data:{mime_type};base64,{base64_image}"},
                    },
                    {"type": "text", "text": question},
                ],
            }
        ],
        max_tokens=100,
    )
    return completion.choices[0].message.content

def callLLM(question, model="qwen-max-latest"):
    """Ask the text-only LLM a question and return its reply.

    Args:
        question: The user's question, sent as a plain text message.
        model: DashScope model identifier; defaults to the original
            "qwen-max-latest".

    Returns:
        The model's reply text (str), capped at 100 tokens by the request.
    """
    client = OpenAI(
        api_key=API_KEY,
        base_url=BASE_URL,
    )
    completion = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": [{"type": "text", "text": systemPrompt}]},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": question},
                ],
            }
        ],
        max_tokens=100,
    )
    return completion.choices[0].message.content

def _main():
    """Smoke test: ask the LLM its name, then have the VLM describe a photo."""
    with open("tmp/photo.jpg", "rb") as photo:
        image_bytes = photo.read()
    print(callLLM("请问你的名字是什么？"))
    print(callVLM(image_bytes, "识别一下你从摄像头看到的内容。"))


if __name__ == "__main__":
    _main()