    
from ast import main
from config import RagConfig
from openai import OpenAI

from rag.llms import vllm
from utils import get_b64_image_from_path

def describe_image(
    image_path: str = '/home/tom/my_learn/my_danwen/0_playgrounds/0_test_data/南瓜.png',
    prompt: str = '请尽可能详细的描述你在图片中看到的所有内容，如果提到分类，描述如何分类',
    model: str = "gpt-4-vision-preview",
    max_tokens: int = 300,
) -> str:
    """Send a local PNG image and a text prompt to an OpenAI-compatible
    chat-completions endpoint and return the model's textual description.

    Parameters:
        image_path: Path of the PNG file to describe (read and base64-encoded
            by ``get_b64_image_from_path``).
        prompt: Instruction text sent alongside the image.
        model: Model name passed to the endpoint.
            NOTE(review): the client targets the vLLM base_url from
            ``RagConfig``, yet this default is an OpenAI model id — confirm it
            matches the model actually served by the vLLM instance.
        max_tokens: Upper bound on the completion length.

    Returns:
        The assistant message content as a string.
    """
    image_b64 = get_b64_image_from_path(image_path)

    client = OpenAI(api_key=RagConfig.vllm_api_key,
                    base_url=RagConfig.vllm_base_url)

    # Multimodal user message: one text part plus one inline base64 PNG
    # embedded as a data URL (no separate upload step needed).
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/png;base64,{image_b64}"},
                },
            ],
        }
    ]

    # Fixed seed with moderate temperature/top_p for mostly-reproducible
    # sampling; non-streaming so the full answer arrives in one response.
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        temperature=0.5,
        seed=0,
        top_p=0.70,
        stream=False,
        max_tokens=max_tokens,
    )
    return completion.choices[0].message.content


if __name__ == "__main__":
    # Guarded entry point: running the file as a script prints the
    # description; importing the module no longer triggers a network call.
    print(describe_image())