# Model download (uncomment the snapshot_download lines below to fetch weights via ModelScope)
# from modelscope import snapshot_download
# model_dir = snapshot_download('Qwen/Qwen2.5-VL-72B-Instruct-AWQ')
#
import os
# from openai import OpenAI
# try:
#     client = OpenAI(
#         # 若没有配置环境变量，请用百炼API Key将下行替换为：api_key="sk-xxx",
#         # api_key=os.getenv("DASHSCOPE_API_KEY"),
#         api_key = '',
#         base_url="http://192.168.3.164/v1/chat/completions",
#         # base_url="http://192.168.3.164",
#     )
#
#     completion = client.chat.completions.create(
#         model="/mnt/g/modelscope/Qwen2.5-VL-7B-Instruct-AWQ",  # 模型列表：https://help.aliyun.com/zh/model-studio/getting-started/models
#         messages=[
#             {'role': 'system', 'content': 'You are a helpful assistant.'},
#             {'role': 'user', 'content': '你是谁？'}
#             ]
#     )
#     print(completion.choices[0].message.content)
# except Exception as e:
#     print(f"错误信息：{e}")
#     print("请参考文档：https://help.aliyun.com/zh/model-studio/developer-reference/error-code")
# Model download
from modelscope import snapshot_download
# model_dir = snapshot_download('Qwen/Qwen2.5-VL-7B-Instruct-AWQ')

from openai import OpenAI

# Set OpenAI's API key and API base to use vLLM's API server.
# Connection settings for a vLLM OpenAI-compatible API server.
# vLLM does not validate the API key, so "EMPTY" is the conventional
# placeholder; the SDK still requires some value to be supplied.
# Both values can be overridden via environment variables so the script
# can target another deployment without editing the source.
openai_api_key = os.getenv("OPENAI_API_KEY", "EMPTY")
openai_api_base = os.getenv("OPENAI_API_BASE", "http://localhost:7777/v1")

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

# Send one multimodal (image URL + text) chat request and print the reply.
# The model identifier must match the path the vLLM server was launched
# with; allow overriding it via MODEL_PATH for other deployments while
# keeping the original value as the default.
model_name = os.getenv(
    "MODEL_PATH", "/mnt/g/modelscope/Qwen2.5-VL-7B-Instruct-AWQ"
)

try:
    chat_response = client.chat.completions.create(
        model=model_name,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {
                "role": "user",
                "content": [
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://modelscope.oss-cn-beijing.aliyuncs.com/resource/qwen.png"
                        },
                    },
                    {"type": "text", "text": "What is the text in the illustrate?"},
                ],
            },
        ],
    )
except Exception as e:
    # Network or server failures (connection refused, model not loaded,
    # bad request) would otherwise surface as a raw traceback; report
    # them readably instead.
    print(f"Request failed: {e}")
else:
    print("Chat response:", chat_response)