import base64
import json

import requests
from openai import OpenAI
def get_message(model, image_path, text_prompt, max_tokens=256, stream=True):
    """Build an OpenAI-style chat payload that embeds a local image as base64.

    Parameters:
        model (str): model name to put in the request.
        image_path (str): path to the local image file.
        text_prompt (str): user text sent alongside the image.
        max_tokens (int): response token limit (default 256).
        stream (bool): ask the server for a streamed reply (default True).

    Returns:
        tuple: (payload dict ready to POST to /chat/completions,
                list containing the single user message).
    """
    # Read the whole image and base64-encode it; the file handle is closed
    # before the (pure in-memory) payload construction below.
    with open(image_path, 'rb') as image_file:
        image_base64 = base64.b64encode(image_file.read()).decode('utf-8')

    user_message = {
        "role": "user",
        "content": [
            {
                "type": "image_url",
                "image_url": {
                    # NOTE(review): the data URL always claims image/jpeg even
                    # if the file is another format — confirm callers pass JPEGs.
                    "url": f"data:image/jpeg;base64,{image_base64}",
                    "detail": "low"
                }
            },
            {
                "type": "text",
                "text": text_prompt
            }
        ]
    }
    payload = {
        'model': model,
        'messages': [user_message],
        'max_tokens': max_tokens,
        'stream': stream
    }
    return payload, [user_message]

class Swift:
    """Connection settings for a local ms-swift OpenAI-compatible deployment.

    Used as a plain namespace: `http_chat` / `single_turn_chat` read
    `base_url`, `api_key` and `model` as class attributes.
    """
    # Endpoint must end with /v1 (the /chat/completions suffix is appended later).
    base_url = 'http://10.106.150.135:8006/v1'
    # Dummy key — the local deployment does not validate it.
    api_key = '11'
    # Served model name as registered with the swift deploy command.
    model = 'hf_models'
# Underlying model type: deepseek-vl2 (see the swift deploy command at the bottom of this file)
def http_chat(API_cls, data):
    """POST a chat-completions payload and stream the reply to stdout.

    Parameters:
        API_cls: namespace class providing `base_url` (ending in /v1) and `api_key`.
        data: JSON-serializable request payload (see `get_message`).

    Returns:
        str: the concatenated streamed content on success, or None on HTTP error.
    """
    # base_url ends with /v1; append the chat endpoint.
    url = API_cls.base_url + '/chat/completions'

    headers = {
        'Content-Type': 'application/json',
        'Authorization': f'Bearer {API_cls.api_key}'
    }

    # Send the POST request with streaming enabled (server-sent events).
    response = requests.post(url, headers=headers, data=json.dumps(data), stream=True)

    if response.status_code != 200:
        print(f'Error: {response.status_code}')
        print(response.text)
        return None

    pieces = []
    for chunk in response.iter_lines():
        if not chunk:
            continue  # SSE keep-alive / separator blank lines
        line = chunk.decode('utf-8').strip()
        # Only 'data:' lines carry payloads; the old blind [6:] slice mangled
        # any other SSE field (e.g. 'event:' lines).
        if not line.startswith('data:'):
            continue
        body = line[5:].strip()
        if body == '[DONE]':
            break  # end-of-stream sentinel
        try:
            d = json.loads(body)
        except json.JSONDecodeError:
            continue  # skip malformed fragments rather than crash mid-stream
        choices = d.get('choices') or []
        if choices:
            delta = choices[0].get('delta') or {}
            cont = delta.get('content')
            if cont is not None:
                pieces.append(cont)
                print(cont, end='')
    return ''.join(pieces)


def single_turn_chat(messages, model_cls):
    """Single-turn streaming chat via the OpenAI client.

    Parameters:
        messages: list of chat messages (see `get_message`).
        model_cls: namespace class providing `api_key`, `base_url` and `model`.

    Returns:
        str: the full streamed assistant reply, also echoed to stdout.
    """
    client = OpenAI(api_key=model_cls.api_key, base_url=model_cls.base_url)
    print('请求single turn')
    response = client.chat.completions.create(
        model=model_cls.model,  # e.g. "deepseek-ai/deepseek-vl2"
        messages=messages,
        stream=True
    )
    res = ''
    for chunk in response:
        # Some servers emit chunks with an empty choices list (e.g. usage-only
        # chunks); indexing [0] unguarded would raise IndexError.
        if not chunk.choices:
            continue
        content = chunk.choices[0].delta.content
        if content:
            res += content
            print(content, end='')
    return res


def run_vl(image_path):
    """Send a local image, base64-embedded in JSON, to the configured VL model.

    Parameters:
        image_path (str): path to the local image file.

    Returns:
        The streamed response text from `http_chat`, or None when the
        request fails (the original docstring promised a return value but
        the function never returned one on success).
    """
    try:
        payload, messages = get_message(Swift.model, image_path, '这是什么')
        print(messages)
        # Alternative (OpenAI client) path:
        # single_turn_chat(messages=messages, model_cls=Swift)
        return http_chat(Swift, payload)
    except Exception as e:
        # Best-effort demo entry point: report the failure instead of crashing.
        print(f"请求过程中发生错误: {e}")
        return None


# Example usage
if __name__ == "__main__":
    # Local image to describe
    image_path = r"E:\aigc\qw.jpg"
    # Fire the request and collect the (possibly None) response
    response = run_vl(image_path)

    # Server-side deploy command (ms-swift supported):
    # CUDA_VISIBLE_DEVICES=4 swift deploy --model /home/llm/hf_models --model_type deepseek_vl2
