import base64
import datetime
import functools
import os.path

import requests

# Import the OpenAI client (used to talk to Qwen2-VL-72B-Instruct via vLLM)
from openai import OpenAI

# Set OpenAI's API key and API base to use vLLM's API server.
vllm_model_name = "qwen2-vl-72b-instruct"
openai_api_key = "EMPTY"  # vLLM's OpenAI-compatible server typically ignores the key; "EMPTY" is the conventional placeholder
openai_api_base = "http://10.107.105.207:48030/v1"
local_api_key = "TCTPTEST"  # Authorization header value for the local captioning service
local_model_urls = "https://q.test-adm.weoa.com/oacgs-bms-a/api"
# local_model_urls = "http://127.0.0.1:9315/"
temperature = 0.0  # deterministic decoding for chat completions
request_timeout = 90  # seconds; NOTE(review): defined but never passed to any request in this file — confirm intent

# Shared OpenAI-compatible client pointed at the vLLM server (created at import time).
client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)


def log_runtimes(func):
    """Decorator that prints the wall-clock duration of each call to *func*.

    The timing message goes to stdout as
    ``<func name> method cost <seconds>``; the wrapped function's return
    value is passed through unchanged.
    """
    @functools.wraps(func)  # preserve __name__/__doc__ of the wrapped function
    def log(*args, **kwargs):
        before_time = datetime.datetime.now()
        result = func(*args, **kwargs)
        after_time = datetime.datetime.now()
        # fix: timedelta.seconds truncates to whole seconds (and drops the
        # days component); total_seconds() reports the true elapsed time.
        print('{} method cost {}'.format(
            func.__name__, (after_time - before_time).total_seconds()))
        return result

    return log


def encode_image(image_path):
    """Return the Base64 text encoding of the file at *image_path*."""
    with open(image_path, "rb") as fh:
        raw_bytes = fh.read()
    return base64.b64encode(raw_bytes).decode('utf-8')


@log_runtimes
def inference_chat(chat, model, api_url, token):
    """Send a chat transcript to the vLLM OpenAI-compatible server.

    Args:
        chat: iterable of (role, content) pairs, in conversation order.
        model, api_url, token: unused here; kept for interface
            compatibility with the HTTP-based variant (see the
            commented-out implementation at the bottom of this file).

    Returns:
        The assistant's reply text, or None when the request failed.
    """
    messages = [{"role": role, "content": content} for role, content in chat]

    # fix: res_content was unbound when the request raised, turning any
    # network/API error into an UnboundLocalError at the return statement.
    res_content = None
    try:
        chat_response = client.chat.completions.create(model=vllm_model_name, messages=messages,
                                                       temperature=temperature)
        res_content = chat_response.choices[0].message.content
    except Exception as e:
        print(str(e))
        print(str(messages)[:1000])  # print only the first 1000 chars for debugging
    # print('inference_chat response:', res_content)

    return res_content


@log_runtimes
def generate_image_caption(image_path, mode='BLIP') -> (dict, int, int):
    """Upload an image to the local captioning service and return its perception info.

    Args:
        image_path: path of the image file to upload as multipart form data.
        mode: captioning backend selector passed as a query parameter (default 'BLIP').

    Returns:
        (perception_infos, width, height) from the service's JSON on success.
        NOTE(review): on a non-200 response this returns the raw response
        text (a str) instead of the tuple — callers must handle both shapes.
        The `(dict, int, int)` annotation is not a real typing construct;
        presumably Tuple[dict, int, int] was meant — confirm and fix.
    """
    headers = {
        "Authorization": local_api_key
    }
    params = {
        'mode': mode
    }
    # Disabled alternative: send the image *path* instead of the file itself
    # res = requests.post(url = '{}/generate_image_caption'.format(local_model_urls), params=params).json()

    # Upload the image file itself as multipart form data
    # NOTE(review): verify=False disables TLS certificate checking — presumably
    # for the internal test endpoint; confirm before using in production.
    with open(image_path, 'rb') as f:
        files = {'file': (os.path.basename(image_path), f)}
        res = requests.post(url='{}/generate_image_caption'.format(local_model_urls),
                            headers=headers, params=params, files=files, verify=False)
    if res.status_code != 200:
        print(res.text)
        return res.text
    res = res.json()
    print(res['perception_infos'])
    # NOTE(review): 'witdth' looks like a typo but presumably matches the
    # server's actual JSON key — verify against the service before renaming.
    return res['perception_infos'], res['witdth'], res['height']


# NOTE(review): dead code below — an earlier HTTP-based implementation of
# inference_chat, kept here inside a string literal for reference. It retries
# forever on failure and uses a bare except; consider deleting it outright.
'''
def inference_chat(chat, model, api_url, token):    
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {token}"
    }

    data = {
        "model": model,
        "messages": [],
        "max_tokens": 2048,
        'temperature': 0.0,
        "seed": 1234
    }

    for role, content in chat:
        data["messages"].append({"role": role, "content": content})

    while True:
        try:
            res = requests.post(api_url, headers=headers, json=data)
            res_json = res.json()
            res_content = res_json['choices'][0]['message']['content']
        except:
            print("Network Error:")
            try:
                print(res.json())
            except:
                print("Request Failed")
        else:
            break

    return res_content
'''