from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.

# Legacy (old-version) openai interface
import logging
import os
import re
import random
import openai
# openai.api_base = "http://192.168.17.187:8998/v1"
openai.api_base = "http://192.168.17.187:8987/v1"
openai.api_key = "none"

def get_mode_access_old(content_):
    """Send a single-turn chat request through the legacy openai API.

    Args:
        content_ (str): user message to send.

    Returns:
        str: assistant reply text from the first choice.
    """
    print(content_)
    completion = openai.ChatCompletion.create(
        model="DeepSeek",
        messages=[{"role": "user", "content": content_}],
        stream=False,
        stop=[],  # Custom stop words may be added here, e.g. stop=["Observation:"] for ReAct prompting.
    )
    return completion.choices[0].message.content


# New-version openai interface (kept commented out for reference)
# def model_access(content):
#     openai_api_key = "EMPTY"
#     openai_api_base = "http://192.168.17.187:8987/v1"
#     client = OpenAI(
#         api_key=openai_api_key,
#         base_url=openai_api_base,
#     )

#     chat_response = client.chat.completions.create(
#         model="deepseek-ai/deepseek-llm-67b-chat",
#         messages=[
#             {"role": "user", "content": content}
#         ]
#     )
#     # print("Chat response:", chat_response)
#     content = chat_response.choices[0].message.content
#     return content


# New interface
from openai import OpenAI
import openai
# openai.api_key = "none"
# openai.base_url = "http://192.168.17.xxx:xxx/v1"


def list_model():
    """Return the raw model-list response from the configured openai server."""
    api_client = OpenAI(api_key=openai.api_key, base_url=openai.base_url)
    return api_client.models.list()

def list_model_names(reg=None):
    """List available model ids from the server, optionally regex-filtered.

    Args:
        reg (str, optional): regex pattern; when given, only ids matching it
            (via ``re.search``) are kept. Defaults to None (no filtering).

    Returns:
        list[str]: model ids reported by the server.
    """
    api_client = OpenAI(api_key=openai.api_key, base_url=openai.base_url)
    listing = api_client.models.list()
    names = [entry.id for entry in listing.data]
    if reg:
        return [name for name in names if re.search(reg, name)]
    return names

def random_select_model_name(model_name_reg):
    """Pick one model at random among ids matching *model_name_reg*.

    Falls back to returning *model_name_reg* itself when no server model
    matches, so callers can also pass a literal model name.

    Args:
        model_name_reg (str): regex used to filter the server's model ids.

    Returns:
        str: a randomly chosen matching model id, or *model_name_reg*.
    """
    options = list_model_names(model_name_reg)
    if not options:
        return model_name_reg
    # random.choice is the idiomatic form of randint(0, len-1) indexing.
    return random.choice(options)

def model_access(content, history_messages=None, model_name='baichuan2-chat', strict_model_name=True, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, temperature=0.7, top_p=1.0, top_k=-1):
    """Blocking openai chat-completions request.

    Args:
        content (str): user message to send.
        history_messages (list[dict], optional): prior chat messages prepended
            before the new user turn. Defaults to no history.
        model_name (str, optional): model to call. Defaults to 'baichuan2-chat'.
        strict_model_name (bool, optional): when True, use *model_name* verbatim;
            when False, treat *model_name* as a regex and randomly pick one of
            the server's matching models. Defaults to True.
        presence_penalty (float, optional): openai presence penalty. Defaults to 0.0.
        frequency_penalty (float, optional): openai frequency penalty. Defaults to 0.0.
        repetition_penalty (float, optional): currently unused — the openai
            client does not accept it; kept for signature compatibility.
        temperature (float, optional): sampling temperature. Defaults to 0.7.
        top_p (float, optional): nucleus sampling parameter. Defaults to 1.0.
        top_k (int, optional): currently unused — not supported by the openai
            client; kept for signature compatibility.

    Returns:
        str: the assistant's reply text.
    """
    # None sentinel instead of a mutable [] default, which is shared
    # across calls and a classic Python pitfall.
    if history_messages is None:
        history_messages = []
    client = OpenAI(
        api_key=openai.api_key,
        base_url=openai.base_url,
    )
    if not strict_model_name:
        model_name = random_select_model_name(model_name)
    print(f"model name: {model_name}")

    chat_response = client.chat.completions.create(
        model=model_name,
        messages=history_messages + [
            {"role": "user", "content": content},
        ],
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        # repetition_penalty is not a supported argument of the openai client.
        temperature=temperature,
        top_p=top_p,
        # top_k is likewise unsupported; stop words could be added here,
        # e.g. stop=['<|im_end|>'].
    )
    print("Chat response:", chat_response)
    return chat_response.choices[0].message.content

def model_access_stream(content, history_messages=None, model_name='baichuan2-chat', strict_model_name=True, presence_penalty=0.0, frequency_penalty=0.0, repetition_penalty=1.0, temperature=0.7, top_p=1.0, top_k=-1):
    """Streaming openai chat-completions request (generator).

    Args:
        content (str): user message to send.
        history_messages (list[dict], optional): prior chat messages prepended
            before the new user turn. Defaults to no history.
        model_name (str, optional): model to call. Defaults to 'baichuan2-chat'.
        strict_model_name (bool, optional): when True, use *model_name* verbatim;
            when False, treat *model_name* as a regex and randomly pick one of
            the server's matching models. Defaults to True.
        presence_penalty (float, optional): openai presence penalty. Defaults to 0.0.
        frequency_penalty (float, optional): openai frequency penalty. Defaults to 0.0.
        repetition_penalty (float, optional): currently unused — the openai
            client does not accept it; kept for signature compatibility.
        temperature (float, optional): sampling temperature. Defaults to 0.7.
        top_p (float, optional): nucleus sampling parameter. Defaults to 1.0.
        top_k (int, optional): currently unused — not supported by the openai
            client; kept for signature compatibility.

    Yields:
        str | None: incremental delta content of each streamed chunk
            (chunks may carry None deltas).

    Examples:
        result = ''
        for v in model_access_stream('你是谁', strict_model_name=False):
            if v is not None:
                result += v
            print(v)
        result = result.strip(' \\n')
        result
    """
    # None sentinel instead of a mutable [] default, which is shared
    # across calls and a classic Python pitfall.
    if history_messages is None:
        history_messages = []
    client = OpenAI(
        api_key=openai.api_key,
        base_url=openai.base_url,
    )

    if not strict_model_name:
        model_name = random_select_model_name(model_name)
    print(f"model name: {model_name}")

    for chat_response in client.chat.completions.create(
        model=model_name,
        messages=history_messages + [
            {"role": "user", "content": content},
        ],
        stream=True,
        # Stop words could be added here, e.g. stop=['<|im_end|>'].
        presence_penalty=presence_penalty,
        frequency_penalty=frequency_penalty,
        # repetition_penalty is not a supported argument of the openai client.
        temperature=temperature,
        top_p=top_p,
        # top_k is likewise unsupported.
    ):
        print("Chat response:", chat_response)
        yield chat_response.choices[0].delta.content
