# encoding: utf-8
"""
Client helpers for connecting to the LLM model service.
"""
from typing import List, Union

import numpy as np
import openai
from langchain.chat_models import ChatOpenAI

from ...config.base import (
    EMBEDDING_API_KEY,
    EMBEDDING_MODEL_NAME,
    EMBEDDING_SERVER_ADDRESS,
    MAX_TOKENS,
    TEMPERATURE,
)

# Base URL of the self-hosted, OpenAI-compatible LLM server.
LLM_SERVER_ADDRESS = r'https://u359243-9d78-f301a417.bjb1.seetacloud.com:8443'
# NOTE(review): API key is hard-coded in source — consider moving it to
# config/environment like EMBEDDING_API_KEY.
API_KEY = '84e6c96fd43e40b59942b51562091334'


def create_langchain_llm_client(model_name=None, api_path=None, max_tokens=MAX_TOKENS,
                                streaming=False, temperature=0.5):
    """Build a LangChain ``ChatOpenAI`` client bound to the self-hosted LLM server.

    :param model_name: model identifier; ``None`` selects the first model
        reported by :func:`get_server_llm_model_names`.
    :param api_path: OpenAI-compatible base URL; ``None`` uses the module's
        ``LLM_SERVER_ADDRESS`` plus ``/v1``.
    :param max_tokens: maximum number of tokens to generate per completion.
    :param streaming: whether the client should stream tokens as produced.
    :param temperature: sampling temperature (historical default 0.5).
    :return: a configured ``ChatOpenAI`` instance.
    """
    if model_name is None:
        model_name = get_server_llm_model_names()[0]

    if api_path is None:
        api_path = LLM_SERVER_ADDRESS + '/v1'

    return ChatOpenAI(
        streaming=streaming,
        model_name=model_name,
        openai_api_base=api_path,
        # Use the shared module-level key instead of re-inlining the same
        # literal (value is identical to the previous hard-coded string).
        openai_api_key=API_KEY,
        temperature=temperature,
        max_tokens=max_tokens,
    )


def answer_with_openai_client(messages, model_name=None, temperature=TEMPERATURE, max_tokens=MAX_TOKENS):
    """Send a chat-completion request to the LLM server and return the raw response.

    :param messages: OpenAI-style chat messages (list of role/content dicts).
    :param model_name: model identifier; ``None`` selects the first model
        reported by :func:`get_server_llm_model_names`.
    :param temperature: sampling temperature.
    :param max_tokens: maximum number of tokens to generate.
    :return: the ``ChatCompletion`` response object from the server.
    """
    if model_name is None:
        model_name = get_server_llm_model_names()[0]

    # Use the shared API_KEY for consistency with create_openai_client();
    # the previous literal "none" only worked if the server skipped auth.
    with openai.OpenAI(base_url=LLM_SERVER_ADDRESS + '/v1', api_key=API_KEY) as openai_client:
        response = openai_client.chat.completions.create(
            model=model_name,
            messages=messages,
            temperature=temperature,
            max_tokens=max_tokens,
        )
    return response


def create_openai_client(is_async=True):
    """Create an OpenAI SDK client pointed at the self-hosted LLM server.

    :param is_async: if True return an ``openai.AsyncClient``, otherwise a
        synchronous ``openai.OpenAI`` client.
    :return: the constructed client instance.
    """
    client_cls = openai.AsyncClient if is_async else openai.OpenAI
    return client_cls(base_url=LLM_SERVER_ADDRESS + '/v1', api_key=API_KEY)


def get_server_llm_model_names():
    """Return the list of LLM model names available on the server.

    NOTE: the controller's ``/list_models`` endpoint is currently bypassed;
    a single local model path is hard-coded instead.

    :return: list of model-name strings.
    """
    return ['/root/autodl-tmp/deepseek/DeepSeek-R1-Distill-Qwen-7B']


class NetworkEmbedder(object):
    """Client for a remote, OpenAI-compatible text-embedding service.

    Texts are prefixed with BGE-style task instructions before being sent
    to the embedding endpoint.
    """

    def __init__(self, dtype: str = 'bge'):
        """
        :param dtype: embedder family; only 'bge' ships instruction prompts.
        """
        self.dtype = dtype
        # Task-specific instruction prefixes. Always initialized so an
        # unsupported dtype fails with a KeyError at encode time rather
        # than an AttributeError (the original left the attribute unset
        # for any dtype other than 'bge').
        self.instructions = {}
        if dtype == 'bge':
            self.instructions = {
                "qa": {
                    "query": "Represent this query for retrieving relevant documents: ",
                    "key": "Represent this document for retrieval: ",
                },
                "convsearch": {
                    "query": "Encode this query and context for searching relevant passages: ",
                    "key": "Encode this passage for retrieval: ",
                },
                "chat": {
                    "query": "Embed this dialogue to find useful historical dialogues: ",
                    "key": "Embed this historical dialogue for retrieval: ",
                },
                "lrlm": {
                    "query": "Embed this text chunk for finding useful historical chunks: ",
                    "key": "Embed this historical text chunk for retrieval: ",
                },
                "icl": {
                    "query": "Convert this example into vector to look for useful examples: ",
                    "key": "Convert this example into vector for retrieval: ",
                },
                "tool": {
                    "query": "Transform this user request for fetching helpful tool descriptions: ",
                    "key": "Transform this tool description for retrieval: "
                },
            }
        self.model = EMBEDDING_MODEL_NAME
        # NOTE(review): EMBEDDING_SERVER_ADDRESS is referenced here but is not
        # imported at the top of this file — confirm it exists in config.base
        # and is imported, otherwise construction raises NameError.
        self.openai_client = openai.Client(api_key=EMBEDDING_API_KEY,
                                           base_url=EMBEDDING_SERVER_ADDRESS + '/v1')

    def _embed_func(self, text: Union[str, List[str]]):
        """Call the embedding endpoint.

        :param text: a single string or a list of strings.
        :return: one embedding (list of floats) for a str input, or a list
            of embeddings for a list input.
        """
        single = isinstance(text, str)
        inputs = [text] if single else text
        response = self.openai_client.embeddings.create(
            model=self.model, input=inputs, encoding_format='float')
        embeddings = [item.embedding for item in response.data]
        return embeddings[0] if single else embeddings

    def encode_queries(self, queries: Union[List[str], str],
                       task: str = 'qa') -> np.ndarray:
        """Embed query text(s) prefixed with the task's query instruction.

        :param queries: a single query or a list of queries.
        :param task: instruction-set key (e.g. 'qa', 'chat', 'tool').
        :return: embedding array — 1-D for a single query, 2-D for a list.
        """
        instruction = self.instructions[task]["query"]
        if isinstance(queries, str):
            input_texts = instruction + queries
        else:
            input_texts = [instruction + q for q in queries]
        return np.array(self._embed_func(input_texts))

    def encode_keys(self, keys: Union[List[str], str],
                    task: str = 'qa') -> np.ndarray:
        """Embed document/key text(s) prefixed with the task's key instruction.

        :param keys: a single document or a list of documents.
        :param task: instruction-set key (e.g. 'qa', 'chat', 'tool').
        :return: embedding array — 1-D for a single key, 2-D for a list.
        """
        instruction = self.instructions[task]["key"]
        if isinstance(keys, str):
            input_texts = instruction + keys
        else:
            input_texts = [instruction + k for k in keys]
        return np.array(self._embed_func(input_texts))