from typing import Optional, Any, List

from langchain_community.embeddings import XinferenceEmbeddings
from llama_index.embeddings.langchain import LangchainEmbedding
from llama_index.llms.openai_like import OpenAILike

from server.module_chat.chat.core.output_parsers.deepseek_output_parse import DeepSeekOutputParse
from server.settings import Settings
from xinference_client import RESTfulClient

from server.utils.url_util import get_base_url


class MyXinferenceEmbeddings(XinferenceEmbeddings):
    """``XinferenceEmbeddings`` subclass that authenticates with an ``api_key``.

    The parent constructor does not accept an ``api_key``, so after calling it
    this subclass rebuilds ``self.client`` with the key included, allowing
    requests against a secured Xinference server.
    """

    def __init__(
            self, server_url: Optional[str] = None, api_key: Optional[str] = None, model_uid: Optional[str] = None,
    ):
        """Create the embeddings wrapper.

        :param server_url: base URL of the Xinference server
        :param api_key: API key forwarded to ``RESTfulClient``; the parent
            class has no such parameter, which is why this override exists
        :param model_uid: UID of the launched embedding model
        """
        # Parent is initialized without the api_key (it does not take one).
        # NOTE(review): presumably the parent also creates its own client,
        # which the next line replaces — confirm against the installed
        # langchain_community version.
        super().__init__(server_url=server_url, model_uid=model_uid)
        # Rebuild the REST client so that it carries the api_key; uses the
        # module-level xinference_client.RESTfulClient import.
        self.client = RESTfulClient(server_url, api_key)


class MyXinferenceRerank:
    """Thin client around a rerank model hosted on an Xinference server.

    Holds a ``RESTfulClient`` plus the UID of a launched rerank model and
    exposes a single :meth:`rerank` call. Mirrors the attribute layout of
    ``MyXinferenceEmbeddings`` above.
    """

    client: Any  # RESTfulClient talking to the server
    server_url: Optional[str]  # URL of the xinference server
    api_key: Optional[str]  # api_key of the xinference server (None = unauthenticated)
    model_uid: Optional[str]  # UID of the launched rerank model
    top_n: int = 10  # default number of documents returned when rerank() gets no top_n

    def __init__(
            self, server_url: Optional[str] = None, api_key: Optional[str] = None, model_uid: Optional[str] = None
    ):
        """Validate arguments and build the REST client.

        :param server_url: base URL of the Xinference server (required)
        :param api_key: optional API key passed to ``RESTfulClient``
        :param model_uid: UID of the launched rerank model (required)
        :raises ImportError: if neither ``xinference`` nor ``xinference_client``
            is installed
        :raises ValueError: if ``server_url`` or ``model_uid`` is missing
        """
        # Prefer the full xinference package, fall back to the lightweight
        # standalone client package.
        try:
            from xinference.client import RESTfulClient
        except ImportError:
            try:
                from xinference_client import RESTfulClient
            except ImportError as e:
                raise ImportError(
                    "Could not import RESTfulClient from xinference. Please install it"
                    " with `pip install xinference` or `pip install xinference_client`."
                ) from e

        if server_url is None:
            raise ValueError("Please provide server URL")

        if model_uid is None:
            raise ValueError("Please provide the model UID")

        self.server_url = server_url
        self.api_key = api_key
        self.model_uid = model_uid
        self.client = RESTfulClient(server_url, api_key)

    def rerank(self, query: str, documents: List[str], top_n: Optional[int] = None):
        """Rerank *documents* against *query* via the remote model.

        :param query: query text
        :param documents: candidate documents to score
        :param top_n: number of results to return; falls back to
            ``self.top_n`` (10) when omitted or None. Previously annotated as
            a required ``int`` even though the body handled None — now an
            optional parameter, which is backward compatible.
        :return: the raw response from the remote model's ``rerank`` call
        """
        if top_n is None:
            top_n = self.top_n

        model = self.client.get_model(self.model_uid)
        return model.rerank(query, documents, top_n)


# Default chat LLM served from an OpenAI-compatible endpoint.
llm = OpenAILike(
    api_base=Settings.model_settings.DEFAULT_API_BASE_URL,
    api_key=Settings.model_settings.DEFAULT_API_KEY,  # not configured at vllm startup, so it defaults to "EMPTY"
    model=Settings.model_settings.DEFAULT_LLM_MODEL,
    is_chat_model=True,  # use the chat-completions endpoint rather than plain completions
    timeout=600
)

# Same LLM but with an output parser attached.
# NOTE(review): presumably DeepSeekOutputParse strips the DeepSeek
# reasoning/"think" section from responses (the name `no_think_llm`
# suggests so) — confirm in DeepSeekOutputParse.
output_parse = DeepSeekOutputParse()
no_think_llm = OpenAILike(
    api_base=Settings.model_settings.DEFAULT_API_BASE_URL,
    api_key=Settings.model_settings.DEFAULT_API_KEY,  # not configured at vllm startup, so it defaults to "EMPTY"
    model=Settings.model_settings.DEFAULT_LLM_MODEL,
    is_chat_model=True,
    timeout=600,
    output_parser=output_parse
)

# Embedding model: the api_key-aware subclass defined above, wrapped for
# llama-index via LangchainEmbedding. get_base_url derives the server root
# from the OpenAI-style API base URL.
my_xinference_embeddings = MyXinferenceEmbeddings(
    server_url=get_base_url(Settings.model_settings.DEFAULT_API_BASE_URL),
    api_key=Settings.model_settings.DEFAULT_API_KEY,
    model_uid=Settings.model_settings.DEFAULT_EMBEDDING_MODEL
)
embedding_model = LangchainEmbedding(my_xinference_embeddings)

# Rerank model client against the same Xinference server.
rerank_model = MyXinferenceRerank(
    server_url=get_base_url(Settings.model_settings.DEFAULT_API_BASE_URL),
    api_key=Settings.model_settings.DEFAULT_API_KEY,
    model_uid=Settings.model_settings.DEFAULT_RERANK_MODEL,
)
