from typing import Optional, List

from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms.ollama import Ollama
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.language_models import BaseChatModel
from loguru import logger

from ai_engine.core.llm.volcengine_ark_chat_model import VolcengineArkChatModel
from ai_engine.core.model.base import ModelKwargs, LlmApiType


class BaseService:
    """Base service that constructs a LangChain LLM client from ``ModelKwargs``.

    The provider is selected by ``model_kwargs.api_type`` (OpenAI-compatible,
    Ollama, or Volcengine Ark); :meth:`get_llm` returns a configured client.
    """

    # Provider configuration: api type, endpoint, credentials, model name.
    model_kwargs: ModelKwargs

    def __init__(
            self,
            model_kwargs: Optional[ModelKwargs] = None
    ):
        """Store the provider configuration.

        :param model_kwargs: provider settings; ``None`` is accepted here,
            but :meth:`get_llm` will fail until a real value is assigned.
        """
        self.model_kwargs = model_kwargs
        # FIX: loguru formats positional args into "{}" placeholders; the
        # original call had no placeholder, so the value was silently dropped.
        logger.info("model_kwargs: {}", self.model_kwargs)

    def get_llm(
            self,
            temperature: float = 0.0,
            top_p: float = 0.0,
            stream: bool = False,
            callback_manager: Optional[BaseCallbackHandler] = None,
            max_tokens: Optional[int] = None,
            stop: Optional[List[str]] = None,
            verbose: bool = False,
            request_timeout: int = 10
    ) -> Optional[BaseChatModel]:
        """Build an LLM client for the configured provider.

        :param temperature: sampling temperature.
        :param top_p: nucleus-sampling parameter (used by Volcengine Ark only).
        :param stream: enable token streaming (OpenAI / Volcengine Ark).
        :param callback_manager: LangChain callback handler, if any.
        :param max_tokens: response token cap (OpenAI / Volcengine Ark).
        :param stop: stop sequences (OpenAI / Volcengine Ark).
        :param verbose: LangChain verbose flag.
        :param request_timeout: request timeout in seconds.
        :return: a configured model, or ``None`` when ``api_type`` matches no
            known provider (original behavior preserved; a warning is logged).
        """
        llm = None
        api_type = self.model_kwargs.api_type
        if api_type == LlmApiType.OPENAI.value:
            llm = ChatOpenAI(
                openai_api_base=self.model_kwargs.api_base,
                openai_api_key=self.model_kwargs.api_key,
                model_name=self.model_kwargs.model_name,
                temperature=temperature,
                streaming=stream,
                callback_manager=callback_manager,
                max_tokens=max_tokens,
                max_retries=3,
                # Request timeout is in seconds.
                request_timeout=request_timeout,
                verbose=verbose,
                stop=stop
            )
        elif api_type == LlmApiType.OLLAMA.value:
            # FIX: the model name was hard-coded to "llama3", ignoring the
            # configured model; fall back to "llama3" only when unset so the
            # previous default still works.
            llm = Ollama(base_url=self.model_kwargs.api_base,
                         model=self.model_kwargs.model_name or "llama3",
                         temperature=temperature,
                         timeout=request_timeout)
        elif api_type == LlmApiType.VOLCENGINEARK.value:
            llm = VolcengineArkChatModel(volc_ak=self.model_kwargs.api_key,
                                         volc_sk=self.model_kwargs.api_secret_key,
                                         model=self.model_kwargs.model_name,
                                         streaming=stream,
                                         temperature=temperature,
                                         top_p=top_p,
                                         max_tokens=max_tokens,
                                         timeout=request_timeout,
                                         callback_manager=callback_manager,
                                         verbose=verbose,
                                         stop=stop)
        else:
            # Preserve the original "return None" contract, but make the
            # unmatched case visible instead of failing silently downstream.
            logger.warning("get_llm: unknown api_type {}", api_type)
        return llm
