from typing import List
import openai
import tiktoken
import transformers
from chernc.llm.lm_inference import LanguageModelPredictor, AsyncLanguageModelPredictor
from .uni_tokenizer import UniTokenizer
from typing import Dict, Optional, Union, Generator, AsyncGenerator
from openai.types.chat.chat_completion_message import ChatCompletionMessage
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk

import logging
from chernc.logging import build_logger
from chernc.constants import DEFAULT_LOGGER_DIR, DEFAULT_LOGGER_FILE_NAME

# Module-level logger routed to the project's default log directory/file.
logger: logging.Logger = build_logger(__name__, logger_filename=DEFAULT_LOGGER_FILE_NAME, logger_dir=DEFAULT_LOGGER_DIR)


class APIInference(LanguageModelPredictor):
    """
    Synchronous client for an OpenAI-compatible chat-completions API.

    Wraps an ``openai.OpenAI`` client together with a ``UniTokenizer`` so
    callers can count tokens and request plain or streamed chat completions.
    """

    def __init__(self, model_name: str, api_key: str, base_url: str = "https://api.openai.com/v1"):
        """
        Initialize the APIInference class.

        :param model_name: The name of the LLM model to use.
        :type model_name: str
        :param api_key: The API key for the LLM API.
        :type api_key: str
        :param base_url: The base URL for the LLM API.
        :type base_url: str
        """
        self.model_name: str = model_name
        self.api_key: str = api_key
        self.base_url: str = base_url
        self.client: openai.OpenAI = openai.OpenAI(api_key=self.api_key, base_url=self.base_url)
        self.tokenizer = UniTokenizer(self.model_name)

    def __repr__(self) -> str:
        """
        Return a string representation of the APIInference instance.
        """
        return f"APIInference(model_name={self.model_name}, base_url={self.base_url})"

    def token_num(self, text: Union[str, List[Dict[str, str]]]) -> int:
        """
        Return the number of tokens in the text.

        :param text: The text to count the tokens of, or a list of messages.
        :type text: Union[str, List[Dict[str, str]]]

        :return: The number of tokens in the text.
        :rtype: int

        :raises RuntimeError: If the tokenizer was not initialized.
        """
        if self.tokenizer is None:
            # RuntimeError is a subclass of Exception, so existing
            # ``except Exception`` callers keep working while the error
            # type is more specific than a bare Exception.
            raise RuntimeError("The tokenizer is None")
        return self.tokenizer.token_num(text=text)

    def chat(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Optional[str]:
        """
        Generate a response from the LLM.

        :param messages: The messages to send to the LLM.
        :type messages: List[Dict[str, str]]
        :param max_length: The maximum number of tokens in the response.
        :type max_length: int
        :param temperature: The temperature to use for the response.
        :type temperature: Optional[float]
        :param top_p: The top p to use for the response.
        :type top_p: Optional[float]

        :return: The response from the LLM, or None if the API returned no content.
        :rtype: Optional[str]
        """
        outputs = self.client.chat.completions.create(
            model=self.model_name,
            messages=messages,  # type: ignore
            max_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
        )
        return outputs.choices[0].message.content

    def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Generator[Optional[str], None, None]:
        """
        Generate a stream of responses from the LLM.

        :param messages: The messages to send to the LLM.
        :type messages: List[Dict[str, str]]
        :param max_length: The maximum number of tokens in the response.
        :type max_length: int, optional
        :param temperature: The temperature to use for the response.
        :type temperature: float, optional
        :param top_p: The top p to use for the response.
        :type top_p: float, optional

        :return: A generator of the response from the LLM.
        :rtype: Generator[Optional[str], None, None]
        """
        response: openai.Stream[ChatCompletionChunk] = self.client.chat.completions.create(
            model=self.model_name,
            messages=messages,  # type: ignore
            max_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
            stream=True,  # enable streaming responses
        )
        for chunk in response:
            # Some OpenAI-compatible servers emit frames with an empty
            # ``choices`` list (e.g. usage or keep-alive chunks); skip them
            # instead of raising IndexError mid-stream.
            if not chunk.choices:
                continue
            yield chunk.choices[0].delta.content  # yield content piece by piece


class AsyncAPIInference(AsyncLanguageModelPredictor):
    """
    Asynchronous client for an OpenAI-compatible chat-completions API.

    Async counterpart of ``APIInference``: wraps an ``openai.AsyncOpenAI``
    client and a ``UniTokenizer`` for token counting and (streamed) chat.
    """

    def __init__(self, model_name: str, api_key: str, base_url: str = "https://api.openai.com/v1", rate_limit: int = 1):
        """
        Initialize the AsyncAPIInference class.

        :param model_name: The name of the LLM model to use.
        :type model_name: str
        :param api_key: The API key for the LLM API.
        :type api_key: str
        :param base_url: The base URL for the LLM API.
        :type base_url: str
        :param rate_limit: Concurrency/rate budget stored on the instance.
            NOTE(review): not enforced anywhere in this class — presumably
            consumed by callers or the base class; confirm.
        :type rate_limit: int
        """
        self.model_name = model_name
        self.api_key = api_key
        self.base_url = base_url
        self.client = openai.AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)
        self.tokenizer = UniTokenizer(self.model_name)
        self.rate_limit = rate_limit

    def __repr__(self) -> str:
        """
        Return a string representation of the AsyncAPIInference instance.
        """
        return f"AsyncAPIInference(model_name={self.model_name}, base_url={self.base_url})"

    def token_num(self, text: Union[str, List[Dict[str, str]]]) -> int:
        """
        Return the number of tokens in the text.

        :param text: The text to count the tokens of, or a list of messages.
        :type text: Union[str, List[Dict[str, str]]]

        :return: The number of tokens in the text.
        :rtype: int

        :raises RuntimeError: If the tokenizer was not initialized.
        """
        if self.tokenizer is None:
            # RuntimeError is a subclass of Exception, so existing
            # ``except Exception`` callers keep working.
            raise RuntimeError("The tokenizer is None")
        return self.tokenizer.token_num(text=text)

    async def chat(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> Optional[str]:
        """
        Async version - Generate a response from the LLM.

        :param messages: The messages to send to the LLM.
        :type messages: List[Dict[str, str]]
        :param max_length: The maximum number of tokens in the response.
        :type max_length: int
        :param temperature: The temperature to use for the response.
        :type temperature: Optional[float]
        :param top_p: The top p to use for the response.
        :type top_p: Optional[float]

        :return: The response from the LLM, or None if the API returned no content.
        :rtype: Optional[str]
        """
        outputs = await self.client.chat.completions.create(
            model=self.model_name,
            messages=messages,  # type: ignore
            max_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
        )
        return outputs.choices[0].message.content

    async def chat_stream(
        self,
        messages: List[Dict[str, str]],
        max_length: int = 2048,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
    ) -> AsyncGenerator[Optional[str], None]:
        """
        Async version - Generate a stream of responses from the LLM.

        :param messages: The messages to send to the LLM.
        :type messages: List[Dict[str, str]]
        :param max_length: The maximum number of tokens in the response.
        :type max_length: int, optional
        :param temperature: The temperature to use for the response.
        :type temperature: float, optional
        :param top_p: The top p to use for the response.
        :type top_p: float, optional

        :return: An async generator of the response from the LLM.
        :rtype: AsyncGenerator[Optional[str], None]
        """
        response: openai.AsyncStream[ChatCompletionChunk] = await self.client.chat.completions.create(
            model=self.model_name,
            messages=messages,  # type: ignore
            max_tokens=max_length,
            temperature=temperature,
            top_p=top_p,
            stream=True,  # enable streaming responses
        )
        async for chunk in response:
            # Some OpenAI-compatible servers emit frames with an empty
            # ``choices`` list (e.g. usage or keep-alive chunks); skip them
            # instead of raising IndexError mid-stream.
            if not chunk.choices:
                continue
            yield chunk.choices[0].delta.content  # yield content piece by piece
