import os
from typing import (
    Callable,
    Generic,
    List,
    Optional,
    Tuple,
    Type,
    TypeVar,
    Union,
)

from langchain.chat_models import init_chat_model
from langchain_core.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    PromptTemplate,
    SystemMessagePromptTemplate,
)
from langchain_core.rate_limiters import InMemoryRateLimiter
from langchain_core.runnables import Runnable
from langchain_openai import ChatOpenAI
from loguru import logger
from pydantic import BaseModel

from exceptions import VariableValidationError

_BM = TypeVar("_BM", bound=BaseModel)


class LLMFunction:
    """A named, retrying wrapper around a LangChain runnable.

    Validates that the caller supplies exactly the prompt variables the
    chain expects, retries failed calls up to ``max_tries`` times, and —
    when a pydantic model class ``bm`` is given — parses the model's JSON
    output into an instance of that model.
    """

    def __init__(
        self, chain: Runnable, name: str, max_tries: int, variables: List[str], bm: Type[_BM]
    ):
        self.chain = chain
        self.name = name
        self.MAX_TRIES = max_tries
        # "input" is always supplied via the human message, so it is always
        # part of the expected variable set; set() de-duplicates.
        self.variables = list(set(variables + ["input"]))
        self.bm = bm

    def _variables_validate(self, **kwargs):
        """Raise VariableValidationError unless kwargs matches the prompt's
        variable set exactly (no extra keys, no missing keys)."""
        expected = set(self.variables)
        given = set(kwargs.keys())

        if extra := given - expected:
            raise VariableValidationError(
                f"Variables not presented in the prompt: {extra}"
            )
        elif missing := expected - given:
            raise VariableValidationError(
                f"Variables in the prompt not given: {missing}"
            )

    def _convert(self, text) -> _BM:
        """Parse raw model output into an instance of ``self.bm``."""
        from langchain_core.utils.json import parse_json_markdown
        from json import JSONDecodeError

        text = text.strip()
        try:
            # parse_json_markdown understands ```json fenced markdown output
            # and tolerates partial JSON, which is useful with streaming=True.
            payload = parse_json_markdown(text)
            return self.bm(**payload)
        except JSONDecodeError:
            logger.error(f"Invalid json output: {text}")
            raise  # bare raise keeps the original traceback intact

    def _finish(self, result, kwargs):
        """Log a successful call; convert the result when a model is set."""
        logger.debug(
            f"Call {self.name} with kwargs{kwargs} \n result: \n {result}\n"
        )
        return result if not self.bm else self._convert(result.content)

    def _give_up(self, e: Exception, tries: int, kwargs) -> bool:
        """Log a failed attempt; return True once MAX_TRIES is reached."""
        logger.error(
            f"Call {self.name} {tries} {kwargs} with exception: {e} "
        )
        return tries >= self.MAX_TRIES

    def invoke(self, **kwargs):
        """Synchronously invoke the chain with retries.

        Returns the parsed model instance (or the raw chain result when no
        model is configured). After MAX_TRIES consecutive failures returns
        the sentinel string "failed" — kept as-is for existing callers.
        """
        self._variables_validate(**kwargs)

        tries = 0
        while True:
            try:
                return self._finish(self.chain.invoke(kwargs), kwargs)
            except Exception as e:
                tries += 1
                if self._give_up(e, tries, kwargs):
                    return "failed"

    async def ainvoke(self, **kwargs):
        """Async counterpart of invoke(); identical retry semantics."""
        self._variables_validate(**kwargs)

        tries = 0
        while True:
            try:
                return self._finish(await self.chain.ainvoke(kwargs), kwargs)
            except Exception as e:
                tries += 1
                if self._give_up(e, tries, kwargs):
                    return "failed"


class LLM:
    """Factory that builds a ChatOpenAI client from environment variables
    and turns prompt definitions into callable LLMFunction objects."""

    def __init__(self):
        self.llm = self._create_llm()

    def _create_llm(self) -> ChatOpenAI:
        """Create the ChatOpenAI client.

        Reads OPENAI_CHAT_ENDPOINT / OPENAI_CHAT_MODEL / OPENAI_CHAT_KEY
        (required; raises KeyError if missing) and the optional
        RATE_LIMIT_* knobs, which have defaults.
        """
        openai_api_base = os.environ["OPENAI_CHAT_ENDPOINT"]
        model = os.environ["OPENAI_CHAT_MODEL"]
        key = os.environ["OPENAI_CHAT_KEY"]
        request_per_second = int(
            os.environ.get("RATE_LIMIT_REQUEST_PER_SECOND", 100)
        )
        check_every_n_seconds = float(
            os.environ.get("RATE_LIMIT_CHECK_INTERVAL", 0.1)
        )
        max_bucket_size = int(
            os.environ.get("RATE_LIMIT_MAX_BUCKET_SIZE", 1000)
        )

        # NOTE(review): the limiter is built but deliberately not passed to
        # ChatOpenAI below — re-enable via rate_limiter= when needed.
        rate_limiter = InMemoryRateLimiter(
            requests_per_second=request_per_second,
            check_every_n_seconds=check_every_n_seconds,
            max_bucket_size=max_bucket_size,
        )

        return ChatOpenAI(
            temperature=0.0,
            base_url=openai_api_base,
            model=model,
            api_key=key,
            streaming=False,
            max_tokens=3980,
            # rate_limiter=rate_limiter,
        )

    def _create_parse_prompt(
        self, prompts: List[str], additional_variables: Optional[List[str]] = None
    ) -> ChatPromptTemplate:
        """Assemble a chat prompt: the joined ``prompts`` become the system
        message; the human message carries the mandatory "input" variable.

        ``additional_variables`` defaults to None (treated as an empty list)
        to avoid the mutable-default-argument pitfall.
        """
        if additional_variables is None:
            additional_variables = []

        system_prompt = PromptTemplate(
            # filter(None, ...) drops empty prompt fragments before joining.
            template="\n".join(filter(None, prompts)),
            input_variables=additional_variables,
        )
        system_message_prompt = SystemMessagePromptTemplate(
            prompt=system_prompt
        )

        human_prompt = PromptTemplate(
            template="输入如下：\n{input}",
            input_variables=["input"],
        )
        human_message_prompt = HumanMessagePromptTemplate(prompt=human_prompt)

        return ChatPromptTemplate.from_messages(
            [system_message_prompt, human_message_prompt]
        )

    def _safe_bracket(self, json: dict) -> str:
        """Escape braces so the JSON schema survives prompt templating."""
        return f"{json}".replace("{", "{{").replace("}", "}}")

    def create(
        self,
        name: str,
        func: Union[
            Callable[[], Tuple[List[str], List[str]]], Tuple[str, List[str]]
        ],
        bm: Optional[Type[_BM]] = None,
    ) -> LLMFunction:
        """Build an LLMFunction from either a factory callable returning
        (prompt_lines, variables) or a single (prompt_line, variables) tuple.

        When ``bm`` is given, a JSON-schema instruction is appended to the
        system prompt and the function's output is parsed into ``bm``.
        """
        if callable(func):
            lines, variables = func()
            # Copy so appending the schema line never mutates the caller's list.
            lines = list(lines)
        else:
            line, variables = func
            lines = [line]

        if bm:
            desc = self._safe_bracket(bm.model_json_schema())
            lines.append(f"请输出JSON格式，Schema为：{desc}")

        template = self._create_parse_prompt(
            prompts=lines, additional_variables=variables
        )

        return LLMFunction(
            template | self.llm,
            name,
            max_tries=int(os.environ.get("MAX_TRIES", 3)),
            variables=variables,
            bm=bm,
        )
