import os
import re
import time
from typing import List, Optional

import dotenv
from openai import OpenAI
from pydantic import BaseModel
from sparkai.llm.llm import ChatMessage, ChatSparkLLM

__all__ = ["instruct"]


def parse_five_suggestions(content: str) -> List[str]:
    """Extract five numbered suggestions ("1. ... 5. ...") from *content*.

    Args:
        content: Free-form model output expected to contain items
            numbered "1." through "5.".

    Returns:
        A list of exactly five strings. When the numbered pattern is not
        found, five empty strings. Each item is stripped, has one pair of
        surrounding ASCII/CJK quotes removed, and has runs of whitespace
        collapsed to single spaces.
    """
    match = re.search(
        r"1[.](.*?)2[.](.*?)3[.](.*?)4[.](.*?)5[.](.*)$",
        content,
        re.DOTALL | re.MULTILINE,
    )
    if match is None:
        return ["", "", "", "", ""]

    items = []
    for raw in match.groups():
        item = raw.strip()
        # Drop one pair of surrounding straight or curly quotes, if present.
        item = re.sub(r'^["“](.*)["”]$', r"\1", item)
        # Collapse internal whitespace (including newlines) to single spaces.
        item = re.sub(r"\s+", " ", item)
        items.append(item)
    return items


class Time(BaseModel):
    """Timing breakdown for a single streamed LLM call, in seconds."""

    # Seconds from request start until the first streamed chunk arrives.
    prefilling: float = 0.0
    # Seconds spent receiving the remaining chunks after the first one.
    generation: float = 0.0


class LLMResponse(BaseModel):
    """Structured result of one LLM call."""

    # Final answer text (or the caller-supplied fallback on failure).
    content: str
    # Prefill/generation timing; all zeros when timing was not measured.
    time: Time = Time()
    # Reasoning trace, either from a dedicated API field or extracted from
    # an inline <think>...</think> span; empty when none was produced.
    reasoning_content: str = ""
    # Five suggestions parsed from `content` by parse_five_suggestions();
    # empty when the response was not parsed for suggestions.
    five_suggestions: List[str] = []

    def __str__(self):
        # Printing the response yields just the answer text.
        return self.content


class SparkInstruct(object):
    """Instruction-following client backed by iFlytek Spark (SparkAI)."""

    def __init__(self, envs):
        """Store SparkAI connection parameters from an env mapping.

        Args:
            envs: Mapping providing SPARKAI_URL, SPARKAI_APP_ID,
                SPARKAI_API_KEY, SPARKAI_API_SECRET and SPARKAI_DOMAIN.
        """
        self.params = dict(
            spark_api_url=envs["SPARKAI_URL"],
            spark_app_id=envs["SPARKAI_APP_ID"],
            spark_api_key=envs["SPARKAI_API_KEY"],
            spark_api_secret=envs["SPARKAI_API_SECRET"],
            spark_llm_domain=envs["SPARKAI_DOMAIN"],
        )

    def __call__(
        self,
        user_input,
        system_prompt="",
        failing_default="",
        temperature=0.0,
        return_time=False,
        history=None,
    ) -> LLMResponse:
        """Send one chat turn to Spark and return the streamed response.

        Args:
            user_input: The user's message text.
            system_prompt: Optional system message; omitted when empty.
            failing_default: Content used when the request fails.
            temperature: Sampling temperature forwarded to the model.
            return_time: When True, attach prefill/generation timing.
            history: Optional list of {"role": ..., "content": ...} dicts
                for prior turns. Defaults to no history.

        Returns:
            LLMResponse with the generated text, or *failing_default* on
            any error.
        """
        # Normalize here instead of using the mutable default `history=[]`
        # in the signature (shared-default pitfall).
        if history is None:
            history = []

        spark_client = ChatSparkLLM(
            **self.params,
            request_timeout=60,
            streaming=False,
        )

        messages = []
        if system_prompt:
            messages.append(ChatMessage(role="system", content=system_prompt))
        for message in history:
            messages.append(
                ChatMessage(role=message["role"], content=message["content"])
            )
        messages.append(ChatMessage(role="user", content=user_input))

        tm = Time()
        start = time.time()
        try:
            response = spark_client.stream(messages, temperature=temperature)
            raw_text = ""
            for i, message in enumerate(response):
                if i == 0:
                    # Time-to-first-chunk = prefill latency.
                    tm.prefilling = time.time() - start
                raw_text += message.content
            tm.generation = time.time() - start - tm.prefilling
        except Exception:
            # Best effort: any transport/model failure falls back to the
            # caller-supplied default text; timing stays at zeros.
            raw_text = failing_default

        if return_time:
            return LLMResponse(content=raw_text, time=tm)
        return LLMResponse(content=raw_text)


class OpenAIInstruct(object):
    """Instruction-following client backed by an OpenAI-compatible API."""

    def __init__(self, envs):
        """Store API settings from an env mapping.

        Args:
            envs: Mapping providing OPENAI_API_KEY, OPENAI_MODEL_ID and
                optionally OPENAI_API_BASE (custom endpoint URL).
        """
        self.api_key = envs["OPENAI_API_KEY"]
        self.base_url = envs.get("OPENAI_API_BASE", None)
        self.model_id = envs["OPENAI_MODEL_ID"]

    def __call__(
        self,
        user_input,
        system_prompt="",
        failing_default="",
        temperature=0.0,
        history=None,
    ) -> LLMResponse:
        """Send one chat turn and return the parsed response.

        Args:
            user_input: The user's message text.
            system_prompt: Optional system message; omitted when empty.
            failing_default: Content used when the request fails.
            temperature: Sampling temperature forwarded to the model.
            history: Optional list of {"role": ..., "content": ...} dicts
                for prior turns. Defaults to no history.

        Returns:
            LLMResponse with content, any reasoning trace, and the five
            parsed suggestions; on any error, just *failing_default*.
        """
        # Normalize here instead of using the mutable default `history=[]`
        # in the signature (shared-default pitfall).
        if history is None:
            history = []

        messages = []
        if system_prompt:
            # Match SparkInstruct: skip the system turn entirely when the
            # prompt is empty rather than sending empty content.
            messages.append({"role": "system", "content": system_prompt})
        messages.extend(history)
        messages.append({"role": "user", "content": user_input})

        openai_client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
        )
        try:
            chat_completion = openai_client.chat.completions.create(
                messages=messages,
                temperature=temperature,
                model=self.model_id,
            )
            message = chat_completion.choices[0].message
            content = message.content
            if getattr(message, "reasoning_content", None):
                # Some backends expose the reasoning trace as a dedicated
                # message field.
                reasoning_content = message.reasoning_content
            elif match_obj := re.match(
                r"<think>(.*)</think>(.*)", content, re.DOTALL | re.MULTILINE
            ):
                # Others inline it as a leading <think>...</think> span;
                # split it out of the visible content.
                reasoning_content = match_obj.group(1)
                content = match_obj.group(2)
            else:
                reasoning_content = ""
            return LLMResponse(
                content=content,
                reasoning_content=reasoning_content,
                five_suggestions=parse_five_suggestions(content),
            )
        except Exception:
            # Best effort: any API failure yields the caller's fallback text.
            return LLMResponse(content=failing_default)


# Maps the CLIENT_TYPE value from an env file to the client class that
# implements it; used by get_by_name().
client_factory = {
    "sparkai": SparkInstruct,
    "openai": OpenAIInstruct,
}


def get_by_name(name: Optional[str] = None) -> Optional[callable]:
    """Build an LLM client from a named env file.

    Args:
        name: Basename of an env file under ``envs/`` (loaded as
            ``envs/<name>.env``). When None, python-dotenv falls back to
            discovering the default ``.env`` file.

    Returns:
        A callable client instance, or None when no env values are found.

    Raises:
        ValueError: If the env file's CLIENT_TYPE is not a known client.
    """
    env_file = None if name is None else os.path.join("envs", f"{name}.env")
    envs = dotenv.dotenv_values(env_file)
    if not envs:
        return None

    client_type = envs.get("CLIENT_TYPE")
    if client_type not in client_factory:
        raise ValueError(f"Client type {client_type} not supported")
    return client_factory[client_type](envs)


# Module-level default client built from the default .env configuration;
# may be None when no configuration is present (see get_by_name).
instruct = get_by_name()


if __name__ == "__main__":
    # Smoke test: ask the configured model to write a poem (Chinese prompt).
    if instruct is None:
        raise SystemExit("No LLM client configured: missing or empty .env")
    response = instruct("帮我写一首诗")
    print(response)
