import json
import os
import re
from typing import Optional

import dotenv
from openai import OpenAI
from pydantic import BaseModel

# Public API of this module.
# NOTE(review): "instruct" is not defined anywhere in this file, so
# `from <module> import *` would raise AttributeError — presumably this
# should list "get_by_name"; confirm against the rest of the package.
__all__ = ["instruct"]


class Time(BaseModel):
    """Timing breakdown for a single LLM call (units presumably seconds — confirm with callers)."""

    # Time spent before the first generated token (prompt processing).
    prefilling: float = 0.0
    # Time spent producing the output tokens.
    generation: float = 0.0

class LLMResponse(BaseModel):
    """Result of one LLM call: the reply text, timing info, and any reasoning trace."""

    content: str
    time: Time = Time()
    reasoning_content: str = ""

    def __str__(self):
        """Render the response as its plain text content."""
        return self.content

    def parse_json(self, is_list: bool = False):
        """Decode the JSON object (or array, when ``is_list``) embedded in ``content``.

        The slice runs from the first opening delimiter to the last closing
        one, so surrounding prose or code fences are ignored.  Raises
        ``ValueError`` when no delimiter is present and
        ``json.JSONDecodeError`` when the slice is not valid JSON.
        """
        open_ch, close_ch = ("[", "]") if is_list else ("{", "}")
        first = self.content.index(open_ch)
        last = self.content.rindex(close_ch)
        return json.loads(self.content[first : last + 1])


class OpenAIInstruct(object):
    """Chat-completion client configured from an environment mapping.

    Required keys: ``OPENAI_API_KEY`` and ``OPENAI_MODEL_ID``; the optional
    ``OPENAI_API_BASE`` points at an OpenAI-compatible endpoint.
    """

    def __init__(self, envs):
        self.api_key = envs["OPENAI_API_KEY"]
        self.base_url = envs.get("OPENAI_API_BASE", None)

        self.model_id = envs["OPENAI_MODEL_ID"]

    def __call__(
        self,
        user_input,
        system_prompt="",
        failing_default="",
        temperature=0.0,
        history=None,
    ) -> LLMResponse:
        """Send one chat turn and return the model's reply.

        Args:
            user_input: The user message for this turn.
            system_prompt: System message prepended to the conversation.
            failing_default: Content to return when the API call fails.
            temperature: Sampling temperature.
            history: Optional list of prior message dicts inserted between the
                system prompt and the new user message.

        Returns:
            An ``LLMResponse``; on any API error, one whose content is
            ``failing_default`` (best-effort by design — no exception escapes).
        """
        # Fix for the shared-mutable-default pitfall: the old `history=[]`
        # default was a single list shared across all calls.
        if history is None:
            history = []
        openai_client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
        )
        try:
            chat_completion = openai_client.chat.completions.create(
                messages=[
                    {"role": "system", "content": system_prompt},
                    *history,
                    {"role": "user", "content": user_input},
                ],
                temperature=temperature,
                model=self.model_id,
            )
            message = chat_completion.choices[0].message
            content = message.content
            # Reasoning text may arrive either as a dedicated field (some
            # OpenAI-compatible backends expose `reasoning_content`) or inline
            # in <think>...</think> tags at the start of the reply.
            reasoning_content = getattr(message, "reasoning_content", "") or ""
            if not reasoning_content:
                match_obj = re.match(
                    r"<think>(.*)</think>(.*)", content, re.DOTALL | re.MULTILINE
                )
                if match_obj:
                    reasoning_content = match_obj.group(1)
                    content = match_obj.group(2)
            return LLMResponse(
                content=content,
                reasoning_content=reasoning_content,
            )

        except Exception:
            # Deliberate best-effort: any failure degrades to the
            # caller-supplied default rather than raising.
            return LLMResponse(content=failing_default)

# Registry mapping the CLIENT_TYPE value from an env file to the client class
# that get_by_name should instantiate.
client_factory = {
    "openai": OpenAIInstruct,
}


def get_by_name(name: Optional[str] = None):
    """Build an LLM client from a dotenv configuration file.

    Args:
        name: Configuration name; loads ``envs/{name}.env``.  When ``None``,
            ``dotenv_values`` receives ``None`` and falls back to its own
            default ``.env`` discovery.

    Returns:
        A client instance (chosen by the file's ``CLIENT_TYPE``), or ``None``
        when the env file is missing or empty.

    Raises:
        ValueError: If ``CLIENT_TYPE`` is not a supported client type.
    """
    # The original `env_file = name` branch only ever assigned None; say so directly.
    env_file = None if name is None else os.path.join("envs", f"{name}.env")
    envs = dotenv.dotenv_values(env_file)

    if not envs:
        return None

    client_type = envs.get("CLIENT_TYPE")
    if client_type not in client_factory:
        raise ValueError(f"Client type {client_type} not supported")
    return client_factory[client_type](envs)