import openai
from jinja2 import Template
from prompt import prompt_dialogue


import httpx
from typing import List, Dict, Callable, Generator, Union
from functools import partial


def configure_openai_client(url: str, api_key: str) -> None:
    """Point the module-level OpenAI client at an endpoint.

    Args:
        url: Base URL of the OpenAI-compatible service.
        api_key: Credential for that service.
    """
    # Clear any proxy so requests go straight to the endpoint.
    openai.proxy = ""
    openai.base_url = url
    openai.api_key = api_key


class LLMClient:
    """Thin wrapper over the OpenAI SDK for chat and legacy completion
    endpoints, with optional buffered streaming output.
    """

    def __init__(
        self,
        url: str,
        api_key: str = "EMPTY",
        chat_mode: bool = True,
        buffer_size: int = 20,
        timeout: int = 60
    ):
        """Configure the SDK and select the endpoint.

        Args:
            url: Base URL of the OpenAI-compatible service.
            api_key: API key; "EMPTY" for servers that do not check it.
            chat_mode: True -> chat completions API (message list);
                False -> legacy completions API (plain prompt).
            buffer_size: Minimum number of characters accumulated before
                a streamed piece is yielded.
            timeout: Per-request timeout in seconds.
        """
        configure_openai_client(url, api_key)
        self.chat_mode = chat_mode
        self.buffer_size = buffer_size
        self.timeout = timeout
        # BUG FIX: the original always used the chat endpoint, so with
        # chat_mode=False it sent a "prompt" payload to an API that only
        # accepts "messages". Route to the matching endpoint instead.
        self.llm_function = (
            openai.chat.completions.create
            if chat_mode
            else openai.completions.create
        )

    def _prepare_params(
        self,
        messages: List[Union[Dict, str]],
        stream: bool,
        **kwargs
    ) -> Dict:
        """Build the keyword arguments for the SDK call.

        The payload key depends on the endpoint: "messages" for chat
        mode, "prompt" for the legacy completions API.
        """
        params = {
            **kwargs,
            "stream": stream,
            "timeout": self.timeout
        }
        if self.chat_mode:
            params["messages"] = messages
        else:
            params["prompt"] = messages
        return params

    def generate(
        self,
        messages: List[Union[Dict, str]],
        stream: bool = False,
        **kwargs
    ) -> Union[str, Generator[str, None, None]]:
        """Run one completion request.

        Args:
            messages: Chat message dicts (chat mode) or a raw prompt.
            stream: If True, return a generator of text chunks;
                otherwise return the full response text.
            **kwargs: Extra parameters forwarded to the SDK call
                (e.g. model, temperature).

        Returns:
            The response text, or a generator yielding buffered chunks.
        """
        params = self._prepare_params(messages, stream, **kwargs)

        if not stream:
            response = self.llm_function(**params)
            choice = response.choices[0]
            # BUG FIX: legacy completion choices carry .text, not
            # .message.content.
            return choice.message.content if self.chat_mode else choice.text

        return self._handle_streaming_output(params)

    def _handle_streaming_output(self, params: Dict) -> Generator[str, None, None]:
        """Yield response text in pieces of at least ``buffer_size`` chars.

        The trailing partial buffer (if any) is yielded at the end.
        """
        buffer = ""
        for chunk in self.llm_function(**params):
            if not chunk.choices:
                continue
            # BUG FIX: stream chunks expose .delta.content only in chat
            # mode; legacy completion chunks expose .text.
            piece = (
                chunk.choices[0].delta.content
                if self.chat_mode
                else chunk.choices[0].text
            )
            if piece:
                buffer += piece
                if len(buffer) >= self.buffer_size:
                    yield buffer
                    buffer = ""
        if buffer:
            yield buffer

if __name__ == "__main__":
    # Tool schema handed to the prompt template.
    api = {
            "name": "googleWebSearch",
            "description": (
                "A Google Search Engine. "
                "Useful when you need to search information you don't know such as weather, "
                "exchange rate, current events."
                "Never ever use this tool when user want to translate"
            ),
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": (
                            "Content that users want to search for, such as 'weather', 'current events', etc."
                            "If special characters such as '\n' appear in the search, "
                            "these special characters must be ignored.\n"
                            "Chinese characters are preferred."
                        ),
                    }
                },
                "required": ["query"],
            },
        }

    t = Template(prompt_dialogue)
    # BUG FIX: the original rendered with api="api" (the literal string),
    # which silently discarded the tool schema defined above.
    prompt = t.render(api=api, num_diags=5)

    model_url = "https://cloud.infini-ai.com/maas/gpt-4o-20240513/azure/"
    llm_client = LLMClient(model_url, "mass平台apikey")

    # BUG FIX: dropped the stray name="" kwarg — it is forwarded verbatim
    # into openai.chat.completions.create, which rejects unknown
    # parameters.
    response = llm_client.generate(
        messages=[{"role": "user", "content": prompt}],
        stream=False
    )

    # TODO: add post-processing code to parse the result.
