from typing import Optional, List, Union, Dict, Any
from openai import OpenAI, AsyncOpenAI
import asyncio
from app.utils.logger import logger
from tenacity import retry, stop_after_attempt, wait_exponential
from app.config.config import settings


class LLMClient:
    """Thin wrapper around the OpenAI SDK exposing sync and async chat
    completion helpers, each guarded by tenacity exponential-backoff retries.

    Credentials, base URL and default model name are read from application
    settings; generation parameters can be tuned per instance.
    """

    def __init__(
        self, 
        temperature: float = 0.7,
        max_retries: int = 3,
        timeout: int = 30
    ):
        """Build sync and async OpenAI clients from application settings.

        Args:
            temperature: Default sampling temperature for this instance.
            max_retries: Maximum automatic retries performed by the SDK clients.
            timeout: Per-request timeout in seconds for the SDK clients.
        """
        self.api_key = settings.OPENAI_LARGE_API_KEY
        self.base_url = settings.OPENAI_LARGE_API_BASE
        self.model = settings.LARGE_CHAT_MODEL
        self.temperature = temperature
        self.max_retries = max_retries
        self.timeout = timeout

        # Pass timeout/max_retries through to the SDK clients. Previously they
        # were stored on the instance but never used, so every request ran
        # with the SDK's defaults regardless of the constructor arguments.
        self.openai_large_client = OpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
            timeout=self.timeout,
            max_retries=self.max_retries,
        )
        self.async_openai_large_client = AsyncOpenAI(
            api_key=self.api_key,
            base_url=self.base_url,
            timeout=self.timeout,
            max_retries=self.max_retries,
        )

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10)
    )
    def get_llm_generation(self,
                   client: OpenAI,
                   model: str = "gpt-4",
                   temperature: float = 0.5,
                   system_prompt: str = "",
                   user_prompt: str = "",
                   is_stream: bool = True
                   ):
        """Run a chat completion synchronously.

        Args:
            client: A configured synchronous OpenAI client.
            model: Model name to request.
            temperature: Sampling temperature for this call.
            system_prompt: Text injected into the system message template.
            user_prompt: The user message content, sent verbatim.
            is_stream: When True, the SDK returns a streaming iterator of
                chunks; when False, the full completion object.

        Returns:
            The OpenAI SDK response (stream iterator or completion object).

        Raises:
            Propagates SDK/network errors after the tenacity retries are
            exhausted (3 attempts, exponential backoff).
        """
        # Define the system and user prompts. The template string (including
        # its leading newline and indentation) is kept exactly as before so
        # the rendered prompt sent to the model is unchanged.
        SYSTEM_PROMPT = """
            Human: {}
            """.format(system_prompt)
        USER_PROMPT = user_prompt
        # stream=False is equivalent to omitting the flag, so the previous
        # duplicated if/else branches collapse into one call.
        response = client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": USER_PROMPT},
            ],
            temperature=temperature,
            stream=is_stream,  # stream tokens incrementally when requested
        )
        return response

    @retry(
        stop=stop_after_attempt(3),
        wait=wait_exponential(multiplier=1, min=4, max=10)
    )
    async def get_llm_generation_async(self,
                   client: AsyncOpenAI,
                   model: str = "gpt-4",
                   temperature: float = 0.5,
                   system_prompt: str = "",
                   user_prompt: str = ""
    ):
        """Run a chat completion asynchronously (non-streaming).

        Args:
            client: A configured asynchronous OpenAI client.
            model: Model name to request.
            temperature: Sampling temperature for this call.
            system_prompt: Text injected into the system message template.
            user_prompt: The user message content, sent verbatim.

        Returns:
            The awaited OpenAI SDK completion object.

        Raises:
            Propagates SDK/network errors after the tenacity retries are
            exhausted (3 attempts, exponential backoff).
        """
        # Define the system and user prompts (template kept byte-identical to
        # the sync variant so both paths produce the same rendered prompt).
        SYSTEM_PROMPT = """
            Human: {}
            """.format(system_prompt)
        USER_PROMPT = user_prompt
        response = await client.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": USER_PROMPT},
            ],
            temperature=temperature,
        )
        return response
