from typing import AsyncGenerator, List

from openai import AsyncOpenAI

from config.config import get_doubao_flash_config
from core.llm.base import LLMProviderBase
from core.llm.factory import ModelType
from core.utils.log import mylogger


class LLMProvider(LLMProviderBase):
    """Async LLM provider for the Doubao Flash model.

    Wraps an OpenAI-compatible endpoint (``AsyncOpenAI``) configured from
    ``get_doubao_flash_config()``. Only ``ModelType.DOUBAO_FLASH`` is
    accepted; any other model type raises ``ValueError``.
    """

    def __init__(self, model_type=ModelType.DOUBAO_FLASH, timeout=None):
        """Initialize the client for the Doubao Flash endpoint.

        Args:
            model_type: Must be ``ModelType.DOUBAO_FLASH``.
            timeout: Request timeout in seconds; defaults to 60 when ``None``.

        Raises:
            ValueError: If ``model_type`` is not ``ModelType.DOUBAO_FLASH``.
        """
        if model_type != ModelType.DOUBAO_FLASH:
            raise ValueError(f"此提供者只支持DOUBAO_FLASH模型，收到: {model_type}")

        self.model_type = model_type
        self.timeout = timeout

        doubao_flash_config = get_doubao_flash_config()
        # Fall back to a 60-second timeout when the caller did not supply one.
        client_timeout = self.timeout if self.timeout is not None else 60
        self.client = AsyncOpenAI(
            api_key=doubao_flash_config.api_key,
            base_url=doubao_flash_config.base_url,
            timeout=client_timeout,
        )
        self.model = doubao_flash_config.model
        self.model_name = "doubaoFlash"

        # Lazy %-args defer formatting until the record is actually emitted.
        mylogger.info(
            "LLM using model: %s (type: %s)", self.model, self.model_type.value
        )

    def _build_params(
        self,
        messages: List[dict],
        stream: bool,
        temperature: float,
        frequency_penalty: float,
        presence_penalty: float,
    ) -> dict:
        """Assemble the keyword arguments for a chat-completion request.

        Shared by :meth:`chat` and :meth:`chat_stream` so the two request
        shapes cannot drift apart.
        """
        return {
            "model": self.model,
            "messages": messages,
            "stream": stream,
            "temperature": temperature,
            "frequency_penalty": frequency_penalty,
            "presence_penalty": presence_penalty,
        }

    async def chat(
        self,
        messages: List[dict],
        temperature: float = 0.7,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
    ) -> str:
        """Send a non-streaming chat request and return the reply text.

        Args:
            messages: OpenAI-style message dicts (``role``/``content`` keys
                presumed — confirm against callers).
            temperature: Sampling temperature.
            frequency_penalty: Frequency penalty passed through to the API.
            presence_penalty: Presence penalty passed through to the API.

        Returns:
            The assistant reply text; an empty string when the API returns
            no content.
        """
        request_params = self._build_params(
            messages, False, temperature, frequency_penalty, presence_penalty
        )

        response = await self.client.chat.completions.create(**request_params)
        # message.content may be None (e.g. tool-call-only replies); coerce
        # to "" so the declared ``-> str`` contract holds.
        content = response.choices[0].message.content
        return content if content is not None else ""

    async def chat_stream(
        self,
        messages: List[dict],
        temperature: float = 0.7,
        frequency_penalty: float = 0.0,
        presence_penalty: float = 0.0,
    ) -> AsyncGenerator[str, None]:
        """Send a streaming chat request, yielding reply text chunks.

        Args and semantics mirror :meth:`chat`, but results arrive
        incrementally as non-empty content deltas.

        Yields:
            Non-empty content fragments in arrival order.
        """
        mylogger.info("Chat request: %s", messages)

        request_params = self._build_params(
            messages, True, temperature, frequency_penalty, presence_penalty
        )

        stream = await self.client.chat.completions.create(**request_params)

        async for chunk in stream:
            # Some keep-alive/usage chunks carry no choices; skip them.
            if not chunk.choices:
                continue
            content = chunk.choices[0].delta.content
            if content:
                yield content
