"""
LLM客户端
"""

from typing import List, Dict, Any, Optional, AsyncGenerator, Union
import httpx
import json
import asyncio
from dotenv import load_dotenv
import os
import logging

# Simplified settings container (reads environment variables / .env)
class Settings:
    """LLM API configuration loaded from environment variables."""

    def __init__(self):
        load_dotenv()
        # SECURITY FIX: a real-looking API key was previously hard-coded here
        # as the fallback default. Never embed secrets in source; with an
        # empty default, DeepSeekClient raises a clear ValueError when no
        # key is configured instead of silently using a leaked credential.
        self.openai_api_key = os.getenv("OPENAI_API_KEY", "")
        self.openai_api_base = os.getenv("OPENAI_API_BASE", "https://api.deepseek.com/v1")
        self.model_name = os.getenv("DEFAULT_MODEL", "deepseek-chat")
        # DeepSeek reuses the OpenAI-compatible credentials/endpoint.
        self.deepseek_api_key = self.openai_api_key
        self.deepseek_api_base = self.openai_api_base
        # Generation defaults used when callers pass no overrides.
        self.temperature = 0.7
        self.max_tokens = 2000

# Simplified logger setup
def setup_logger(settings=None):
    """Return the shared ``llm_client`` logger, configuring it on first use.

    Args:
        settings: Accepted for interface compatibility; currently unused.

    Returns:
        The module logger with a stream handler and INFO level attached.
    """
    log = logging.getLogger("llm_client")
    if log.handlers:
        # Already configured by an earlier call — avoid duplicate handlers.
        return log
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(
        logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    )
    log.addHandler(stream_handler)
    log.setLevel(logging.INFO)
    return log

class LLMClient:
    """LLM client supporting multiple model backends."""

    def __init__(self, model_type="openai", api_key=None):
        """Initialize the LLM client.

        Args:
            model_type: Backend type; only "openai" (OpenAI-compatible) is supported.
            api_key: API key; when None, the value from the environment/Settings
                is used.

        Raises:
            ValueError: If ``model_type`` is not supported.
        """
        self.settings = Settings()
        self.logger = setup_logger(self.settings)
        self.model_type = model_type

        # Conversation history (kept for callers that append to it).
        self.conversation_history = []

        if model_type == "openai":
            # BUGFIX: the api_key argument was previously accepted but never
            # used. Propagate an explicit key into settings so DeepSeekClient
            # (which reads its key from settings) actually honors it.
            if api_key:
                self.settings.openai_api_key = api_key
                self.settings.deepseek_api_key = api_key

            api_base = self.settings.openai_api_base
            model = self.settings.model_name

            if "deepseek" in api_base:
                self.logger.info(f"使用DeepSeek API: {api_base}, 模型: {model}")
                model = "deepseek-chat"

            # Store the resolved model name (it was previously computed and
            # then silently discarded).
            self.model = model
            self.client = DeepSeekClient(self.settings)
            self.logger.info(f"OpenAI客户端初始化成功，使用端点: {api_base}")
        else:
            raise ValueError(f"不支持的模型类型: {model_type}")

    async def generate_text_stream(self, messages, system_prompt=None):
        """Stream generated text from the model.

        Args:
            messages: Message list; each item is
                {"role": "user" | "assistant", "content": str}.
            system_prompt: Optional system prompt that guides the reply style;
                prepended to the message list when given.

        Yields:
            Generated text fragments; on failure, a single error-message string.
        """
        try:
            if system_prompt:
                # Prepend without mutating the caller's list.
                messages = [{"role": "system", "content": system_prompt}] + messages

            # Log the exact payload sent to the model for traceability.
            self.logger.info(f"发送给大模型的流式输入: {json.dumps(messages, ensure_ascii=False)}")

            async for chunk in self.client.chat_completion_stream(messages=messages):
                yield chunk

        except Exception as e:
            # Degrade gracefully: surface the error as a final text chunk.
            self.logger.error(f"流式生成文本失败: {e}")
            yield f"抱歉，生成文本时出现错误: {str(e)}"

    def _prepare_messages(self, prompt, history=None):
        """Build the message list for an API call.

        Args:
            prompt: Current user prompt.
            history: Optional prior conversation; entries missing either
                "role" or "content" are skipped.

        Returns:
            A list of {"role", "content"} dicts ending with the user prompt.
        """
        messages = []

        # Carry over well-formed history entries only.
        if history:
            for msg in history:
                if msg.get("role") and msg.get("content"):
                    messages.append({
                        "role": msg["role"],
                        "content": msg["content"]
                    })

        # The current prompt always goes last, as a user turn.
        messages.append({
            "role": "user",
            "content": prompt
        })

        return messages

class DeepSeekClient:
    """DeepSeek API client (OpenAI-compatible chat-completions endpoint)."""

    def __init__(self, settings: Settings):
        """Initialize the client from settings.

        Args:
            settings: Settings providing ``deepseek_api_key``,
                ``deepseek_api_base`` and ``model_name``.

        Raises:
            ValueError: If no API key is configured.
        """
        self.settings = settings
        self.logger = setup_logger(settings)
        self.api_key = settings.deepseek_api_key
        self.api_base = settings.deepseek_api_base
        self.model = settings.model_name

        if not self.api_key:
            raise ValueError("DeepSeek API密钥未配置")

        # Raw httpx client, kept for backward compatibility with any external
        # users of `self.client`; the streaming path below uses the OpenAI SDK.
        self.client = httpx.AsyncClient(
            base_url=self.api_base,
            headers={
                "Authorization": f"Bearer {self.api_key}",
                "Content-Type": "application/json"
            },
            timeout=30.0
        )

        # BUGFIX: previously a new AsyncOpenAI client was created on every
        # streaming call and never closed. Cache one lazily and reuse it.
        self._openai_client = None

    async def chat_completion_stream(
        self,
        messages: List[Dict[str, str]],
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
        voice_id: Optional[str] = None,
    ) -> AsyncGenerator[str, None]:
        """Stream a chat completion.

        Args:
            messages: Conversation message list.
            temperature: Sampling temperature; falls back to settings default.
            max_tokens: Maximum tokens to generate; falls back to settings default.
            voice_id: Reserved for simultaneous speech generation; currently
                unused by this implementation.

        Yields:
            Text fragments as they arrive; on failure, one error-message string.
        """
        # Normalize model names like "vendor:model" to the plain API name.
        model_name = "deepseek-chat" if ":" in self.model else self.model

        try:
            if self._openai_client is None:
                # Imported lazily so the module loads even when the openai
                # SDK is not installed; the ImportError is reported below.
                from openai import AsyncOpenAI
                self._openai_client = AsyncOpenAI(
                    api_key=self.api_key,
                    base_url=self.api_base
                )

            self.logger.info(f"使用 OpenAI 客户端发送流式请求，模型: {model_name}")

            # Create the streaming response.
            stream = await self._openai_client.chat.completions.create(
                model=model_name,
                messages=messages,
                temperature=temperature or self.settings.temperature,
                max_tokens=max_tokens or self.settings.max_tokens,
                stream=True
            )

            # Forward only non-empty content deltas.
            async for chunk in stream:
                if chunk.choices and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        except ImportError as e:
            self.logger.error(f"导入 OpenAI 客户端库失败: {e}")
            yield f"导入 OpenAI 客户端库失败: {str(e)}"
        except Exception as e:
            # Degrade gracefully: surface the error as a final text chunk.
            self.logger.error(f"流式API调用失败: {e}")
            yield f"流式API调用失败: {str(e)}"
    
