"""
LLM管理器模块

负责不同大模型提供商的统一接口管理，提供标准化的模型调用接口。
支持多种模型提供商，包括OpenAI、DeepSeek、Ollama等。

主要功能：
1. 统一的模型调用接口
2. 多提供商支持
3. 流式和非流式响应
4. 连接测试和错误处理
5. 配置管理和验证

设计模式：
- 抽象基类：定义统一的接口规范
- 具体实现：各提供商的特定实现
- 管理器模式：统一管理和调度

作者: LLM Chat System
版本: 1.0.0
"""

from typing import List, Dict, Any, Optional, AsyncGenerator, TYPE_CHECKING

if TYPE_CHECKING:
    from app.models.model_config import ModelConfig
import asyncio
import json
from abc import ABC, abstractmethod

# ModelConfig is imported only under TYPE_CHECKING (see above) to avoid a
# circular import at runtime; annotations use the string form "ModelConfig".
from app.core.config import settings


class BaseLLMProvider(ABC):
    """
    Abstract base class for LLM providers.

    Defines the interface every provider implementation must satisfy so
    that different vendors can be called in a uniform way.
    """
    
    @abstractmethod
    async def chat_completion(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Chat completion (non-streaming).

        Calls the model and returns the complete reply at once.

        Args:
            model_config: Model configuration (credentials, endpoint, defaults).
            messages: Message list, e.g. [{"role": "user", "content": "..."}].
            model_settings: Optional per-request overrides (temperature,
                max_tokens, ...) that take precedence over config defaults.

        Returns:
            Dict[str, Any]: Reply content, token usage and metadata.
        """
        pass
    
    @abstractmethod
    async def chat_completion_stream(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Streaming chat completion.

        Calls the model and yields reply fragments as they are generated.

        Args:
            model_config: Model configuration (credentials, endpoint, defaults).
            messages: Message list, e.g. [{"role": "user", "content": "..."}].
            model_settings: Optional per-request overrides (temperature,
                max_tokens, ...) that take precedence over config defaults.

        Yields:
            Dict[str, Any]: Content fragment and (estimated) token count.
        """
        pass
    
    @abstractmethod
    async def generate_image(
        self,
        model_config: "ModelConfig",
        prompt: str,
        image_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Image generation.

        Calls an image-generation model to produce one or more images.

        Args:
            model_config: Model configuration (credentials, endpoint).
            prompt: Text prompt describing the desired image.
            image_settings: Optional generation settings (size, quality, ...).

        Returns:
            Dict[str, Any]: Image URLs and request metadata.
        """
        pass


class OpenAIProvider(BaseLLMProvider):
    """
    OpenAI provider.

    Implements chat completion (plain and streaming) and image generation
    against the OpenAI(-compatible) API configured in ``model_config``.
    """

    @staticmethod
    def _resolve_params(
        model_config: "ModelConfig",
        model_settings: Optional[Dict[str, Any]]
    ) -> Dict[str, Any]:
        """
        Merge per-request overrides with the model's configured defaults.

        A key present in ``model_settings`` wins; otherwise the value stored
        on ``model_config`` is used. Numeric fields appear to be stored as
        strings on the config (hence the float() conversions) — the config
        defaults must therefore always be parseable.
        """
        overrides = model_settings or {}

        def pick(key: str, fallback: Any) -> Any:
            # Explicit membership test so an override of 0/0.0 is honored.
            return overrides[key] if key in overrides else fallback

        return {
            "temperature": pick("temperature", float(model_config.temperature)),
            "max_tokens": pick("max_tokens", model_config.max_tokens),
            "top_p": pick("top_p", float(model_config.top_p)),
            "frequency_penalty": pick("frequency_penalty", float(model_config.frequency_penalty)),
            "presence_penalty": pick("presence_penalty", float(model_config.presence_penalty)),
        }

    async def chat_completion(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        OpenAI chat completion (non-streaming).

        Args:
            model_config: Model configuration (api_key, base_url, defaults).
            messages: Conversation messages in OpenAI chat format.
            model_settings: Optional per-request generation overrides.

        Returns:
            Dict with "content", "token_count" and "metadata".

        Raises:
            Exception: Wraps any API/client error with a descriptive message.
        """
        try:
            import openai  # lazy import keeps the dependency optional

            client = openai.AsyncOpenAI(
                api_key=model_config.api_key,
                base_url=model_config.base_url
            )

            params = self._resolve_params(model_config, model_settings)

            response = await client.chat.completions.create(
                model=model_config.model_name,
                messages=messages,
                **params
            )

            return {
                "content": response.choices[0].message.content,
                "token_count": response.usage.total_tokens if response.usage else 0,
                "metadata": {
                    "model": response.model,
                    "finish_reason": response.choices[0].finish_reason
                }
            }
        except Exception as e:
            # "from e" makes the root cause explicit in the traceback chain.
            raise Exception(f"OpenAI API调用失败: {str(e)}") from e

    async def chat_completion_stream(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        OpenAI streaming chat completion.

        Args:
            model_config: Model configuration (api_key, base_url, defaults).
            messages: Conversation messages in OpenAI chat format.
            model_settings: Optional per-request generation overrides.

        Yields:
            Dict with a "content" fragment and an estimated "token_count".

        Raises:
            Exception: Wraps any API/client error with a descriptive message.
        """
        try:
            import openai  # lazy import keeps the dependency optional

            client = openai.AsyncOpenAI(
                api_key=model_config.api_key,
                base_url=model_config.base_url
            )

            params = self._resolve_params(model_config, model_settings)

            stream = await client.chat.completions.create(
                model=model_config.model_name,
                messages=messages,
                stream=True,
                **params
            )

            async for chunk in stream:
                # Some stream chunks carry no choices (e.g. usage-only
                # chunks); guard before indexing to avoid IndexError.
                if chunk.choices and chunk.choices[0].delta.content:
                    yield {
                        "content": chunk.choices[0].delta.content,
                        "token_count": 1  # rough estimate per fragment
                    }
        except Exception as e:
            raise Exception(f"OpenAI流式API调用失败: {str(e)}") from e

    async def generate_image(
        self,
        model_config: "ModelConfig",
        prompt: str,
        image_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Generate image(s) via the OpenAI images API.

        Args:
            model_config: Model configuration (api_key, base_url, model name).
            prompt: Text prompt describing the desired image.
            image_settings: Optional dict with "size", "quality", "n", "style".

        Returns:
            Dict with "images" (list of {"url", "revised_prompt"}) and
            "metadata" describing the request parameters.

        Raises:
            Exception: Wraps any API/client error with a descriptive message.
        """
        try:
            import openai  # lazy import keeps the dependency optional

            client = openai.AsyncOpenAI(
                api_key=model_config.api_key,
                base_url=model_config.base_url
            )

            overrides = image_settings or {}
            size = overrides.get('size', '1024x1024')
            quality = overrides.get('quality', 'standard')
            n = overrides.get('n', 1)
            style = overrides.get('style')

            request_params = {
                "model": model_config.model_name,
                "prompt": prompt,
                "size": size,
                "quality": quality,
                "n": n
            }
            # "style" is only accepted by some models, so send it only when
            # explicitly requested.
            if style:
                request_params["style"] = style

            response = await client.images.generate(**request_params)

            # Not every image payload carries a revised prompt, hence getattr.
            images = [
                {
                    "url": image.url,
                    "revised_prompt": getattr(image, 'revised_prompt', None)
                }
                for image in response.data
            ]

            return {
                "images": images,
                "metadata": {
                    "model": model_config.model_name,
                    "size": size,
                    "quality": quality,
                    "n": n
                }
            }

        except Exception as e:
            raise Exception(f"OpenAI图片生成API调用失败: {str(e)}") from e


class DeepSeekProvider(BaseLLMProvider):
    """
    DeepSeek provider.

    DeepSeek exposes an OpenAI-compatible API, so the OpenAI async client is
    reused with DeepSeek's endpoint. Image generation is not supported.
    """

    async def chat_completion(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        DeepSeek chat completion (non-streaming).

        Args:
            model_config: Model configuration (api_key, base_url, defaults).
            messages: Conversation messages in OpenAI chat format.
            model_settings: Optional per-request generation overrides.

        Returns:
            Dict with "content", "token_count" and "metadata".

        Raises:
            Exception: Wraps any API/client error with a descriptive message.
        """
        try:
            import openai  # lazy import keeps the dependency optional

            client = openai.AsyncOpenAI(
                api_key=model_config.api_key,
                # Fall back to DeepSeek's public OpenAI-compatible endpoint.
                base_url=model_config.base_url or "https://api.deepseek.com"
            )

            # Per-request overrides win over the configured defaults.
            overrides = model_settings or {}
            temperature = overrides['temperature'] if 'temperature' in overrides else float(model_config.temperature)
            max_tokens = overrides['max_tokens'] if 'max_tokens' in overrides else model_config.max_tokens

            response = await client.chat.completions.create(
                model=model_config.model_name,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens
            )

            return {
                "content": response.choices[0].message.content,
                "token_count": response.usage.total_tokens if response.usage else 0,
                "metadata": {
                    "model": response.model,
                    "finish_reason": response.choices[0].finish_reason
                }
            }
        except Exception as e:
            # "from e" makes the root cause explicit in the traceback chain.
            raise Exception(f"DeepSeek API调用失败: {str(e)}") from e

    async def chat_completion_stream(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        DeepSeek streaming chat completion.

        Args:
            model_config: Model configuration (api_key, base_url, defaults).
            messages: Conversation messages in OpenAI chat format.
            model_settings: Optional per-request generation overrides.

        Yields:
            Dict with a "content" fragment and an estimated "token_count".

        Raises:
            Exception: Wraps any API/client error with a descriptive message.
        """
        try:
            import openai  # lazy import keeps the dependency optional

            client = openai.AsyncOpenAI(
                api_key=model_config.api_key,
                base_url=model_config.base_url or "https://api.deepseek.com"
            )

            overrides = model_settings or {}
            temperature = overrides['temperature'] if 'temperature' in overrides else float(model_config.temperature)
            max_tokens = overrides['max_tokens'] if 'max_tokens' in overrides else model_config.max_tokens

            stream = await client.chat.completions.create(
                model=model_config.model_name,
                messages=messages,
                temperature=temperature,
                max_tokens=max_tokens,
                stream=True
            )

            async for chunk in stream:
                # Guard against chunks without choices before indexing.
                if chunk.choices and chunk.choices[0].delta.content:
                    yield {
                        "content": chunk.choices[0].delta.content,
                        "token_count": 1  # rough estimate per fragment
                    }
        except Exception as e:
            raise Exception(f"DeepSeek流式API调用失败: {str(e)}") from e

    async def generate_image(
        self,
        model_config: "ModelConfig",
        prompt: str,
        image_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Image generation is not supported by DeepSeek; always raises."""
        raise Exception("DeepSeek暂不支持图片生成功能")


class OllamaProvider(BaseLLMProvider):
    """
    Ollama provider (locally hosted models).

    Talks to Ollama's native REST API via httpx. Image generation is not
    supported.
    """

    async def chat_completion(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Ollama chat completion (non-streaming).

        Args:
            model_config: Model configuration (base_url, defaults, timeout).
            messages: Conversation messages in chat format.
            model_settings: Optional per-request generation overrides.

        Returns:
            Dict with "content", "token_count" (Ollama's eval_count) and
            "metadata".

        Raises:
            Exception: Wraps any HTTP/parsing error with a descriptive message.
        """
        try:
            import httpx  # lazy import keeps the dependency optional

            # Default to Ollama's standard local port.
            base_url = model_config.base_url or "http://localhost:11434"

            # Per-request overrides win over the configured defaults.
            overrides = model_settings or {}
            temperature = overrides['temperature'] if 'temperature' in overrides else float(model_config.temperature)
            max_tokens = overrides['max_tokens'] if 'max_tokens' in overrides else model_config.max_tokens

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    f"{base_url}/api/chat",
                    json={
                        "model": model_config.model_name,
                        "messages": messages,
                        "stream": False,
                        "options": {
                            "temperature": temperature,
                            # Ollama's name for the max-output-token limit.
                            "num_predict": max_tokens
                        }
                    },
                    timeout=model_config.timeout
                )
                response.raise_for_status()
                result = response.json()

                return {
                    "content": result["message"]["content"],
                    "token_count": result.get("eval_count", 0),
                    "metadata": {
                        "model": result["model"],
                        "done": result["done"]
                    }
                }
        except Exception as e:
            # "from e" makes the root cause explicit in the traceback chain.
            raise Exception(f"Ollama API调用失败: {str(e)}") from e

    async def chat_completion_stream(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Ollama streaming chat completion.

        The response body is newline-delimited JSON; each line is parsed
        independently and malformed lines are skipped.

        Args:
            model_config: Model configuration (base_url, defaults, timeout).
            messages: Conversation messages in chat format.
            model_settings: Optional per-request generation overrides.

        Yields:
            Dict with a "content" fragment and an estimated "token_count".

        Raises:
            Exception: Wraps any HTTP/parsing error with a descriptive message.
        """
        try:
            import httpx  # lazy import keeps the dependency optional

            base_url = model_config.base_url or "http://localhost:11434"

            overrides = model_settings or {}
            temperature = overrides['temperature'] if 'temperature' in overrides else float(model_config.temperature)
            max_tokens = overrides['max_tokens'] if 'max_tokens' in overrides else model_config.max_tokens

            async with httpx.AsyncClient() as client:
                async with client.stream(
                    "POST",
                    f"{base_url}/api/chat",
                    json={
                        "model": model_config.model_name,
                        "messages": messages,
                        "stream": True,
                        "options": {
                            "temperature": temperature,
                            "num_predict": max_tokens
                        }
                    },
                    timeout=model_config.timeout
                ) as response:
                    response.raise_for_status()
                    async for line in response.aiter_lines():
                        if line.strip():
                            try:
                                chunk = json.loads(line)
                                if chunk.get("message", {}).get("content"):
                                    yield {
                                        "content": chunk["message"]["content"],
                                        "token_count": 1  # rough estimate
                                    }
                            except json.JSONDecodeError:
                                # Skip partial/garbled NDJSON lines.
                                continue
        except Exception as e:
            raise Exception(f"Ollama流式API调用失败: {str(e)}") from e

    async def generate_image(
        self,
        model_config: "ModelConfig",
        prompt: str,
        image_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Image generation is not supported by Ollama; always raises."""
        raise Exception("Ollama暂不支持图片生成功能")


class ZhipuProvider(BaseLLMProvider):
    """
    Zhipu AI provider.

    Only image generation is implemented; text completion endpoints raise.
    """

    async def chat_completion(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Text generation is not supported for Zhipu; always raises."""
        raise Exception("智谱AI暂不支持文本生成，请使用图片生成功能")

    async def chat_completion_stream(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """Text generation is not supported for Zhipu; always raises.

        Note: the unreachable ``yield`` below makes this function an async
        generator, matching the interface. Without it, callers doing
        ``async for`` over the return value would get a TypeError instead
        of this error message.
        """
        raise Exception("智谱AI暂不支持文本生成，请使用图片生成功能")
        yield  # pragma: no cover — never reached; marks this as an async generator

    async def generate_image(
        self,
        model_config: "ModelConfig",
        prompt: str,
        image_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Generate image(s) via the Zhipu AI HTTP API.

        Args:
            model_config: Model configuration. NOTE(review): base_url is
                assumed to be the full images endpoint URL — confirm against
                how configs are populated.
            prompt: Text prompt describing the desired image.
            image_settings: Optional dict with "size", "quality", "n".

        Returns:
            Dict with "images" (list of {"url", "revised_prompt"}) and
            "metadata" describing the request parameters.

        Raises:
            Exception: Wraps any HTTP/parsing error with a descriptive message.
        """
        try:
            import httpx  # lazy import keeps the dependency optional

            overrides = image_settings or {}
            size = overrides.get('size', '1024x1024')
            quality = overrides.get('quality', 'standard')
            n = overrides.get('n', 1)

            request_data = {
                "model": model_config.model_name,
                "prompt": prompt,
                "size": size,
                "quality": quality,
                "n": n
            }

            async with httpx.AsyncClient() as client:
                response = await client.post(
                    model_config.base_url,
                    json=request_data,
                    headers={
                        "Authorization": f"Bearer {model_config.api_key}",
                        "Content-Type": "application/json"
                    },
                    timeout=model_config.timeout
                )
                response.raise_for_status()
                result = response.json()

                # Tolerate responses without a "data" list by returning
                # an empty image set rather than crashing.
                images = [
                    {
                        "url": image_data.get('url', ''),
                        "revised_prompt": image_data.get('revised_prompt')
                    }
                    for image_data in result.get('data', [])
                ]

                return {
                    "images": images,
                    "metadata": {
                        "model": model_config.model_name,
                        "size": size,
                        "quality": quality,
                        "n": n
                    }
                }

        except Exception as e:
            # "from e" makes the root cause explicit in the traceback chain.
            raise Exception(f"智谱AI图片生成API调用失败: {str(e)}") from e


class LLMManager:
    """
    LLM manager (facade).

    Routes chat/image requests to the provider implementation matching the
    ``provider`` field of a model configuration.
    """

    def __init__(self):
        # Registry of supported provider implementations, keyed by
        # lower-case provider name.
        self.providers = {
            "openai": OpenAIProvider(),
            "deepseek": DeepSeekProvider(),
            "ollama": OllamaProvider(),
            "zhipu": ZhipuProvider()
        }

    def get_provider(self, provider_name: str) -> "BaseLLMProvider":
        """
        Return the provider instance registered under *provider_name*.

        The lookup is case-insensitive.

        Raises:
            ValueError: If no provider is registered under that name.
        """
        provider = self.providers.get(provider_name.lower())
        if not provider:
            raise ValueError(f"不支持的提供商: {provider_name}")
        return provider

    async def chat_completion(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """
        Run a non-streaming chat completion through the matching provider.

        Args:
            model_config: Model configuration; its ``provider`` field selects
                the implementation.
            messages: Conversation messages in chat format.
            model_settings: Optional per-request generation overrides.

        Returns:
            The provider's response dict ("content", "token_count", ...).
        """
        provider = self.get_provider(model_config.provider)

        # Bake overrides into a copy of the config too; providers also
        # receive model_settings directly and prefer those values, so this
        # only keeps the config's defaults consistent for the call.
        if model_settings:
            model_config = self._apply_model_settings(model_config, model_settings)

        return await provider.chat_completion(model_config, messages, model_settings)

    async def chat_completion_stream(
        self, 
        model_config: "ModelConfig", 
        messages: List[Dict[str, str]],
        model_settings: Optional[Dict[str, Any]] = None
    ) -> AsyncGenerator[Dict[str, Any], None]:
        """
        Run a streaming chat completion through the matching provider.

        Yields:
            The provider's chunk dicts unchanged.
        """
        provider = self.get_provider(model_config.provider)

        if model_settings:
            model_config = self._apply_model_settings(model_config, model_settings)

        async for chunk in provider.chat_completion_stream(model_config, messages, model_settings):
            yield chunk

    async def generate_image(
        self,
        model_config: "ModelConfig",
        prompt: str,
        image_settings: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Generate image(s) through the matching provider."""
        provider = self.get_provider(model_config.provider)
        return await provider.generate_image(model_config, prompt, image_settings)

    async def test_connection(self, model_config: "ModelConfig") -> Dict[str, Any]:
        """
        Smoke-test a model configuration with a one-word prompt.

        Never raises; failures are reported in the returned dict.

        Returns:
            Dict with "success", "message" and, on success, a truncated
            "response" preview.
        """
        try:
            test_messages = [{"role": "user", "content": "Hello"}]
            response = await self.chat_completion(model_config, test_messages)
            # "content" may legitimately be None (empty model reply); guard
            # before slicing so the connection test itself cannot crash.
            preview = (response.get("content") or "")[:100]
            return {
                "success": True,
                "message": "连接测试成功",
                "response": preview
            }
        except Exception as e:
            return {
                "success": False,
                "message": f"连接测试失败: {str(e)}"
            }

    def _apply_model_settings(
        self,
        model_config: "ModelConfig",
        model_settings: Dict[str, Any]
    ) -> "ModelConfig":
        """
        Return a copy of *model_config* with per-request overrides applied.

        The original config is never mutated. Numeric generation fields are
        stored as strings on the config object (providers call float() on
        them), hence the str() conversions below.
        """
        from copy import deepcopy
        updated_config = deepcopy(model_config)

        if model_settings.get('temperature') is not None:
            updated_config.temperature = str(model_settings['temperature'])

        if model_settings.get('max_tokens') is not None:
            updated_config.max_tokens = model_settings['max_tokens']

        if model_settings.get('top_p') is not None:
            updated_config.top_p = str(model_settings['top_p'])

        if model_settings.get('frequency_penalty') is not None:
            updated_config.frequency_penalty = str(model_settings['frequency_penalty'])

        if model_settings.get('presence_penalty') is not None:
            updated_config.presence_penalty = str(model_settings['presence_penalty'])

        return updated_config
