"""
大模型服务模块
集成OpenAI、Azure OpenAI、Anthropic、通义千问等大模型API
"""
import asyncio
import json
import time
from dataclasses import asdict, dataclass, field
from enum import Enum
from typing import Any, AsyncGenerator, Dict, List, Optional, Union

import aiohttp

from core.config import settings
from core.exceptions import AIServiceError, ValidationError
from core.logging_config import get_logger

logger = get_logger("llm_service")


class LLMProvider(str, Enum):
    """大模型提供商"""
    OPENAI = "openai"
    AZURE_OPENAI = "azure"
    ANTHROPIC = "anthropic"
    QWEN = "qwen"
    ZHIPU = "zhipu"


class LLMModelType(str, Enum):
    """模型类型"""
    GPT_35_TURBO = "gpt-3.5-turbo"
    GPT_4 = "gpt-4"
    GPT_4_TURBO = "gpt-4-turbo"
    CLAUDE_3_SONNET = "claude-3-sonnet-20240229"
    CLAUDE_3_OPUS = "claude-3-opus-20240229"
    QWEN_TURBO = "qwen-turbo"
    QWEN_PLUS = "qwen-plus"
    QWEN_MAX = "qwen-max"
    QWEN3_32B = "qwen3-32b"
    GLM_4 = "glm-4"
    GLM_4_AIR = "glm-4-air"
    GLM_4_FLASH = "glm-4-flash"
    GLM_3_TURBO = "glm-3-turbo"
    GLM_4_5_FLASH = "glm-4.5-flash"
    glm_z1_flash = "glm-z1-flash"


@dataclass
class LLMMessage:
    """LLM消息"""
    role: str  # system, user, assistant
    content: str
    name: Optional[str] = None


@dataclass
class LLMRequest:
    """LLM请求"""
    messages: List[LLMMessage]
    model: str
    temperature: float = 0.7
    max_tokens: int = 2000
    stream: bool = False
    think: str = "disabled"
    stop: Optional[List[str]] = None
    top_p: Optional[float] = None
    frequency_penalty: Optional[float] = None
    presence_penalty: Optional[float] = None
    response_format: Optional[str] = "json_object"


@dataclass
class LLMResponse:
    """LLM响应"""
    id: str
    object: str
    created: int
    model: str
    choices: List[Dict[str, Any]]
    content: str
    usage: Dict[str, int]
    system_fingerprint: Optional[str] = None
    processing_time: float = 0.0


@dataclass
class LLMStreamChunk:
    """LLM流式响应块"""
    id: str
    object: str
    created: int
    model: str
    choices: List[Dict[str, Any]]
    finish_reason: Optional[str] = None


class LLMService:
    """大模型服务基类"""
    
    def __init__(self, provider: LLMProvider):
        self.provider = provider
        self.api_key = self._get_api_key()
        self.api_base = self._get_api_base()
        self.model = self._get_model()
        self.timeout = settings.llm_timeout
        
        # HTTP客户端会话
        self._session: Optional[aiohttp.ClientSession] = None
        
    async def __aenter__(self):
        """异步上下文管理器入口"""
        await self._ensure_session()
        return self
        
    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """异步上下文管理器出口"""
        if self._session:
            await self._session.close()
    
    async def _ensure_session(self):
        """确保HTTP会话存在"""
        if self._session is None or self._session.closed:
            headers = self._get_headers()
            timeout = aiohttp.ClientTimeout(total=self.timeout)
            self._session = aiohttp.ClientSession(headers=headers, timeout=timeout)
    
    def _get_api_key(self) -> str:
        """获取API密钥"""
        if self.provider == LLMProvider.OPENAI:
            return settings.llm_api_key
        elif self.provider == LLMProvider.AZURE_OPENAI:
            return settings.llm_api_key
        elif self.provider == LLMProvider.ANTHROPIC:
            return settings.llm_api_key
        elif self.provider == LLMProvider.QWEN:
            return settings.qwen_api_key
        elif self.provider == LLMProvider.ZHIPU:
            return getattr(settings, 'zhipu_api_key', '')
        else:
            raise ValidationError(f"不支持的提供商: {self.provider}")
    
    def _get_api_base(self) -> str:
        """获取API基础URL"""
        if self.provider == LLMProvider.OPENAI:
            return settings.llm_api_base
        elif self.provider == LLMProvider.AZURE_OPENAI:
            return settings.llm_api_base
        elif self.provider == LLMProvider.ANTHROPIC:
            return "https://api.anthropic.com"
        elif self.provider == LLMProvider.QWEN:
            return "https://dashscope.aliyuncs.com/api/v1"
        elif self.provider == LLMProvider.ZHIPU:
            return "https://open.bigmodel.cn/api/paas/v4"
        else:
            raise ValidationError(f"不支持的提供商: {self.provider}")
    
    def _get_model(self) -> str:
        """获取模型名称"""
        if self.provider == LLMProvider.OPENAI:
            return settings.llm_model
        elif self.provider == LLMProvider.AZURE_OPENAI:
            return settings.azure_openai_deployment_name or settings.llm_model
        elif self.provider == LLMProvider.ANTHROPIC:
            return settings.llm_model
        elif self.provider == LLMProvider.QWEN:
            return settings.qwen_model
        elif self.provider == LLMProvider.ZHIPU:
            return settings.zhipu_model
        else:
            raise ValidationError(f"不支持的提供商: {self.provider}")
    
    def _get_headers(self) -> Dict[str, str]:
        """获取请求头"""
        headers = {"Content-Type": "application/json"}
        
        if self.provider == LLMProvider.OPENAI:
            headers["Authorization"] = f"Bearer {self.api_key}"
        elif self.provider == LLMProvider.AZURE_OPENAI:
            headers["Authorization"] = f"Bearer {self.api_key}"
            headers["api-key"] = self.api_key
        elif self.provider == LLMProvider.ANTHROPIC:
            headers["x-api-key"] = self.api_key
            headers["anthropic-version"] = "2023-06-01"
        elif self.provider == LLMProvider.QWEN:
            headers["Authorization"] = f"Bearer {self.api_key}"
        elif self.provider == LLMProvider.ZHIPU:
            headers["Authorization"] = f"Bearer {self.api_key}"
        
        return headers
    
    async def chat_completion(self, request: LLMRequest) -> LLMResponse:
        """聊天补全"""
        raise NotImplementedError("子类必须实现此方法")
    
    async def chat_completion_stream(self, request: LLMRequest) -> AsyncGenerator[LLMStreamChunk, None]:
        """流式聊天补全"""
        raise NotImplementedError("子类必须实现此方法")
    
    async def health_check(self) -> Dict[str, Any]:
        """健康检查"""
        try:
            logger.info(f"开始LLM服务健康检查: {self.provider.value}")
            
            # 简单的健康检查
            test_request = LLMRequest(
                messages=[LLMMessage(role="user", content="请回复'健康'")],
                model=self.model,
                max_tokens=10
            )
            
            # 添加超时机制，防止网络请求无限等待
            try:
                response = await asyncio.wait_for(
                    self.chat_completion(test_request),
                    timeout=10.0  # 10秒超时
                )
                
                logger.info(f"LLM服务健康检查成功: {self.provider.value}")
                return {
                    "healthy": True,
                    "provider": self.provider.value,
                    "model": self.model,
                    "response_time": response.processing_time,
                    "message": "服务正常"
                }
            except asyncio.TimeoutError:
                logger.error(f"LLM服务健康检查超时: {self.provider.value}")
                return {
                    "healthy": False,
                    "provider": self.provider.value,
                    "model": self.model,
                    "error": "健康检查超时"
                }
            
        except Exception as e:
            logger.error(f"LLM服务健康检查失败: {self.provider.value}, 错误: {str(e)}")
            return {
                "healthy": False,
                "provider": self.provider.value,
                "model": self.model,
                "error": str(e)
            }


class OpenAIService(LLMService):
    """OpenAI服务"""
    
    def __init__(self):
        super().__init__(LLMProvider.OPENAI)
    
    async def chat_completion(self, request: LLMRequest) -> LLMResponse:
        """OpenAI聊天补全"""
        await self._ensure_session()
        
        start_time = time.time()
        
        try:
            payload = {
                "model": request.model,
                "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
                "temperature": request.temperature,
                "max_tokens": request.max_tokens,
                "stream": False
            }
            
            if request.stop:
                payload["stop"] = request.stop
            if request.top_p is not None:
                payload["top_p"] = request.top_p
            if request.frequency_penalty is not None:
                payload["frequency_penalty"] = request.frequency_penalty
            if request.presence_penalty is not None:
                payload["presence_penalty"] = request.presence_penalty
            
            async with self._session.post(f"{self.api_base}/chat/completions", json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise AIServiceError(f"OpenAI API错误: {response.status} - {error_text}")
                
                data = await response.json()
                
                return LLMResponse(
                    id=data["id"],
                    object=data["object"],
                    created=data["created"],
                    model=data["model"],
                    choices=data["choices"],
                    usage=data.get("usage", {}),
                    system_fingerprint=data.get("system_fingerprint"),
                    processing_time=time.time() - start_time
                )
        
        except asyncio.TimeoutError:
            raise AIServiceError("OpenAI API请求超时")
        except Exception as e:
            logger.error(f"OpenAI聊天补全失败: {str(e)}")
            raise AIServiceError(f"OpenAI聊天补全失败: {str(e)}")
    
    async def chat_completion_stream(self, request: LLMRequest) -> AsyncGenerator[LLMStreamChunk, None]:
        """OpenAI流式聊天补全"""
        await self._ensure_session()
        
        try:
            payload = {
                "model": request.model,
                "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
                "temperature": request.temperature,
                "max_tokens": request.max_tokens,
                "stream": True
            }
            
            if request.stop:
                payload["stop"] = request.stop
            if request.top_p is not None:
                payload["top_p"] = request.top_p
            if request.frequency_penalty is not None:
                payload["frequency_penalty"] = request.frequency_penalty
            if request.presence_penalty is not None:
                payload["presence_penalty"] = request.presence_penalty
            
            async with self._session.post(f"{self.api_base}/chat/completions", json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise AIServiceError(f"OpenAI API错误: {response.status} - {error_text}")
                
                async for line in response.content:
                    line = line.decode('utf-8').strip()
                    if line.startswith("data: "):
                        data_str = line[6:]
                        if data_str == "[DONE]":
                            break
                        
                        try:
                            data = json.loads(data_str)
                            choices = data.get("choices", [])
                            if choices:
                                yield LLMStreamChunk(
                                    id=data["id"],
                                    object=data["object"],
                                    created=data["created"],
                                    model=data["model"],
                                    choices=choices,
                                    finish_reason=choices[0].get("finish_reason")
                                )
                        except json.JSONDecodeError:
                            continue
        
        except Exception as e:
            logger.error(f"OpenAI流式聊天补全失败: {str(e)}")
            raise AIServiceError(f"OpenAI流式聊天补全失败: {str(e)}")


class AzureOpenAIService(LLMService):
    """Azure OpenAI服务"""
    
    def __init__(self):
        super().__init__(LLMProvider.AZURE_OPENAI)
        self.api_version = settings.azure_openai_api_version
    
    async def chat_completion(self, request: LLMRequest) -> LLMResponse:
        """Azure OpenAI聊天补全"""
        await self._ensure_session()
        
        start_time = time.time()
        
        try:
            payload = {
                "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
                "temperature": request.temperature,
                "max_tokens": request.max_tokens,
                "stream": False
            }
            
            if request.stop:
                payload["stop"] = request.stop
            if request.top_p is not None:
                payload["top_p"] = request.top_p
            if request.frequency_penalty is not None:
                payload["frequency_penalty"] = request.frequency_penalty
            if request.presence_penalty is not None:
                payload["presence_penalty"] = request.presence_penalty
            
            url = f"{self.api_base}/openai/deployments/{self.model}/chat/completions?api-version={self.api_version}"
            
            async with self._session.post(url, json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise AIServiceError(f"Azure OpenAI API错误: {response.status} - {error_text}")
                
                data = await response.json()
                
                return LLMResponse(
                    id=data["id"],
                    object=data["object"],
                    created=data["created"],
                    model=data["model"],
                    choices=data["choices"],
                    usage=data.get("usage", {}),
                    processing_time=time.time() - start_time
                )
        
        except Exception as e:
            logger.error(f"Azure OpenAI聊天补全失败: {str(e)}")
            raise AIServiceError(f"Azure OpenAI聊天补全失败: {str(e)}")


class QwenService(LLMService):
    """通义千问服务"""
    
    def __init__(self):
        super().__init__(LLMProvider.QWEN)
    
    async def chat_completion(self, request: LLMRequest) -> LLMResponse:
        """通义千问聊天补全"""
        await self._ensure_session()
        
        start_time = time.time()
        
        try:
            payload = {
                "model": request.model,
                "input": {
                    "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages]
                },
                "parameters": {
                    "temperature": request.temperature,
                    "max_tokens": request.max_tokens
                }
            }
            
            if request.stop:
                payload["parameters"]["stop"] = request.stop
            if request.top_p is not None:
                payload["parameters"]["top_p"] = request.top_p
            
            async with self._session.post(f"{self.api_base}/services/aigc/text-generation/generation", json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise AIServiceError(f"通义千问API错误: {response.status} - {error_text}")
                
                data = await response.json()
                
                # 转换响应格式
                return LLMResponse(
                    id=data.get("request_id", ""),
                    object="chat.completion",
                    created=int(start_time),
                    model=request.model,
                    choices=[{
                        "index": 0,
                        "message": {
                            "role": "assistant",
                            "content": data.get("output", {}).get("text", "")
                        },
                        "finish_reason": data.get("output", {}).get("finish_reason", "stop")
                    }],
                    usage=data.get("usage", {}),
                    processing_time=time.time() - start_time
                )
        
        except Exception as e:
            logger.error(f"通义千问聊天补全失败: {str(e)}")
            raise AIServiceError(f"通义千问聊天补全失败: {str(e)}")


class ZhipuAIService(LLMService):
    """智谱AI服务"""
    
    def __init__(self):
        super().__init__(LLMProvider.ZHIPU)
    
    def _get_model(self) -> str:
        """获取模型名称"""
        return getattr(settings, 'zhipu_model', 'glm-4-flash')
    
    def _get_headers(self) -> Dict[str, str]:
        """获取请求头"""
        headers = {"Content-Type": "application/json"}
        headers["Authorization"] = f"Bearer {self.api_key}"
        return headers
    
    async def chat_completion(self, request: LLMRequest) -> LLMResponse:
        """智谱AI聊天补全"""
        await self._ensure_session()
        
        start_time = time.time()
        
        try:
            payload = {
                "model": request.model,
                "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
                "temperature": request.temperature,
                "max_tokens": request.max_tokens,
                "stream": False,
                "thinking": {"type": request.think}
            }
            
            if request.stop:
                payload["stop"] = request.stop
            if request.top_p is not None:
                payload["top_p"] = request.top_p
            # 结果返回格式：json
            if request.response_format == "json_object":
                payload["response_format"] = {"type": "json_object"}
            
            async with self._session.post(f"{self.api_base}/chat/completions", json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise AIServiceError(f"智谱AI API错误: {response.status} - {error_text}")
                
                data = await response.json()
                logger.info(f"智谱解析结果：{data}")
                content = data.get("choices", [{}])[0].get("message", {}).get("content", "")
                reasoning_content = data.get("choices", [{}])[0].get("message", {}).get("reasoning_content", "")
                if reasoning_content:
                    content = reasoning_content
                return LLMResponse(
                    id=data.get("id", ""),
                    object=data.get("object", "chat.completion"),
                    created=data.get("created", int(start_time)),
                    model=data.get("model", request.model),
                    choices=data.get("choices", []),
                    content=content,
                    usage=data.get("usage", {}),
                    processing_time=time.time() - start_time
                )
        
        except Exception as e:
            logger.error(f"智谱AI聊天补全失败: {str(e)}")
            raise AIServiceError(f"智谱AI聊天补全失败: {str(e)}")
    
    async def chat_completion_stream(self, request: LLMRequest) -> AsyncGenerator[LLMStreamChunk, None]:
        """智谱AI流式聊天补全"""
        await self._ensure_session()
        
        try:
            payload = {
                "model": request.model,
                "messages": [{"role": msg.role, "content": msg.content} for msg in request.messages],
                "temperature": request.temperature,
                "max_tokens": request.max_tokens,
                "stream": True
            }
            
            if request.stop:
                payload["stop"] = request.stop
            if request.top_p is not None:
                payload["top_p"] = request.top_p
            
            async with self._session.post(f"{self.api_base}/chat/completions", json=payload) as response:
                if response.status != 200:
                    error_text = await response.text()
                    raise AIServiceError(f"智谱AI API错误: {response.status} - {error_text}")
                
                async for line in response.content:
                    line = line.decode('utf-8').strip()
                    if line.startswith("data: "):
                        data_str = line[6:]
                        if data_str == "[DONE]":
                            break
                        
                        try:
                            data = json.loads(data_str)
                            choices = data.get("choices", [])
                            if choices:
                                yield LLMStreamChunk(
                                    id=data.get("id", ""),
                                    object=data.get("object", "chat.completion.chunk"),
                                    created=data.get("created", int(time.time())),
                                    model=data.get("model", request.model),
                                    choices=choices,
                                    finish_reason=choices[0].get("finish_reason")
                                )
                        except json.JSONDecodeError:
                            continue
        
        except Exception as e:
            logger.error(f"智谱AI流式聊天补全失败: {str(e)}")
            raise AIServiceError(f"智谱AI流式聊天补全失败: {str(e)}")


class LLMServiceManager:
    """大模型服务管理器"""
    
    def __init__(self):
        self._services: Dict[LLMProvider, LLMService] = {}
        self._default_provider = LLMProvider(settings.llm_provider)
        
    async def get_service(self, provider: str = "zhipu") -> LLMService:
        """获取大模型服务"""
        if provider is None:
            provider = self._default_provider
        
        if provider not in self._services:
            if provider == LLMProvider.OPENAI:
                self._services[provider] = OpenAIService()
            elif provider == LLMProvider.AZURE_OPENAI:
                self._services[provider] = AzureOpenAIService()
            elif provider == LLMProvider.QWEN:
                self._services[provider] = QwenService()
            elif provider == LLMProvider.ZHIPU:
                self._services[provider] = ZhipuAIService()
            else:
                raise ValidationError(f"不支持的提供商: {provider}")
        
        return self._services[provider]
    
    async def chat_completion(
        self, 
        messages: List[LLMMessage], 
        provider: Optional[LLMProvider] = None,
        **kwargs
    ) -> LLMResponse:
        """聊天补全"""
        service = await self.get_service(provider)
        
        request = LLMRequest(
            messages=messages,
            model=kwargs.get("model",service.model),
            temperature=kwargs.get("temperature", settings.llm_temperature),
            max_tokens=kwargs.get("max_tokens", settings.llm_max_tokens),
            stream=False,
            stop=kwargs.get("stop"),
            top_p=kwargs.get("top_p"),
            frequency_penalty=kwargs.get("frequency_penalty"),
            presence_penalty=kwargs.get("presence_penalty")
        )
        
        return await service.chat_completion(request)
    
    async def chat_completion_stream(
        self, 
        messages: List[LLMMessage], 
        provider: Optional[LLMProvider] = None,
        **kwargs
    ) -> AsyncGenerator[LLMStreamChunk, None]:
        """流式聊天补全"""
        service = await self.get_service(provider)
        
        request = LLMRequest(
            messages=messages,
            model=kwargs.get("model_code",service.model),
            temperature=kwargs.get("temperature", settings.llm_temperature),
            max_tokens=kwargs.get("max_tokens", settings.llm_max_tokens),
            stream=True,
            stop=kwargs.get("stop"),
            top_p=kwargs.get("top_p"),
            frequency_penalty=kwargs.get("frequency_penalty"),
            presence_penalty=kwargs.get("presence_penalty")
        )
        
        async for chunk in service.chat_completion_stream(request):
            yield chunk

    def _parse_response(self, content: str) -> Dict[str, Any]:
        """解析大模型响应"""
        try:
            # 尝试解析JSON
            return json.loads(content)
        except json.JSONDecodeError:
            # 如果不是JSON，返回原始内容
            return {"raw_content": content}
    
    async def health_check(self, provider: Optional[LLMProvider] = None) -> Dict[str, Any]:
        """健康检查"""
        service = await self.get_service(provider)
        return await service.health_check()


# 全局大模型服务管理器
llm_service_manager = LLMServiceManager()