"""
云端LLM服务客户端
支持OpenAI、阿里云通义千问等云端LLM API
作为本地模型的降级方案
"""

import os
import asyncio
from enum import Enum
from typing import Optional, Dict, Any, List
import aiohttp
from utils.logger import Logger
from utils.exceptions import ModelError
from config.model_config import ModelConfig

# 从共享types文件导入类型定义
from typing import Optional, Dict, Any
from middleware.types import CloudLLMType, BaseModelClientProtocol
# 移除直接导入以避免循环导入

logger = Logger.get_logger("cloud_llm_client")


class CloudLLMType(Enum):
    """Supported cloud LLM service providers.

    NOTE(review): this local definition shadows the ``CloudLLMType`` imported
    above from ``middleware.types`` — keep the two in sync.
    """

    OPENAI = "openai"
    DASHSCOPE = "dashscope"      # Alibaba Cloud Tongyi Qianwen
    ZHIPU = "zhipu"              # Zhipu AI
    TOGETHER_AI = "together_ai"  # Together AI


class CloudLLMClient:
    """Base client for cloud-hosted LLM services (OpenAI, DashScope, ...).

    Implements the ``BaseModelClient`` surface by composition rather than
    inheritance, to avoid a circular import with ``middleware.model_client``.
    Intended as the fallback path when local models are unavailable.
    """

    def __init__(self, cloud_type: CloudLLMType, config: Optional[ModelConfig] = None):
        """Create a client for the given cloud service type.

        Args:
            cloud_type: Which cloud LLM provider this client talks to.
            config: Optional model configuration, kept for interface parity
                with ``BaseModelClient``.
        """
        # Set cloud_type first: _load_config() and logging depend on it.
        self.cloud_type = cloud_type

        # Connection/request attributes, populated by _load_config().
        self.api_key: Optional[str] = None
        self.base_url: Optional[str] = None
        self.model_name: Optional[str] = None
        self.timeout = 30.0
        self._session: Optional[aiohttp.ClientSession] = None

        # Mimic BaseModelClient's attributes without inheriting from it;
        # the import is function-local to avoid a circular import.
        from middleware.model_client import ModelType
        self.model_type = ModelType.FALLBACK
        self.config = config
        self.is_healthy = True
        self.last_error = None

        self._load_config()

    def _load_config(self):
        """Load API credentials and endpoints from environment variables."""
        if self.cloud_type == CloudLLMType.OPENAI:
            self.api_key = os.getenv("OPENAI_API_KEY", "")
            self.base_url = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
            self.model_name = "gpt-3.5-turbo"
        elif self.cloud_type == CloudLLMType.DASHSCOPE:
            # Prefer ALIYUN_API_KEY; fall back to DASHSCOPE_API_KEY for compatibility.
            self.api_key = os.getenv("ALIYUN_API_KEY", os.getenv("DASHSCOPE_API_KEY", ""))
            self.region = os.getenv("ALIYUN_API_REGION", "cn-hangzhou")
            self.version = os.getenv("ALIYUN_API_VERSION", "2023-01-01")
            self.base_url = "https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation"
            self.model_name = "qwen-plus"

            # Log diagnostic configuration info without leaking the key itself.
            api_key_info = f"已设置(长度: {len(self.api_key)})" if self.api_key else "未设置"
            logger.info(f"阿里云DASHSCOPE配置: API密钥={api_key_info}, 区域={self.region}, 版本={self.version}")

        logger.info(f"云端LLM客户端初始化: {self.cloud_type.value}, model={self.model_name}")

    async def _ensure_session(self):
        """Create the shared aiohttp session if it is missing or closed."""
        if not self._session or self._session.closed:
            self._session = aiohttp.ClientSession(
                timeout=aiohttp.ClientTimeout(total=self.timeout)
            )

    async def _close_session(self):
        """Close the shared aiohttp session if it is open."""
        if self._session and not self._session.closed:
            await self._session.close()

    async def generate(self, prompt: str, **kwargs) -> str:
        """Generate text for *prompt* via the configured cloud service.

        Args:
            prompt: User prompt to send to the model.
            **kwargs: Optional overrides (``model``, ``max_tokens``, ``temperature``).

        Returns:
            The generated text.

        Raises:
            ModelError: If the provider call fails or the type is unsupported.
        """
        try:
            await self._ensure_session()

            if self.cloud_type == CloudLLMType.OPENAI:
                return await self._generate_openai(prompt, **kwargs)
            elif self.cloud_type == CloudLLMType.DASHSCOPE:
                return await self._generate_dashscope(prompt, **kwargs)
            else:
                raise NotImplementedError(f"不支持的云端LLM类型: {self.cloud_type.value}")

        except ModelError as e:
            # Provider helpers already raise a descriptive ModelError;
            # log it but do not wrap it a second time.
            logger.error(f"云端LLM生成失败 [{self.cloud_type.value}]: {str(e)}")
            raise
        except Exception as e:
            logger.error(f"云端LLM生成失败 [{self.cloud_type.value}]: {str(e)}")
            # Chain the original exception so the root cause is preserved.
            raise ModelError(f"云端LLM调用失败: {str(e)}") from e

    async def _generate_openai(self, prompt: str, **kwargs) -> str:
        """Generate text via the OpenAI chat-completions API.

        Raises:
            ModelError: If the API key is missing or the API returns non-200.
        """
        if not self.api_key:
            raise ModelError("OpenAI API密钥未配置")

        messages = [
            {"role": "system", "content": "你是一个有用的AI助手。"},
            {"role": "user", "content": prompt}
        ]

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        payload = {
            "model": kwargs.get("model", self.model_name),
            "messages": messages,
            "max_tokens": kwargs.get("max_tokens", 1024),
            "temperature": kwargs.get("temperature", 0.7),
        }

        async with self._session.post(
            f"{self.base_url}/chat/completions",
            headers=headers,
            json=payload
        ) as response:
            if response.status != 200:
                raise ModelError(f"OpenAI API错误: {response.status}, {await response.text()}")

            data = await response.json()
            return data["choices"][0]["message"]["content"]

    async def _generate_dashscope(self, prompt: str, **kwargs) -> str:
        """Generate text via Alibaba Cloud Tongyi Qianwen (DashScope).

        Raises:
            ModelError: If the API key is missing or the API returns non-200.
        """
        import time
        if not self.api_key:
            raise ModelError("阿里云通义千问API密钥未配置")

        start_time = time.time()

        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        model_name = kwargs.get("model", self.model_name)
        max_tokens = kwargs.get("max_tokens", 1024)

        # DashScope uses an input/parameters envelope rather than a flat payload.
        payload = {
            "model": model_name,
            "input": {
                "messages": [
                    {"role": "system", "content": "你是一个有用的AI助手。"},
                    {"role": "user", "content": prompt}
                ]
            },
            "parameters": {
                "max_tokens": max_tokens,
                "temperature": kwargs.get("temperature", 0.7),
            }
        }

        logger.info(f"DASHSCOPE请求参数: model={model_name}, max_tokens={max_tokens}")
        logger.info(f"DASHSCOPE请求头检查: Authorization={'已设置' if self.api_key else '未设置'}, 长度={len(self.api_key) if self.api_key else 0}字符")

        async with self._session.post(
            self.base_url,
            headers=headers,
            json=payload
        ) as response:
            end_time = time.time()
            logger.info(f"DASHSCOPE API调用耗时: {end_time - start_time:.2f}秒, 状态码: {response.status}")

            if response.status != 200:
                error_text = await response.text()
                logger.error(f"DASHSCOPE API调用失败: {response.status} {error_text}")
                raise ModelError(f"阿里云通义千问API错误: {response.status}, {error_text}")

            data = await response.json()
            logger.info(f"DASHSCOPE响应: {data}")
            return data["output"]["text"]

    async def health_check(self) -> bool:
        """Check whether the configured cloud service is reachable and authorized.

        Returns:
            True when the service looks usable. NOTE: on transport-level
            exceptions this deliberately returns True so that transient
            network issues do not disable the fallback client entirely.
        """
        try:
            if not self.api_key:
                logger.warning(f"{self.cloud_type.value} 健康检查失败: API密钥未配置")
                return False

            await self._ensure_session()

            # Provider-specific probes.
            if self.cloud_type == CloudLLMType.OPENAI:
                headers = {"Authorization": f"Bearer {self.api_key}"}
                async with self._session.get(
                    f"{self.base_url}/models",
                    headers=headers
                ) as response:
                    logger.info(f"OpenAI健康检查: 状态码={response.status}")
                    return response.status == 200
            elif self.cloud_type == CloudLLMType.DASHSCOPE:
                # For DashScope, fire a minimal generation request to validate
                # the API key end-to-end.
                try:
                    logger.info("开始DASHSCOPE健康检查...")

                    headers = {
                        "Authorization": f"Bearer {self.api_key}",
                        "Content-Type": "application/json"
                    }

                    # Use the smallest possible token budget for the probe.
                    payload = {
                        "model": self.model_name,
                        "input": {
                            "prompt": "test"
                        },
                        "parameters": {
                            "max_tokens": 1,
                            "temperature": 0.0
                        }
                    }

                    async with self._session.post(
                        self.base_url,
                        headers=headers,
                        json=payload
                    ) as response:
                        status = response.status
                        logger.info(f"DASHSCOPE健康检查: 状态码={status}")

                        # Surface the body even for 401s to aid diagnosis.
                        if status != 200:
                            error_text = await response.text()
                            logger.warning(f"DASHSCOPE健康检查失败: 状态码={status}, 响应={error_text}")

                        # Only a 200 counts as healthy.
                        return status == 200
                except Exception as e:
                    logger.error(f"DASHSCOPE健康检查异常: {str(e)}")
                    # Deliberate best-effort: treat exceptions (e.g. transient
                    # network failures) as healthy so the client stays usable.
                    logger.info("忽略DASHSCOPE健康检查异常，尝试继续使用")
                    return True

            # Services without a dedicated probe are assumed healthy.
            logger.info(f"{self.cloud_type.value} 使用默认健康检查")
            return True

        except Exception as e:
            logger.error(f"{self.cloud_type.value} 健康检查异常: {str(e)}")
            # Same best-effort policy as above: keep the client available.
            return True

    async def __aenter__(self):
        """Async context entry: make sure the HTTP session exists."""
        await self._ensure_session()
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        """Async context exit: release the HTTP session."""
        await self._close_session()


class CloudLLMFactory:
    """Factory that creates cloud LLM clients, preferring the first healthy service."""

    @staticmethod
    async def create_client(cloud_type: str = None) -> Optional[CloudLLMClient]:
        """Create a cloud LLM client.

        Args:
            cloud_type: Cloud service type name; when None, the first
                available service is selected automatically.

        Returns:
            A healthy cloud LLM client, or None if no service is available.
        """
        # Candidate services in priority order.
        services = [CloudLLMType.OPENAI, CloudLLMType.DASHSCOPE]

        # If a specific type was requested, try it first.
        if cloud_type:
            try:
                service_type = CloudLLMType(cloud_type)
                client = CloudLLMClient(service_type)
                if await client.health_check():
                    logger.info(f"使用指定的云端LLM服务: {cloud_type}")
                    return client
                # Unhealthy: release the HTTP session the health check may have opened.
                await client._close_session()
            except Exception as e:  # includes ValueError from CloudLLMType(...)
                logger.warning(f"指定的云端LLM服务不可用: {cloud_type}, {str(e)}")

        # Otherwise pick the first service that passes its health check.
        for service_type in services:
            try:
                client = CloudLLMClient(service_type)
                if await client.health_check():
                    logger.info(f"自动选择云端LLM服务: {service_type.value}")
                    return client
                # Discarded client: close its session to avoid a leak.
                await client._close_session()
            except Exception as e:
                logger.debug(f"云端LLM服务不可用: {service_type.value}, {str(e)}")

        logger.warning("没有可用的云端LLM服务")
        return None


# 全局云端LLM管理器
class CloudLLMManager:
    """Manages cloud LLM clients and the fallback usage policy."""

    def __init__(self):
        # Healthy, pre-initialized clients keyed by service type.
        self._clients: Dict[CloudLLMType, CloudLLMClient] = {}
        # When False, get_fallback_client() always returns None.
        self._fallback_enabled = True

    async def initialize(self):
        """Probe every known service type and cache the healthy clients."""
        for service_type in CloudLLMType:
            try:
                client = CloudLLMClient(service_type)
                if await client.health_check():
                    self._clients[service_type] = client
                    logger.info(f"云端LLM服务可用: {service_type.value}")
                else:
                    # Discarded client: close any session opened by the probe.
                    await client._close_session()
            except Exception as e:
                logger.debug(f"初始化云端LLM服务失败: {service_type.value}, {str(e)}")

    async def get_fallback_client(self) -> Optional[CloudLLMClient]:
        """Return a healthy fallback client, or None if disabled/unavailable."""
        if not self._fallback_enabled:
            return None

        # Prefer clients that were already initialized.
        for client in self._clients.values():
            if await client.health_check():
                return client

        # Otherwise try to create a fresh one on demand.
        return await CloudLLMFactory.create_client()

    def set_fallback_enabled(self, enabled: bool):
        """Enable or disable the cloud fallback path."""
        self._fallback_enabled = enabled
        logger.info(f"云端LLM降级功能已{'启用' if enabled else '禁用'}")


# Module-level singleton manager instance, shared via get_cloud_llm_manager().
cloud_llm_manager = CloudLLMManager()


def get_cloud_llm_manager() -> CloudLLMManager:
    """Return the module-level CloudLLMManager singleton."""
    return cloud_llm_manager


async def init_cloud_llm():
    """Initialize the global cloud LLM manager and log completion."""
    await get_cloud_llm_manager().initialize()
    logger.info("云端LLM服务初始化完成")