"""
AI大模型API统一路由接口
支持多平台切换：通义千问、智谱GLM、OpenAI等
"""

import os
import json
import time
import yaml
import logging
from typing import Dict, Any, Optional, List, Iterator
from dataclasses import dataclass
from abc import ABC, abstractmethod
import requests
from datetime import datetime, timedelta
import hashlib
from functools import wraps

# Optional dependency: the ZhipuAI client from zai-sdk; the GLM providers
# below require it and raise at construction time when it is missing.
try:
    from zai import ZhipuAiClient
    ZAI_AVAILABLE = True
except ImportError:
    ZAI_AVAILABLE = False
    print("警告：未安装zai-sdk包，GLM功能将受限")

# Configure logging.
# NOTE(review): basicConfig at import time mutates the process-wide root
# logger — consider leaving configuration to the application entry point.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

@dataclass
class AIRequest:
    """A single request routed through AIRouter.process_request."""
    function: str                              # logical function label, e.g. 'chat'
    prompt: str                                # user prompt text sent to the provider
    context: Optional[Dict[str, Any]] = None   # extra context; not consumed in this module — TODO confirm downstream use
    max_tokens: Optional[int] = None           # None is intended to mean "use the provider's config default"
    temperature: Optional[float] = None        # None is intended to mean "use the provider's config default"

@dataclass
class AIResponse:
    """Normalized response returned by every provider."""
    content: str              # generated text
    model: str                # model identifier used for the call
    provider: str             # provider key, e.g. 'qwen', 'glm', 'openai'
    usage: Dict[str, int]     # token usage as reported by the provider (may be empty)
    response_time: float      # wall-clock seconds for the API call
    cached: bool = False      # set to True by SimpleCache.get on a cache hit

class BaseProvider(ABC):
    """Abstract base for AI providers.

    Reads common settings (base_url, timeout, retry_count) from the config
    dict and resolves the API key from a provider-specific environment
    variable based on ``config['name']``.
    """

    def __init__(self, config: Dict[str, Any]):
        self.config = config
        self.name = config.get('name', 'Unknown')
        self.api_key = self._get_api_key()
        self.base_url = config.get('base_url', '')
        self.timeout = config.get('timeout', 30)
        self.retry_count = config.get('retry_count', 3)

    def _get_api_key(self) -> str:
        """Resolve the API key from the environment variable mapped to this provider.

        Returns an empty string (with a warning) when the variable is unset,
        and an empty string silently when the provider name is unknown.
        """
        env_mapping = {
            'qwen': 'DASHSCOPE_API_KEY',
            'glm': 'ZHIPU_API_KEY',
            # Bug fix: ZhipuAIProvider is registered under the name 'zhipu'
            # but had no mapping here, so its key was never read from the
            # environment. It shares the GLM credential.
            'zhipu': 'ZHIPU_API_KEY',
            'openai': 'OPENAI_API_KEY',
            'modelscope': 'MODELSCOPE_API_KEY'
        }

        provider_key = env_mapping.get(self.name.lower())
        if provider_key:
            api_key = os.getenv(provider_key, '')
            if not api_key:
                logger.warning(f"未找到环境变量 {provider_key}")
            return api_key
        return ''

    @abstractmethod
    def chat_completion(self, prompt: str, **kwargs) -> AIResponse:
        """Abstract: perform one chat completion and return an AIResponse."""
        pass

class QwenProvider(BaseProvider):
    """Tongyi Qianwen (DashScope) provider using the REST text-generation API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        # Model chosen from the 'analysis' slot of the config's models map.
        self.model = config.get('models', {}).get('analysis', 'qwen-turbo')

    def chat_completion(self, prompt: str, **kwargs) -> AIResponse:
        """Call the DashScope generation endpoint with retry and exponential backoff.

        Raises the last exception (or a generic Exception for persistent
        non-200 responses) after ``retry_count`` attempts.
        """
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

        # Bug fix: AIRouter.process_request passes max_tokens=None /
        # temperature=None explicitly, and kwargs.get(key, default) returns
        # None for a present key — which sent null to the API instead of the
        # configured defaults. Treat an explicit None as "not set".
        max_tokens = kwargs.get('max_tokens')
        if max_tokens is None:
            max_tokens = self.config.get('max_tokens', 1500)
        temperature = kwargs.get('temperature')
        if temperature is None:
            temperature = self.config.get('temperature', 0.7)

        payload = {
            'model': self.model,
            'input': {
                'messages': [
                    {'role': 'system', 'content': '你是一个供应链分析专家，请提供专业的分析和建议。'},
                    {'role': 'user', 'content': prompt}
                ]
            },
            'parameters': {
                'max_tokens': max_tokens,
                'temperature': temperature
            }
        }

        start_time = time.time()

        for attempt in range(self.retry_count):
            try:
                response = requests.post(
                    f"{self.base_url}/services/aigc/text-generation/generation",
                    headers=headers,
                    json=payload,
                    timeout=self.timeout
                )

                if response.status_code == 200:
                    result = response.json()
                    content = result.get('output', {}).get('text', '')
                    usage = result.get('usage', {})

                    return AIResponse(
                        content=content,
                        model=self.model,
                        provider='qwen',
                        usage=usage,
                        response_time=time.time() - start_time
                    )
                # Non-200: log and fall through to retry.
                logger.error(f"通义千问API错误: {response.status_code} - {response.text}")

            except Exception as e:
                logger.error(f"通义千问请求失败 (尝试 {attempt + 1}): {str(e)}")
                if attempt == self.retry_count - 1:
                    raise
            # Exponential backoff between attempts. Bug fix: non-200 responses
            # previously retried immediately with no delay.
            if attempt < self.retry_count - 1:
                time.sleep(2 ** attempt)

        raise Exception("通义千问API调用失败")

class GLMProvider(BaseProvider):
    """Zhipu GLM provider backed by zai-sdk."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.model = config.get('models', {}).get('analysis', 'glm-4.5')
        if not ZAI_AVAILABLE:
            raise ImportError("需要安装zai-sdk: pip install zai-sdk")
        # Create the client once; request timeout comes from the shared config.
        self.client = ZhipuAiClient(
            api_key=self.api_key,
            timeout=self.timeout
        )

    def chat_completion(self, prompt: str, **kwargs) -> AIResponse:
        """Non-streaming GLM chat completion with retry and exponential backoff.

        Falls back to the model's reasoning_content when content is empty
        (thinking-style responses); raises after ``retry_count`` failures.
        """
        start_time = time.time()

        messages = [
            {'role': 'system', 'content': '你是一个供应链分析专家，请提供专业的分析和建议。'},
            {'role': 'user', 'content': prompt}
        ]

        # Bug fix: an explicit None (as passed by AIRouter.process_request)
        # must fall back to the config defaults instead of reaching the SDK.
        max_tokens = kwargs.get('max_tokens')
        if max_tokens is None:
            max_tokens = self.config.get('max_tokens', 1500)
        temperature = kwargs.get('temperature')
        if temperature is None:
            temperature = self.config.get('temperature', 0.7)

        for attempt in range(self.retry_count):
            try:
                response = self.client.chat.completions.create(
                    model=self.model,
                    messages=messages,
                    max_tokens=max_tokens,
                    temperature=temperature,
                    stream=False
                )

                if hasattr(response, 'choices') and response.choices:
                    message = response.choices[0].message

                    # Prefer content; use reasoning_content when content is empty.
                    content = getattr(message, 'content', '')
                    reasoning_content = getattr(message, 'reasoning_content', '')

                    if content and content.strip():
                        final_content = content.strip()
                    elif reasoning_content and reasoning_content.strip():
                        final_content = reasoning_content.strip()
                    else:
                        raise Exception("GLM返回空响应")

                    usage_obj = getattr(response, 'usage', None)
                    usage = {
                        'prompt_tokens': getattr(usage_obj, 'prompt_tokens', 0) if usage_obj else 0,
                        'completion_tokens': getattr(usage_obj, 'completion_tokens', 0) if usage_obj else 0,
                        'total_tokens': getattr(usage_obj, 'total_tokens', 0) if usage_obj else 0
                    }

                    return AIResponse(
                        content=final_content,
                        model=self.model,
                        provider='glm',
                        usage=usage,
                        response_time=time.time() - start_time
                    )
                else:
                    logger.error(f"智谱GLM API响应格式错误: {response}")

            except Exception as e:
                logger.error(f"智谱GLM请求失败 (尝试 {attempt + 1}): {str(e)}")
                if attempt == self.retry_count - 1:
                    raise
                time.sleep(2 ** attempt)

        raise Exception("智谱GLM API调用失败，已达到最大重试次数")

class OpenAIProvider(BaseProvider):
    """OpenAI provider using the REST chat-completions API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.model = config.get('models', {}).get('analysis', 'gpt-3.5-turbo')

    def chat_completion(self, prompt: str, **kwargs) -> AIResponse:
        """Call the chat-completions endpoint with retry and exponential backoff.

        Raises the last exception (or a generic Exception for persistent
        non-200 responses) after ``retry_count`` attempts.
        """
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

        # Bug fix: an explicit None from callers (AIRouter.process_request)
        # previously overrode the config defaults via kwargs.get(key, default),
        # sending null to the API.
        max_tokens = kwargs.get('max_tokens')
        if max_tokens is None:
            max_tokens = self.config.get('max_tokens', 1500)
        temperature = kwargs.get('temperature')
        if temperature is None:
            temperature = self.config.get('temperature', 0.7)

        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': '你是一个供应链分析专家，请提供专业的分析和建议。'},
                {'role': 'user', 'content': prompt}
            ],
            'max_tokens': max_tokens,
            'temperature': temperature
        }

        start_time = time.time()

        for attempt in range(self.retry_count):
            try:
                response = requests.post(
                    f"{self.base_url}/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=self.timeout
                )

                if response.status_code == 200:
                    result = response.json()
                    content = result.get('choices', [{}])[0].get('message', {}).get('content', '')
                    usage = result.get('usage', {})

                    return AIResponse(
                        content=content,
                        model=self.model,
                        provider='openai',
                        usage=usage,
                        response_time=time.time() - start_time
                    )
                # Non-200: log and fall through to retry.
                logger.error(f"OpenAI API错误: {response.status_code} - {response.text}")

            except Exception as e:
                logger.error(f"OpenAI请求失败 (尝试 {attempt + 1}): {str(e)}")
                if attempt == self.retry_count - 1:
                    raise
            # Exponential backoff between attempts. Bug fix: non-200 responses
            # previously retried immediately with no delay.
            if attempt < self.retry_count - 1:
                time.sleep(2 ** attempt)

        raise Exception("OpenAI API调用失败")


class ModelScopeProvider(BaseProvider):
    """ModelScope provider using the OpenAI-compatible chat-completions API."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        self.model = config.get('models', {}).get('analysis', 'Qwen/Qwen2.5-Coder-32B-Instruct')

    def chat_completion(self, prompt: str, **kwargs) -> AIResponse:
        """Call the chat-completions endpoint with retry and exponential backoff.

        Raises the last exception (or a generic Exception for persistent
        non-200 responses) after ``retry_count`` attempts.
        """
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }

        # Bug fix: an explicit None from callers (AIRouter.process_request)
        # previously overrode the config defaults via kwargs.get(key, default),
        # sending null to the API.
        max_tokens = kwargs.get('max_tokens')
        if max_tokens is None:
            max_tokens = self.config.get('max_tokens', 8192)
        temperature = kwargs.get('temperature')
        if temperature is None:
            temperature = self.config.get('temperature', 0.7)

        payload = {
            'model': self.model,
            'messages': [
                {'role': 'system', 'content': '你是一个供应链分析专家，请提供专业的分析和建议。'},
                {'role': 'user', 'content': prompt}
            ],
            'max_tokens': max_tokens,
            'temperature': temperature
        }

        start_time = time.time()

        for attempt in range(self.retry_count):
            try:
                response = requests.post(
                    f"{self.base_url}/chat/completions",
                    headers=headers,
                    json=payload,
                    timeout=self.timeout
                )

                if response.status_code == 200:
                    result = response.json()
                    content = result.get('choices', [{}])[0].get('message', {}).get('content', '')
                    usage = result.get('usage', {})

                    return AIResponse(
                        content=content,
                        model=self.model,
                        provider='modelscope',
                        usage=usage,
                        response_time=time.time() - start_time
                    )
                # Non-200: log and fall through to retry.
                logger.error(f"ModelScope API错误: {response.status_code} - {response.text}")

            except Exception as e:
                logger.error(f"ModelScope请求失败 (尝试 {attempt + 1}): {str(e)}")
                if attempt == self.retry_count - 1:
                    raise
            # Exponential backoff between attempts. Bug fix: non-200 responses
            # previously retried immediately with no delay.
            if attempt < self.retry_count - 1:
                time.sleep(2 ** attempt)

        raise Exception("ModelScope API调用失败")

class ZhipuAIProvider(BaseProvider):
    """Zhipu GLM provider via the ZhipuAI client, with optional deep thinking."""

    def __init__(self, config: Dict[str, Any]):
        super().__init__(config)
        if not ZAI_AVAILABLE:
            raise Exception("zai-sdk包未安装，请运行: pip install zai-sdk")

        self.model = config.get('models', {}).get('analysis', 'glm-4')
        self.client = ZhipuAiClient(api_key=self.api_key)

    def chat_completion(self, prompt: str, **kwargs) -> AIResponse:
        """Single-shot GLM chat completion; kwargs['thinking'] toggles deep reasoning.

        Raises a wrapped Exception on any SDK failure.
        """
        messages = [
            {'role': 'system', 'content': '你是一个供应链分析专家，请提供专业的分析和建议。'},
            {'role': 'user', 'content': prompt}
        ]

        # Bug fix: an explicit None (as passed by AIRouter.process_request)
        # must fall back to the config defaults rather than reach the SDK.
        max_tokens = kwargs.get('max_tokens')
        if max_tokens is None:
            max_tokens = self.config.get('max_tokens', 4096)
        temperature = kwargs.get('temperature')
        if temperature is None:
            temperature = self.config.get('temperature', 0.7)

        start_time = time.time()

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                thinking={"type": "enabled"} if kwargs.get('thinking', True) else None
            )

            message = response.choices[0].message
            content = message.content
            if not content:
                # Consistency with GLMProvider: thinking-enabled models may
                # return their text in reasoning_content while content is
                # empty/None — TODO confirm against the zai-sdk response model.
                content = getattr(message, 'reasoning_content', '') or ''
            usage = {
                'prompt_tokens': response.usage.prompt_tokens,
                'completion_tokens': response.usage.completion_tokens,
                'total_tokens': response.usage.total_tokens
            }

            return AIResponse(
                content=content,
                model=self.model,
                provider='zhipu',
                usage=usage,
                response_time=time.time() - start_time
            )

        except Exception as e:
            logger.error(f"智谱GLM请求失败: {str(e)}")
            raise Exception(f"智谱GLM API调用失败: {str(e)}")

    def chat_completion_stream(self, prompt: str, **kwargs) -> Iterator[str]:
        """Stream GLM output as text chunks; yields an error string on failure."""
        messages = [
            {'role': 'system', 'content': '你是一个供应链分析专家，请提供专业的分析和建议。'},
            {'role': 'user', 'content': prompt}
        ]

        # Same explicit-None fallback as chat_completion.
        max_tokens = kwargs.get('max_tokens')
        if max_tokens is None:
            max_tokens = self.config.get('max_tokens', 4096)
        temperature = kwargs.get('temperature')
        if temperature is None:
            temperature = self.config.get('temperature', 0.7)

        try:
            response = self.client.chat.completions.create(
                model=self.model,
                messages=messages,
                max_tokens=max_tokens,
                temperature=temperature,
                stream=True,
                thinking={"type": "enabled"} if kwargs.get('thinking', True) else None
            )

            for chunk in response:
                if chunk.choices and chunk.choices[0].delta.content:
                    yield chunk.choices[0].delta.content

        except Exception as e:
            yield f"错误: {str(e)}"

class SimpleCache:
    """In-memory TTL cache for AI responses.

    Entries expire ``ttl`` seconds after insertion (checked lazily on read).
    When inserting a new key at capacity, the entry with the oldest insertion
    timestamp is evicted (approximate LRU — insertion order, not access order).
    """

    def __init__(self, ttl: int = 1800, max_size: int = 1000):
        self.cache = {}           # key -> {'response', 'timestamp', 'expires'}
        self.ttl = ttl            # time-to-live in seconds
        self.max_size = max_size  # capacity before eviction kicks in

    def _generate_key(self, prompt: str, provider: str, model: str) -> str:
        """Derive a stable key from prompt/provider/model (md5: non-security use)."""
        content = f"{prompt}:{provider}:{model}"
        return hashlib.md5(content.encode()).hexdigest()

    def get(self, prompt: str, provider: str, model: str) -> Optional["AIResponse"]:
        """Return the cached response (marked cached=True) or None if absent/expired."""
        key = self._generate_key(prompt, provider, model)
        entry = self.cache.get(key)
        if entry is None:
            return None
        if datetime.now() >= entry['expires']:
            # Lazy expiry: purge stale entries on access.
            del self.cache[key]
            return None
        response = entry['response']
        response.cached = True
        return response

    def set(self, prompt: str, provider: str, model: str, response: "AIResponse"):
        """Store a response, evicting the oldest entry only when a NEW key is inserted at capacity."""
        key = self._generate_key(prompt, provider, model)
        # Bug fix: overwriting an existing key previously evicted an unrelated
        # (oldest) entry even though the cache size would not grow.
        if key not in self.cache and len(self.cache) >= self.max_size:
            oldest_key = min(self.cache, key=lambda k: self.cache[k]['timestamp'])
            del self.cache[oldest_key]

        now = datetime.now()
        self.cache[key] = {
            'response': response,
            'timestamp': now,
            'expires': now + timedelta(seconds=self.ttl)
        }

class AIRouter:
    """Unified router over the configured AI providers.

    Loads a YAML config, instantiates providers, and serves requests through
    rate limiting and a TTL cache.
    """

    def __init__(self, config_path: str = "config/ai_config.yaml"):
        self.config = self._load_config(config_path)
        self.providers = self._init_providers()
        self.cache = SimpleCache(
            ttl=self.config.get('cache', {}).get('ai_response_ttl', 1800),
            max_size=self.config.get('cache', {}).get('max_cache_size', 1000)
        )
        self.rate_limiter = self._init_rate_limiter()

    @staticmethod
    def _provider_classes() -> Dict[str, type]:
        """Single source of truth: provider key -> implementation class.

        Kept inside a method (not a class attribute) so the classes are
        resolved lazily, and shared by create_provider and _init_providers,
        which previously duplicated this mapping.
        """
        return {
            'qwen': QwenProvider,
            'glm': GLMProvider,
            'modelscope': ModelScopeProvider,
            'openai': OpenAIProvider,
            'zhipu': ZhipuAIProvider
        }

    @staticmethod
    def create_provider(provider_name: str, config: Dict[str, Any]) -> BaseProvider:
        """Instantiate a provider by key; raises ValueError for unknown keys."""
        provider_cls = AIRouter._provider_classes().get(provider_name)
        if provider_cls is None:
            raise ValueError(f"不支持的AI提供商: {provider_name}")
        return provider_cls(config)

    def _load_config(self, config_path: str) -> Dict[str, Any]:
        """Load the YAML config; re-raises on any failure after logging."""
        try:
            with open(config_path, 'r', encoding='utf-8') as f:
                return yaml.safe_load(f)
        except Exception as e:
            logger.error(f"加载AI配置文件失败: {str(e)}")
            raise

    def _init_providers(self) -> Dict[str, BaseProvider]:
        """Instantiate every configured provider; unknown keys are skipped,
        construction failures are logged and skipped."""
        providers = {}
        provider_classes = self._provider_classes()

        for provider_name, config in self.config.get('providers', {}).items():
            if provider_name not in provider_classes:
                continue
            try:
                providers[provider_name] = provider_classes[provider_name](config)
                logger.info(f"初始化 {provider_name} 提供商成功")
            except Exception as e:
                logger.error(f"初始化 {provider_name} 提供商失败: {str(e)}")

        return providers

    def _init_rate_limiter(self):
        """Build the in-memory rate-limiter state from config."""
        rate_config = self.config.get('rate_limit', {})
        return {
            'requests_per_minute': rate_config.get('requests_per_minute', 60),
            'requests_per_hour': rate_config.get('requests_per_hour', 1000),
            'request_times': []
        }

    def switch_provider(self, provider_name: str) -> bool:
        """Make ``provider_name`` the active provider; returns False if unavailable."""
        if provider_name in self.providers:
            self.config['active_provider'] = provider_name
            logger.info(f"切换到 {provider_name} 提供商")
            return True
        else:
            logger.error(f"提供商 {provider_name} 不存在或未正确初始化")
            return False

    def get_active_provider(self) -> BaseProvider:
        """Return the active provider, falling back to any initialized one."""
        active_name = self.config.get('active_provider', 'qwen')
        if active_name in self.providers:
            return self.providers[active_name]
        # Fall back to the first provider that did initialize.
        if self.providers:
            return next(iter(self.providers.values()))
        raise Exception("没有可用的AI提供商")

    def _check_rate_limit(self) -> bool:
        """Sliding-window limiter; records the request time when allowed."""
        now = datetime.now()
        # Drop timestamps older than the hourly window.
        self.rate_limiter['request_times'] = [
            t for t in self.rate_limiter['request_times']
            if now - t < timedelta(hours=1)
        ]

        if len(self.rate_limiter['request_times']) >= self.rate_limiter['requests_per_hour']:
            return False

        recent_requests = [
            t for t in self.rate_limiter['request_times']
            if now - t < timedelta(minutes=1)
        ]

        if len(recent_requests) >= self.rate_limiter['requests_per_minute']:
            return False

        self.rate_limiter['request_times'].append(now)
        return True

    def process_request(self, request: AIRequest) -> AIResponse:
        """Route one request: rate-limit, cache lookup, provider call, cache store."""
        if not self._check_rate_limit():
            raise Exception("超过请求频率限制")

        provider = self.get_active_provider()
        provider_key = provider.name.lower()
        model_name = getattr(provider, 'model', 'default')

        cached_response = self.cache.get(request.prompt, provider_key, model_name)
        if cached_response:
            logger.info("使用缓存的AI响应")
            return cached_response

        # Bug fix: only forward generation parameters that were explicitly set.
        # Passing max_tokens=None / temperature=None made the providers'
        # kwargs.get(key, default) return None, overriding their config defaults.
        call_kwargs = {}
        if request.max_tokens is not None:
            call_kwargs['max_tokens'] = request.max_tokens
        if request.temperature is not None:
            call_kwargs['temperature'] = request.temperature

        response = provider.chat_completion(prompt=request.prompt, **call_kwargs)

        self.cache.set(request.prompt, provider_key, model_name, response)
        return response

# Module-level router singleton (created lazily).
ai_router = None

def get_ai_router() -> AIRouter:
    """Return the shared AIRouter instance, building it on first use."""
    global ai_router
    if ai_router is not None:
        return ai_router
    ai_router = AIRouter()
    return ai_router

# Convenience helpers
def ai_chat(prompt: str, provider: str = None, **kwargs) -> str:
    """Route a chat prompt through the shared AIRouter and return the text.

    Optionally switches the active provider first; remaining keyword args
    are forwarded to AIRequest (e.g. max_tokens, temperature).
    """
    router = get_ai_router()
    if provider:
        router.switch_provider(provider)
    chat_request = AIRequest(function='chat', prompt=prompt, **kwargs)
    return router.process_request(chat_request).content

if __name__ == "__main__":
    # Smoke test: list providers, show the active one, then issue one request.
    router = get_ai_router()

    print("可用提供商:", list(router.providers.keys()))
    print("当前激活:", router.config.get('active_provider'))

    test_prompt = "请分析供应链中的库存优化策略"
    try:
        answer = ai_chat(test_prompt)
        print("AI响应:", answer[:100] + "...")
    except Exception as e:
        print("测试失败:", str(e))