"""
并行模型生成器，用于多模型对比功能。
处理多个AI模型的并发生成，包含超时和错误处理。
"""

import asyncio
import logging
import re
import time
import traceback
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from enum import Enum
from typing import Dict, List, Any, Optional, Callable

from langchain.chains import LLMChain
from langchain.prompts import ChatPromptTemplate
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain_openai import ChatOpenAI
from langchain_community.llms import QianfanLLMEndpoint

# Import the comparison configuration
try:
    from services.ai.src.config.comparison_config import comparison_config
except ImportError:
    # Fallback shim with default values, used when the real config module
    # is not importable. Only class attributes are read, so a bare class
    # works as a stand-in for the config object.
    class comparison_config:
        MAX_CONCURRENT_MODELS = 4
        DEFAULT_TIMEOUT = 30.0
        USER_CONCURRENCY_LIMIT = 3

logger = logging.getLogger(__name__)


class ModelStatus(Enum):
    """Lifecycle states of a single model-generation task."""

    PENDING = "pending"        # queued, not yet started
    GENERATING = "generating"  # request in flight
    COMPLETED = "completed"    # finished successfully
    FAILED = "failed"          # raised an error
    TIMEOUT = "timeout"        # exceeded the configured time limit


@dataclass
class ModelConfig:
    """Configuration for one model in a comparison run.

    Attributes:
        name: Registry key of the model; must match a key in
            ParallelModelGenerator.models or generation fails.
        version: Model version label.
        parameters: Provider-specific generation parameters; echoed back
            in the result metadata.
        timeout: Per-request timeout in seconds.
        retry_count: Intended retry count.
            NOTE(review): not referenced anywhere in this module — confirm
            it is consumed by a caller.
    """
    name: str
    version: str
    parameters: Dict[str, Any]
    timeout: float = 30.0  # seconds
    retry_count: int = 1


@dataclass
class GenerationResult:
    """Result of one model's generation attempt.

    Attributes:
        model: Name of the model that produced this result.
        content: Generated text; empty string on failure or timeout.
        tokens: Estimated token count of ``content`` (0 on failure).
        time_taken: Wall-clock duration in seconds.
        status: Final ModelStatus of the task.
        error: Error description when status is FAILED or TIMEOUT,
            otherwise None.
        metadata: Optional extra data (e.g. the generation parameters).
    """
    model: str
    content: str
    tokens: int
    time_taken: float
    status: ModelStatus
    error: Optional[str] = None
    # Annotation fix: the default is None, so the type must be Optional;
    # the previous `Dict[str, Any] = None` annotation was wrong.
    metadata: Optional[Dict[str, Any]] = None


class ParallelModelGenerator:
    """
    Manage parallel generation across multiple language models.

    Runs one asyncio task per requested model, bounded by a concurrency
    semaphore and an API rate limiter, with per-model timeout and error
    handling. Failures are reported in the results, never raised.
    """

    def __init__(self,
                 max_concurrent: int = None,
                 default_timeout: float = None,
                 rate_limit_per_minute: int = 60):
        """
        Initialize the parallel generator.

        Args:
            max_concurrent: Maximum number of concurrent model calls
                (uses the configured default when None).
            default_timeout: Default per-model timeout in seconds
                (uses the configured default when None).
            rate_limit_per_minute: API call rate limit.
        """
        # Explicit `is None` checks (instead of `or`) so an intentional
        # value of 0 is not silently replaced by the configured default.
        self.max_concurrent = (comparison_config.MAX_CONCURRENT_MODELS
                               if max_concurrent is None else max_concurrent)
        self.default_timeout = (comparison_config.DEFAULT_TIMEOUT
                                if default_timeout is None else default_timeout)
        self.rate_limit_per_minute = rate_limit_per_minute
        # Bug fix: the semaphore was previously built from the raw
        # `max_concurrent` argument, which is None by default and is not a
        # valid asyncio.Semaphore value; it must use the resolved setting.
        self.semaphore = asyncio.Semaphore(self.max_concurrent)
        self.rate_limiter = AsyncRateLimiter(rate_limit_per_minute)
        self.models = self._initialize_models()

    def _initialize_models(self) -> Dict[str, Any]:
        """Instantiate the available language models.

        Providers whose client construction fails are logged and skipped,
        so the generator degrades gracefully when credentials are missing.
        """
        models = {}

        # OpenAI chat models
        try:
            models["gpt-4"] = ChatOpenAI(
                model="gpt-4",
                temperature=0.7,
                streaming=True,
                request_timeout=30
            )
            models["gpt-3.5-turbo"] = ChatOpenAI(
                model="gpt-3.5-turbo",
                temperature=0.7,
                streaming=True,
                request_timeout=30
            )
        except Exception as e:
            logger.warning(f"初始化OpenAI模型失败: {e}")

        # Qianfan-hosted models.
        # NOTE(review): the original comment attributed these to Aliyun, but
        # QianfanLLMEndpoint is Baidu Qianfan — confirm the intended provider.
        try:
            models["qwen-max"] = QianfanLLMEndpoint(
                model="qwen-max",
                temperature=0.7
            )
            models["ernie-bot-4"] = QianfanLLMEndpoint(
                model="ernie-bot-4",
                temperature=0.7
            )
        except Exception as e:
            logger.warning(f"初始化千帆模型失败: {e}")

        return models

    async def generate_parallel(self,
                               prompt: str,
                               model_configs: List[ModelConfig],
                               system_prompt: Optional[str] = None,
                               callback: Optional[Callable] = None) -> List[GenerationResult]:
        """
        Generate content from multiple models in parallel.

        Args:
            prompt: User prompt sent to every model.
            model_configs: List of model configurations.
            system_prompt: Optional system prompt shared by all models.
            callback: Optional async progress callback, awaited as
                ``callback(model_name, ModelStatus)``.

        Returns:
            One GenerationResult per entry in model_configs, in order.
        """
        tasks = [
            asyncio.create_task(
                self._generate_with_model(
                    prompt=prompt,
                    config=config,
                    system_prompt=system_prompt,
                    callback=callback
                )
            )
            for config in model_configs
        ]

        # return_exceptions=True: one crashing task must not abort the
        # whole comparison run.
        completed_results = await asyncio.gather(*tasks, return_exceptions=True)

        results = []
        for config, result in zip(model_configs, completed_results):
            if isinstance(result, Exception):
                # The task raised outside _generate_with_model's own
                # error handling; report it as a failed result.
                results.append(GenerationResult(
                    model=config.name,
                    content="",
                    tokens=0,
                    time_taken=0,
                    status=ModelStatus.FAILED,
                    error=str(result)
                ))
            else:
                results.append(result)

        return results

    async def _generate_with_model(self,
                                  prompt: str,
                                  config: ModelConfig,
                                  system_prompt: Optional[str] = None,
                                  callback: Optional[Callable] = None) -> GenerationResult:
        """
        Generate content with a single model.

        Args:
            prompt: User prompt.
            config: Model configuration (name, parameters, timeout).
            system_prompt: Optional system prompt.
            callback: Optional async progress callback.

        Returns:
            A GenerationResult; failures and timeouts are reported via the
            result's status/error fields rather than raised.
        """
        async with self.semaphore:  # cap concurrent executions
            await self.rate_limiter.acquire()  # honour the API rate limit

            start_time = time.time()

            try:
                if callback:
                    await callback(config.name, ModelStatus.GENERATING)

                # Resolve the model from the registry.
                if config.name not in self.models:
                    raise ValueError(f"模型 {config.name} 不可用")

                model = self.models[config.name]

                # Build the prompt template, with or without a system prompt.
                if system_prompt:
                    prompt_template = ChatPromptTemplate.from_messages([
                        ("system", system_prompt),
                        ("user", prompt)
                    ])
                else:
                    prompt_template = ChatPromptTemplate.from_messages([
                        ("user", prompt)
                    ])

                chain = LLMChain(llm=model, prompt=prompt_template)

                # Run the generation under the per-model timeout.
                # NOTE(review): ModelConfig.timeout always has a dataclass
                # default, so self.default_timeout is never consulted here —
                # confirm whether configs without an explicit timeout should
                # fall back to it.
                try:
                    result = await asyncio.wait_for(
                        self._async_generate(chain, {"input": prompt}),
                        timeout=config.timeout
                    )
                    content = result.get("text", "")

                    tokens = self._estimate_tokens(content)
                    time_taken = time.time() - start_time

                    if callback:
                        await callback(config.name, ModelStatus.COMPLETED)

                    return GenerationResult(
                        model=config.name,
                        content=content,
                        tokens=tokens,
                        time_taken=time_taken,
                        status=ModelStatus.COMPLETED,
                        metadata={"parameters": config.parameters}
                    )

                except asyncio.TimeoutError:
                    if callback:
                        await callback(config.name, ModelStatus.TIMEOUT)

                    return GenerationResult(
                        model=config.name,
                        content="",
                        tokens=0,
                        time_taken=config.timeout,
                        status=ModelStatus.TIMEOUT,
                        error=f"生成超时（{config.timeout}秒）"
                    )

            except Exception as e:
                logger.error(f"使用 {config.name} 生成时出错: {e}")
                logger.error(traceback.format_exc())

                if callback:
                    await callback(config.name, ModelStatus.FAILED)

                return GenerationResult(
                    model=config.name,
                    content="",
                    tokens=0,
                    time_taken=time.time() - start_time,
                    status=ModelStatus.FAILED,
                    error=str(e)
                )

    @staticmethod
    def _estimate_tokens(content: str) -> int:
        """Estimate a token count for mixed English/Chinese text.

        Heuristic: English words count 1, CJK characters 1.5, and other
        punctuation/symbols 0.5, truncated to an int.
        """
        english_tokens = len(re.findall(r'\b\w+\b', content))
        chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', content))
        punctuation = len(re.findall(r'[^\w\s\u4e00-\u9fff]', content))
        return int(english_tokens + chinese_chars * 1.5 + punctuation * 0.5)

    async def _async_generate(self, chain: LLMChain, inputs: Dict[str, Any]) -> Dict[str, Any]:
        """
        Run a synchronous LangChain generation without blocking the loop.

        Args:
            chain: LangChain LLM chain.
            inputs: Input dictionary for the chain.

        Returns:
            The chain's result dictionary.
        """
        # get_running_loop() replaces the deprecated get_event_loop();
        # this coroutine only ever executes inside a running loop.
        loop = asyncio.get_running_loop()
        with ThreadPoolExecutor() as executor:
            return await loop.run_in_executor(executor, chain.invoke, inputs)

    async def aggregate_results(self, results: List[GenerationResult]) -> Dict[str, Any]:
        """
        Aggregate the results from multiple models.

        Args:
            results: List of generation results.

        Returns:
            A dict of summary statistics (counts, timings, token totals,
            success rate) plus a per-model breakdown.
        """
        successful = [r for r in results if r.status == ModelStatus.COMPLETED]
        failed = [r for r in results if r.status in [ModelStatus.FAILED, ModelStatus.TIMEOUT]]

        total_time = sum(r.time_taken for r in results)
        # Only successful generations contribute tokens; failures report 0.
        total_tokens = sum(r.tokens for r in successful)

        return {
            "total_models": len(results),
            "successful": len(successful),
            "failed": len(failed),
            "total_time": total_time,
            "average_time": total_time / len(results) if results else 0,
            "total_tokens": total_tokens,
            "success_rate": len(successful) / len(results) if results else 0,
            "models": {
                r.model: {
                    "status": r.status.value,
                    "time": r.time_taken,
                    "tokens": r.tokens,
                    "error": r.error
                }
                for r in results
            }
        }


class AsyncRateLimiter:
    """Simple async rate limiter for API calls.

    Serializes callers through a lock and sleeps just long enough to keep
    successive calls at least ``min_interval`` seconds apart.
    """

    def __init__(self, rate_per_minute: int):
        """
        Set up the limiter.

        Args:
            rate_per_minute: Maximum number of calls per minute.
        """
        self.rate_per_minute = rate_per_minute
        self.min_interval = 60.0 / rate_per_minute  # seconds between calls
        self.last_call = 0
        self.lock = asyncio.Lock()

    async def acquire(self):
        """Block until a call is permitted under the rate limit."""
        async with self.lock:
            # How much of the mandatory gap since the previous call is left?
            remaining = self.min_interval - (time.time() - self.last_call)
            if remaining > 0:
                await asyncio.sleep(remaining)
            self.last_call = time.time()