"""
LLM批处理调用模块
提供批量LLM调用功能，提高吞吐量和效率
"""

from typing import List, Dict, Any, Optional
import asyncio
from functools import wraps
import hashlib
import logging
from collections import OrderedDict
import time

from src.utils.metrics import track_llm_call, LLM_CALLS, LLM_TOKENS

logger = logging.getLogger(__name__)

class BatchLLMProcessor:
    """Batch processor for LLM calls.

    Groups many message lists into batches, runs the batches concurrently
    (bounded by a semaphore) and memoizes results in an LRU cache keyed by
    message content.
    """

    def __init__(self, batch_size: int = 10, max_concurrent: int = 5, cache_size: int = 1000):
        """
        Initialize the batch processor.

        Args:
            batch_size: number of message lists per batch
            max_concurrent: maximum number of batches processed concurrently
            cache_size: maximum number of cached results (LRU eviction)
        """
        self.batch_size = batch_size
        self.max_concurrent = max_concurrent
        self.semaphore = asyncio.Semaphore(max_concurrent)
        self._llm = None  # created lazily in _get_llm()
        # OrderedDict used as an LRU cache for LLM call results
        self._cache: OrderedDict = OrderedDict()
        self._cache_size = cache_size
        self._cache_lock = asyncio.Lock()

    def _get_llm(self):
        """Lazily import and build the LLM instance (avoids import cycles)."""
        if self._llm is None:
            from src.research_core.model import get_llm
            self._llm = get_llm()
        return self._llm

    def _generate_cache_key(self, messages: List[Dict[str, Any]]) -> str:
        """Build a cache key from a message list.

        Messages are serialized in their original order: conversation order
        is significant, so two orderings of the same messages must NOT share
        a cache entry. str() also works for non-dict message objects.
        MD5 is used purely as a fast, non-cryptographic fingerprint.
        """
        return hashlib.md5(str(messages).encode('utf-8')).hexdigest()

    async def _get_from_cache(self, cache_key: str) -> Optional[str]:
        """Return the cached result for cache_key, or None on a miss."""
        async with self._cache_lock:
            if cache_key in self._cache:
                # Mark the entry most-recently-used so it is evicted last.
                self._cache.move_to_end(cache_key)
                return self._cache[cache_key]
            return None

    async def _set_cache(self, cache_key: str, result: str):
        """Store result under cache_key, evicting the LRU entry when full."""
        async with self._cache_lock:
            self._cache[cache_key] = result
            self._cache.move_to_end(cache_key)
            if len(self._cache) > self._cache_size:
                self._cache.popitem(last=False)

    async def process_batch(self, messages_list: List[List[Dict[str, Any]]],
                            model_type: str = "standard") -> List[str]:
        """
        Process a list of message lists in batches.

        Args:
            messages_list: list of message lists
            model_type: model type (standard/fast); accepted for API
                compatibility — not currently used to select a model

        Returns:
            List[str]: one result per input, in input order. Failed batches
            yield "处理失败: ..." placeholders (never cached).
        """
        if not messages_list:
            return []

        results: List[str] = [""] * len(messages_list)
        uncached_indices = []  # (original index, cache key) for cache misses
        cache_hits = 0

        # Check the cache for every request first.
        for i, messages in enumerate(messages_list):
            cache_key = self._generate_cache_key(messages)
            cached_result = await self._get_from_cache(cache_key)
            if cached_result is not None:
                results[i] = cached_result
                cache_hits += 1
            else:
                uncached_indices.append((i, cache_key))

        logger.debug(f"缓存命中 {cache_hits} 个请求，需要处理 {len(uncached_indices)} 个请求")

        if uncached_indices:
            # Split the cache misses into fixed-size batches.
            uncached_messages = [messages_list[i] for i, _ in uncached_indices]
            batches = [uncached_messages[i:i + self.batch_size]
                       for i in range(0, len(uncached_messages), self.batch_size)]

            # Run batches concurrently; return_exceptions=True so one failed
            # batch does not abort the others.
            batch_tasks = [asyncio.create_task(self._process_single_batch(batch, model_type))
                           for batch in batches]
            batch_results = await asyncio.gather(*batch_tasks, return_exceptions=True)

            # Merge batch outputs back into their original positions.
            result_index = 0
            for batch_idx, batch_result in enumerate(batch_results):
                batch_len = len(batches[batch_idx])
                if isinstance(batch_result, Exception):
                    logger.error(f"批处理失败: {batch_result}")
                    # Fill failed batch slots with a placeholder (not cached).
                    for _ in range(batch_len):
                        if result_index < len(uncached_indices):
                            orig_index, _ = uncached_indices[result_index]
                            results[orig_index] = f"处理失败: {str(batch_result)}"
                            result_index += 1
                elif isinstance(batch_result, list):
                    for result in batch_result:
                        if result_index < len(uncached_indices):
                            orig_index, cache_key = uncached_indices[result_index]
                            results[orig_index] = result
                            # Await the write: a fire-and-forget task holds no
                            # reference and may be garbage-collected unrun.
                            await self._set_cache(cache_key, result)
                            result_index += 1
                else:
                    logger.warning(f"意外的结果类型: {type(batch_result)}")
                    for _ in range(batch_len):
                        if result_index < len(uncached_indices):
                            orig_index, _ = uncached_indices[result_index]
                            results[orig_index] = "未知错误"
                            result_index += 1

        return results

    async def _process_single_batch(self, batch: List[List[Dict[str, Any]]],
                                    model_type: str) -> List[str]:
        """Process one batch of message lists under the concurrency semaphore."""
        async with self.semaphore:
            try:
                llm = self._get_llm()
                if llm is None:
                    raise ValueError("LLM实例未初始化")

                results = []

                if hasattr(llm, 'astream'):
                    # Streaming path: concatenate the text of each chunk.
                    for messages in batch:
                        chunks = []
                        async for chunk in llm.astream(messages):
                            # Chunks are typically message objects; prefer
                            # their .content, mirroring the invoke path below.
                            chunks.append(chunk.content if hasattr(chunk, 'content') else str(chunk))
                        response = ''.join(chunks)
                        results.append(response)

                        # Update metrics (token count approximated by length).
                        LLM_CALLS.inc()
                        LLM_TOKENS.inc(len(response))
                else:
                    # Blocking path: run the sync invoke off the event loop.
                    for messages in batch:
                        response = await asyncio.to_thread(llm.invoke, messages)
                        result = response.content if hasattr(response, 'content') else str(response)
                        results.append(result)

                        # Update metrics (token count approximated by length).
                        LLM_CALLS.inc()
                        LLM_TOKENS.inc(len(result))

                return results

            except Exception as e:
                logger.error(f"批次处理失败: {e}")
                raise  # bare raise preserves the original traceback

    @track_llm_call()
    async def process_single(self, messages: List[Dict[str, Any]],
                             model_type: str = "standard") -> str:
        """
        Process a single message list (backward-compatible wrapper).

        Delegates to process_batch, which already performs the cache lookup
        and caches successful results — failure placeholders are deliberately
        never written to the cache.

        Args:
            messages: message list
            model_type: model type

        Returns:
            str: the result for this message list
        """
        results = await self.process_batch([messages], model_type)
        return results[0] if results else ""

def batch_llm_call(batch_size: int = 10, max_concurrent: int = 5, cache_size: int = 1000):
    """
    Decorator factory for LLM batch processing.

    All functions decorated by one call to this factory share a single
    BatchLLMProcessor configured with the given limits.

    NOTE: the returned wrapper does not execute the decorated function's
    body — every call is routed straight to the shared processor.

    Args:
        batch_size: batch size
        max_concurrent: maximum concurrency
        cache_size: cache size
    """
    shared_processor = BatchLLMProcessor(batch_size, max_concurrent, cache_size)

    def decorator(func):
        @wraps(func)
        async def wrapper(messages_list: List[List[Dict[str, Any]]],
                          model_type: str = "standard") -> List[str]:
            # Delegate to the shared processor; `func` is intentionally unused.
            return await shared_processor.process_batch(messages_list, model_type)

        return wrapper

    return decorator

# Module-level batch processor instance (default configuration)
global_batch_processor = BatchLLMProcessor()

@batch_llm_call(batch_size=10, max_concurrent=3)
async def batch_invoke_llm(messages_list: List[List[Dict[str, Any]]], 
                          model_type: str = "standard") -> List[str]:
    """
    Batch-invoke the LLM (primary interface).

    NOTE(review): the batch_llm_call decorator replaces this function with a
    wrapper that routes calls to the decorator's own processor (batch_size=10,
    max_concurrent=3), so this body — which delegates to
    global_batch_processor — never actually executes.

    Args:
        messages_list: list of message lists
        model_type: model type

    Returns:
        List[str]: list of results
    """
    return await global_batch_processor.process_batch(messages_list, model_type)

async def batch_analysis_agent(questions: List[str], search_results: List[str]) -> List[str]:
    """
    Analyze multiple queries in one batched LLM call.

    Pairs each question with its search results, formats them through
    ANALYST_PROMPT and sends all of them via batch_invoke_llm. Extra items
    in the longer list are ignored (zip semantics).

    Args:
        questions: list of questions
        search_results: list of search results, one per question

    Returns:
        List[str]: one analysis result per question
    """
    from src.research_core.prompts import ANALYST_PROMPT

    messages_list = [
        ANALYST_PROMPT.format_messages(question=question, search_results=results)
        for question, results in zip(questions, search_results)
    ]

    return await batch_invoke_llm(messages_list)

# Performance optimization: pre-formatted prompt template cache
class PromptTemplateCache:
    """Cache of pre-formatted prompt message lists.

    Keyed by template name plus the formatting kwargs, so each distinct
    (template, arguments) pair is formatted only once.
    """

    def __init__(self):
        # cache_key -> formatted message list
        self._cache: Dict[str, List[Dict[str, Any]]] = {}

    def get_template(self, template_name: str, **kwargs) -> List[Dict[str, Any]]:
        """Return the formatted messages for template_name with kwargs.

        Args:
            template_name: attribute name of the template in the prompts module
            **kwargs: formatting arguments (must be sortable by key)

        Returns:
            List[Dict[str, Any]]: a shallow copy of the cached message list,
            so caller-side mutation cannot corrupt the cache.

        Raises:
            ValueError: if no template named template_name exists.
        """
        cache_key = f"{template_name}:{str(sorted(kwargs.items()))}"

        if cache_key not in self._cache:
            from src.research_core import prompts
            template = getattr(prompts, template_name, None)
            # Compare against None explicitly: a falsy-but-present template
            # object must not be treated as missing.
            if template is None:
                raise ValueError(f"提示词模板 {template_name} 不存在")
            self._cache[cache_key] = template.format_messages(**kwargs)

        # Hand out a copy; the original truthiness-gated version returned the
        # cached list itself, letting callers mutate shared cache state.
        return list(self._cache[cache_key])

# Module-level prompt template cache instance
template_cache = PromptTemplateCache()