"""
并发处理模块 - 提升测试用例生成效率
"""

import asyncio
import logging
from typing import List, Dict, Any, Optional
from concurrent.futures import ThreadPoolExecutor
import time

from ..models.schemas import TestCaseRequest, TestCaseResponse, TestCase
from ..config.settings import get_settings

logger = logging.getLogger(__name__)

class ConcurrentProcessor:
    """Concurrent processor for per-module test-case generation.

    Splits a list of requirement modules into bounded batches and runs the
    LLM generation for each batch concurrently via ``asyncio.gather``,
    falling back to strictly serial processing when concurrency is disabled
    in settings or there is at most one module.
    """

    def __init__(self, llm_service):
        # llm_service must expose the async ``_generate_test_cases_single(request)``
        # coroutine used by _process_single_module.
        self.llm_service = llm_service
        self.settings = get_settings()
        # Upper bound reported in logs; actual per-batch fan-out is capped by
        # the 'max_concurrent_modules' setting below.
        self.max_concurrent = self.settings.max_workers

    async def process_modules_concurrently(
        self,
        modules: List[Dict[str, Any]],
        base_request: TestCaseRequest
    ) -> TestCaseResponse:
        """Process multiple modules, concurrently when possible.

        Args:
            modules: Module dicts; each may carry 'name' and 'content' keys.
            base_request: Template request whose metadata is copied into each
                per-module request.

        Returns:
            A merged TestCaseResponse with sequentially renumbered test cases.
        """

        if not self.settings.enable_concurrent_processing:
            logger.info("并发处理已禁用，使用串行处理")
            return await self._process_modules_serially(modules, base_request)

        if len(modules) <= 1:
            logger.info("模块数量<=1，使用串行处理")
            return await self._process_modules_serially(modules, base_request)

        logger.info(f"开始并发处理 {len(modules)} 个模块，最大并发数: {self.max_concurrent}")

        # Cap the per-batch fan-out so we do not overwhelm the LLM backend.
        batch_size = min(
            len(modules),
            getattr(self.settings, 'max_concurrent_modules', 3)
        )

        all_test_cases: List[TestCase] = []
        all_scenarios: List[Any] = []

        # Process in batches of at most batch_size concurrent tasks.
        for i in range(0, len(modules), batch_size):
            batch_modules = modules[i:i + batch_size]
            batch_no = i // batch_size + 1
            logger.info(f"处理第 {batch_no} 批，包含 {len(batch_modules)} 个模块")

            tasks = [
                self._process_single_module(module, base_request)
                for module in batch_modules
            ]

            batch_start_time = time.time()
            try:
                # return_exceptions=True keeps one failed module from
                # cancelling the rest of the batch.
                results = await asyncio.gather(*tasks, return_exceptions=True)
                batch_duration = time.time() - batch_start_time

                logger.info(f"第 {batch_no} 批处理完成，耗时: {batch_duration:.1f}秒")

                # Collect results; use .get so a module without a 'name' key
                # cannot raise KeyError inside the error-reporting path.
                for module, result in zip(batch_modules, results):
                    module_name = module.get('name', '未知模块')
                    if isinstance(result, Exception):
                        logger.error(f"模块 {module_name} 处理失败: {str(result)}")
                        continue

                    if result and result.test_cases:
                        all_test_cases.extend(result.test_cases)
                        all_scenarios.extend(result.test_scenarios)
                        logger.info(f"模块 {module_name} 生成了 {len(result.test_cases)} 个用例")

                # Throttle between batches to stay under API rate limits.
                if i + batch_size < len(modules):
                    delay = getattr(self.settings, 'batch_delay_seconds', 0.5)
                    logger.info(f"等待 {delay} 秒后处理下一批...")
                    await asyncio.sleep(delay)

            except Exception as e:
                logger.error(f"第 {batch_no} 批处理失败: {str(e)}")
                continue

        logger.info(f"并发处理完成，总共生成 {len(all_test_cases)} 个用例")

        return self._build_response(all_test_cases, all_scenarios, base_request)

    async def _process_single_module(
        self,
        module: Dict[str, Any],
        base_request: TestCaseRequest
    ) -> Optional[TestCaseResponse]:
        """Generate test cases for one module; returns None on failure."""

        module_name = module.get('name', '未知模块')
        module_content = module.get('content', '')

        try:
            # Build a per-module request, inheriting metadata from the base.
            module_request = TestCaseRequest(
                requirement=module_content,
                project_name=base_request.project_name,
                module_name=module_name,
                additional_info=base_request.additional_info,
                test_types=base_request.test_types,
                test_executors=base_request.test_executors,
                test_design_methods=base_request.test_design_methods
            )

            start_time = time.time()

            # Delegate actual generation to the LLM service.
            response = await self.llm_service._generate_test_cases_single(module_request)

            duration = time.time() - start_time

            logger.info(f"模块 {module_name} 处理完成，耗时: {duration:.1f}秒，生成用例: {len(response.test_cases)}个")

            return response

        except Exception as e:
            # Best-effort: a failed module is logged and skipped upstream.
            logger.error(f"处理模块 {module_name} 失败: {str(e)}")
            return None

    async def _process_modules_serially(
        self,
        modules: List[Dict[str, Any]],
        base_request: TestCaseRequest
    ) -> TestCaseResponse:
        """Serial fallback: process modules one at a time with a delay between them."""

        logger.info(f"使用串行处理 {len(modules)} 个模块")

        all_test_cases: List[TestCase] = []
        all_scenarios: List[Any] = []

        for i, module in enumerate(modules):
            module_name = module.get('name', f'模块{i+1}')

            try:
                logger.info(f"处理模块 {i+1}/{len(modules)}: {module_name}")

                result = await self._process_single_module(module, base_request)

                if result and result.test_cases:
                    all_test_cases.extend(result.test_cases)
                    all_scenarios.extend(result.test_scenarios)

                # Throttle between modules to avoid hammering the LLM API.
                if i < len(modules) - 1:
                    delay = getattr(self.settings, 'batch_delay_seconds', 0.5)
                    await asyncio.sleep(delay)

            except Exception as e:
                logger.error(f"串行处理模块 {module_name} 失败: {str(e)}")
                continue

        return self._build_response(all_test_cases, all_scenarios, base_request)

    @staticmethod
    def _renumber_test_cases(test_cases: List[TestCase]) -> None:
        # Re-assign sequential ids (TC001, TC002, ...) after merging
        # per-module results, so ids are unique across the whole response.
        for idx, test_case in enumerate(test_cases, start=1):
            test_case.id = f"TC{idx:03d}"

    def _build_response(
        self,
        test_cases: List[TestCase],
        scenarios: List[Any],
        base_request: TestCaseRequest
    ) -> TestCaseResponse:
        """Assemble the merged response; shared by concurrent and serial paths."""
        self._renumber_test_cases(test_cases)
        return TestCaseResponse(
            test_cases=test_cases,
            test_scenarios=scenarios,
            test_types=["functional"],
            test_executors=["qa"],
            test_design_methods=["use_case"],
            # Rough estimate: 30 minutes per generated test case.
            estimated_effort=f"约{len(test_cases) * 30}分钟",
            requirement=base_request.requirement or "",
            project_name=base_request.project_name or "",
            module_name=base_request.module_name or ""
        )

class PerformanceMonitor:
    """Accumulates request/cache/API counters and derives summary rates."""

    def __init__(self):
        # Single flat dict of counters; get_metrics() splats it into the snapshot.
        self.metrics = {
            'total_requests': 0,
            'concurrent_requests': 0,
            'serial_requests': 0,
            'avg_response_time': 0,
            'total_test_cases': 0,
            'api_calls': 0,
            'cache_hits': 0,
            'cache_misses': 0
        }

    def record_request(self, is_concurrent: bool, response_time: float, test_cases_count: int):
        """Record one completed request and fold its timing into the running mean."""
        m = self.metrics
        m['total_requests'] += 1

        bucket = 'concurrent_requests' if is_concurrent else 'serial_requests'
        m[bucket] += 1

        # Incremental mean: new_avg = (old_avg * (n - 1) + t) / n
        n = m['total_requests']
        m['avg_response_time'] = (m['avg_response_time'] * (n - 1) + response_time) / n

        m['total_test_cases'] += test_cases_count

    def record_api_call(self):
        """Count one outbound API call."""
        self.metrics['api_calls'] += 1

    def record_cache_hit(self):
        """Count one cache hit."""
        self.metrics['cache_hits'] += 1

    def record_cache_miss(self):
        """Count one cache miss."""
        self.metrics['cache_misses'] += 1

    def get_metrics(self) -> Dict[str, Any]:
        """Return a snapshot of all counters plus derived percentage rates."""
        m = self.metrics
        lookups = m['cache_hits'] + m['cache_misses']
        requests = m['total_requests']

        derived = {
            'cache_hit_rate': (m['cache_hits'] / lookups * 100) if lookups > 0 else 0,
            'concurrent_rate': (m['concurrent_requests'] / requests * 100) if requests > 0 else 0,
            'avg_test_cases_per_request': m['total_test_cases'] / requests if requests > 0 else 0,
        }
        return {**m, **derived}

    def log_metrics(self):
        """Emit the current metrics snapshot to the module logger."""
        metrics = self.get_metrics()

        logger.info("📊 性能指标统计:")
        logger.info(f"   总请求数: {metrics['total_requests']}")
        logger.info(f"   并发处理率: {metrics['concurrent_rate']:.1f}%")
        logger.info(f"   平均响应时间: {metrics['avg_response_time']:.1f}秒")
        logger.info(f"   总生成用例数: {metrics['total_test_cases']}")
        logger.info(f"   平均每请求用例数: {metrics['avg_test_cases_per_request']:.1f}")
        logger.info(f"   API调用次数: {metrics['api_calls']}")
        logger.info(f"   缓存命中率: {metrics['cache_hit_rate']:.1f}%")

# Module-level singleton so all callers share one set of metrics.
performance_monitor = PerformanceMonitor()
