"""
性能基准测试框架 - 提供可复现的性能指标和详细的测试报告
"""
import asyncio
import time
import json
import psutil
import platform
from typing import Dict, Any, List, Optional, Callable
from dataclasses import dataclass, asdict
from datetime import datetime
import aiohttp
import statistics
from concurrent.futures import ThreadPoolExecutor

from utils.logger import Logger
from utils.constants import AgentType

# NOTE(review): this module imports `Logger` (L16), but the original code called
# `get_logger(...)`, which is undefined here and raised NameError at import time.
# Using the imported name instead — confirm Logger's constructor signature
# against utils/logger before merging.
logger = Logger("performance_benchmark")


@dataclass
class SystemInfo:
    """Snapshot of the hardware/OS environment a benchmark was run on."""
    cpu_count: int  # logical CPU count (psutil.cpu_count())
    cpu_model: str  # processor name as reported by platform.processor()
    memory_total_gb: float
    memory_available_gb: float
    disk_total_gb: float
    disk_available_gb: float
    os_name: str  # e.g. "Linux", "Windows" (platform.system())
    os_version: str
    python_version: str

    @classmethod
    def collect(cls) -> 'SystemInfo':
        """Probe the current machine via psutil/platform and return a snapshot."""
        gib = 1024 ** 3
        vmem = psutil.virtual_memory()
        root_disk = psutil.disk_usage('/')

        return cls(
            cpu_count=psutil.cpu_count(),
            cpu_model=platform.processor(),
            memory_total_gb=vmem.total / gib,
            memory_available_gb=vmem.available / gib,
            disk_total_gb=root_disk.total / gib,
            disk_available_gb=root_disk.free / gib,
            os_name=platform.system(),
            os_version=platform.version(),
            python_version=platform.python_version(),
        )


@dataclass
class TestConfiguration:
    """Configuration describing one load-test scenario."""
    test_name: str
    concurrent_users: int  # number of simulated concurrent users
    total_requests: int  # total requests spread across all users
    ramp_up_time: int  # seconds (NOTE(review): not read anywhere in this file — confirm it is still needed)
    test_duration: int   # seconds
    warmup_duration: int  # warmup time (seconds)
    agent_type: str  # AgentType value; selects the endpoint under test
    request_payload: Dict[str, Any]  # JSON body sent with every request
    
    def to_dict(self) -> Dict[str, Any]:
        """Return a plain-dict (JSON-serializable) copy of this configuration."""
        return asdict(self)


@dataclass
class PerformanceMetrics:
    """Aggregated results of a single benchmark run."""
    total_requests: int
    successful_requests: int
    failed_requests: int
    timeout_requests: int

    # Latency metrics (milliseconds), computed over successful requests.
    min_response_time: float
    max_response_time: float
    mean_response_time: float
    median_response_time: float
    p90_response_time: float
    p95_response_time: float
    p99_response_time: float

    # Throughput metrics.
    requests_per_second: float
    throughput_mb_per_second: float

    # Fraction of failed requests, 0.0-1.0.
    error_rate: float

    # Resource usage sampled during the run.
    avg_cpu_usage: float
    max_cpu_usage: float
    avg_memory_usage_gb: float
    max_memory_usage_gb: float

    # Run boundaries.
    test_start_time: datetime
    test_end_time: datetime

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict; datetimes become ISO-8601 strings."""
        payload = asdict(self)
        payload['test_start_time'] = self.test_start_time.isoformat()
        payload['test_end_time'] = self.test_end_time.isoformat()
        return payload


class PerformanceMonitor:
    """Samples system-wide CPU and memory usage while a test is running."""

    def __init__(self):
        # Raw samples; appended in lock-step by collect_sample().
        self.cpu_usage_samples: List[float] = []
        self.memory_usage_samples: List[float] = []
        self.monitoring = False

    def start_monitoring(self):
        """Discard samples from any previous run and arm the monitor."""
        self.cpu_usage_samples.clear()
        self.memory_usage_samples.clear()
        self.monitoring = True

    def stop_monitoring(self):
        """Disarm the monitor; collected samples are kept for reporting."""
        self.monitoring = False

    def collect_sample(self):
        """Record one CPU%/memory sample; no-op unless monitoring is armed."""
        if not self.monitoring:
            return
        # NOTE: cpu_percent(interval=0.1) blocks the calling thread ~100 ms.
        self.cpu_usage_samples.append(psutil.cpu_percent(interval=0.1))
        self.memory_usage_samples.append(psutil.virtual_memory().used / (1024**3))

    def get_resource_metrics(self) -> Dict[str, float]:
        """Summarize samples as avg/max CPU%% and GB; all zeros when empty."""
        if not self.cpu_usage_samples:
            return dict.fromkeys(
                ('avg_cpu_usage', 'max_cpu_usage',
                 'avg_memory_usage_gb', 'max_memory_usage_gb'), 0.0)

        return {
            'avg_cpu_usage': statistics.mean(self.cpu_usage_samples),
            'max_cpu_usage': max(self.cpu_usage_samples),
            'avg_memory_usage_gb': statistics.mean(self.memory_usage_samples),
            'max_memory_usage_gb': max(self.memory_usage_samples),
        }


class LoadGenerator:
    """Async HTTP load generator; use as `async with LoadGenerator(url) as g:`."""

    def __init__(self, base_url: str):
        self.base_url = base_url
        # Created lazily in __aenter__, closed in __aexit__.
        self.session: Optional[aiohttp.ClientSession] = None

    async def __aenter__(self):
        # Connection pool sized for high-concurrency benchmarks; 30 s total timeout.
        self.session = aiohttp.ClientSession(
            timeout=aiohttp.ClientTimeout(total=30),
            connector=aiohttp.TCPConnector(
                limit=1000,
                limit_per_host=100,
                ttl_dns_cache=300,
                use_dns_cache=True,
            )
        )
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await self.session.close()

    async def send_request(self, endpoint: str, payload: Dict[str, Any]) -> Dict[str, Any]:
        """POST `payload` as JSON to `base_url + endpoint`.

        Returns a record dict with keys: success, status_code, response_time_ms,
        response_size_bytes, error, response_data. Never raises: timeouts are
        reported as status 408 / error 'timeout', other failures as status 500.
        """
        # FIX: use a monotonic clock for latency — time.time() is wall-clock and
        # can jump (NTP), corrupting measurements.
        start = time.perf_counter()

        try:
            url = f"{self.base_url}{endpoint}"

            async with self.session.post(url, json=payload) as response:
                response_data = await response.json()

                elapsed_ms = (time.perf_counter() - start) * 1000  # ms

                return {
                    'success': response.status == 200,
                    'status_code': response.status,
                    'response_time_ms': elapsed_ms,
                    # FIX: encode before measuring — len(str) counts characters,
                    # which under-reports size for multi-byte (e.g. CJK) content.
                    'response_size_bytes': len(json.dumps(response_data).encode('utf-8')),
                    'error': None,
                    'response_data': response_data
                }

        except asyncio.TimeoutError:
            return {
                'success': False,
                'status_code': 408,
                'response_time_ms': (time.perf_counter() - start) * 1000,
                'response_size_bytes': 0,
                'error': 'timeout',
                'response_data': None
            }

        except Exception as e:
            # Covers connection errors and non-JSON bodies alike; the synthetic
            # 500 preserves the original reporting contract.
            return {
                'success': False,
                'status_code': 500,
                'response_time_ms': (time.perf_counter() - start) * 1000,
                'response_size_bytes': 0,
                'error': str(e),
                'response_data': None
            }


class PerformanceBenchmark:
    """Benchmark harness: warms up, drives concurrent load, samples resources,
    and produces graded reports.

    Fixes vs. the original implementation:
      * Resource samples are taken concurrently with the load (previously they
        were collected only after all requests finished, so CPU/memory metrics
        described the idle period).
      * The measured window starts after warmup, so RPS/throughput are no
        longer deflated by warmup time.
      * Grading uses an ordinal scale instead of lexicographic string
        comparison, which mis-ordered "A+"/"A-"/"B".
    """

    # Grade scale ordered best -> worst; grading works on indices into this
    # tuple rather than on string comparison.
    GRADE_SCALE = ("A+", "A", "A-", "B", "C", "D")

    def __init__(self, base_url: str = "http://localhost:8000"):
        self.base_url = base_url
        self.system_info = SystemInfo.collect()
        self.monitor = PerformanceMonitor()
        self.results: List[Dict[str, Any]] = []

    async def run_single_agent_test(self, config: 'TestConfiguration') -> 'PerformanceMetrics':
        """Run one single-agent load test and return its aggregated metrics."""
        logger.info(f"开始单智能体性能测试: {config.test_name}")

        # FIX: warmup runs BEFORE the clock starts, so warmup time no longer
        # inflates the measured duration (which deflated RPS in the original).
        logger.info(f"预热阶段: {config.warmup_duration}秒")
        await self._warmup_phase(config)

        # NOTE: naive-UTC timestamps (utcnow) kept for report-format compatibility.
        test_start_time = datetime.utcnow()
        self.monitor.start_monitoring()

        logger.info(f"执行测试: {config.test_duration}秒, 并发用户: {config.concurrent_users}")
        test_results = await self._execute_load_test(config)

        self.monitor.stop_monitoring()
        test_end_time = datetime.utcnow()

        metrics = self._calculate_metrics(test_results, test_start_time, test_end_time)

        logger.info(f"测试完成: {config.test_name}, RPS: {metrics.requests_per_second:.2f}, P99: {metrics.p99_response_time:.2f}ms")

        return metrics

    async def run_multi_agent_collaboration_test(self, config: 'TestConfiguration') -> 'PerformanceMetrics':
        """Run a multi-agent collaboration test.

        TODO: implement a real collaboration scenario; currently delegates to
        the single-agent test as a placeholder.
        """
        logger.info(f"开始多智能体协作性能测试: {config.test_name}")
        return await self.run_single_agent_test(config)

    async def _warmup_phase(self, config: 'TestConfiguration'):
        """Send a short burst of requests, then wait for the system to settle."""
        async with LoadGenerator(self.base_url) as generator:
            warmup_requests = max(10, config.concurrent_users // 2)

            for _ in range(warmup_requests):
                await generator.send_request("/api/v1/search", config.request_payload)
                await asyncio.sleep(0.1)  # brief pacing between warmup requests

        # Let caches / connection pools settle before measurement starts.
        await asyncio.sleep(config.warmup_duration)

    async def _execute_load_test(self, config: 'TestConfiguration') -> List[Dict[str, Any]]:
        """Drive the concurrent user simulations and return raw request records.

        FIX: a sampling task now runs alongside the load so resource metrics
        reflect the test itself, not the idle period after it.
        """
        results: List[Dict[str, Any]] = []

        async with LoadGenerator(self.base_url) as generator:
            # FIX: guard against concurrent_users > total_requests, which made
            # requests_per_user == 0 and divided by zero below.
            requests_per_user = max(1, config.total_requests // config.concurrent_users)
            interval = config.test_duration / requests_per_user

            sampler = asyncio.create_task(self._sample_resources())
            tasks = [
                asyncio.create_task(
                    self._user_simulation(generator, user_id, requests_per_user, interval, config)
                )
                for user_id in range(config.concurrent_users)
            ]

            try:
                user_results = await asyncio.gather(*tasks, return_exceptions=True)
            finally:
                sampler.cancel()
                try:
                    await sampler
                except asyncio.CancelledError:
                    pass

            for user_result in user_results:
                if isinstance(user_result, list):
                    results.extend(user_result)
                else:
                    logger.error(f"用户模拟异常: {user_result}")

        return results

    async def _sample_resources(self):
        """Sample CPU/memory roughly once per second until cancelled/disarmed."""
        loop = asyncio.get_running_loop()
        while self.monitor.monitoring:
            # collect_sample blocks ~100 ms (psutil.cpu_percent); run it in a
            # worker thread so the event loop keeps driving the load.
            await loop.run_in_executor(None, self.monitor.collect_sample)
            await asyncio.sleep(1)

    async def _user_simulation(self, generator: 'LoadGenerator', user_id: int,
                               request_count: int, interval: float,
                               config: 'TestConfiguration') -> List[Dict[str, Any]]:
        """Simulate one user issuing `request_count` paced requests."""
        # Endpoint dispatch by agent type; unknown types fall back to the
        # search endpoint (original behavior).
        endpoint_by_agent = {
            AgentType.TEXT_QA.value: "/api/v1/search",
            AgentType.DOC_QA.value: "/api/v1/doc_qa",
            AgentType.MULTI_MODAL.value: "/api/v1/multi_modal",
        }
        endpoint = endpoint_by_agent.get(config.agent_type, "/api/v1/search")

        results = []
        for _ in range(request_count):
            try:
                results.append(await generator.send_request(endpoint, config.request_payload))
            except Exception as e:
                logger.error(f"用户{user_id}请求失败: {str(e)}")
                results.append({
                    'success': False,
                    'response_time_ms': 0,
                    'error': str(e)
                })

            # Pace requests to spread them across the test window.
            await asyncio.sleep(interval)

        return results

    def _calculate_metrics(self, results: List[Dict[str, Any]],
                           test_start_time: datetime, test_end_time: datetime) -> 'PerformanceMetrics':
        """Aggregate raw per-request records into a PerformanceMetrics object."""
        total_requests = len(results)
        successful_requests = sum(1 for r in results if r.get('success', False))
        failed_requests = total_requests - successful_requests
        timeout_requests = sum(1 for r in results if r.get('error') == 'timeout')

        # Latency statistics over successful requests only.
        response_times = sorted(
            r.get('response_time_ms', 0) for r in results if r.get('success', False)
        )

        if response_times:
            last = len(response_times) - 1

            def pct(q: float) -> float:
                # Nearest-rank percentile, clamped so the index can never overrun.
                return response_times[min(int(len(response_times) * q), last)]

            min_response_time = response_times[0]
            max_response_time = response_times[-1]
            mean_response_time = statistics.mean(response_times)
            median_response_time = statistics.median(response_times)
            p90_response_time = pct(0.9)
            p95_response_time = pct(0.95)
            p99_response_time = pct(0.99)
        else:
            min_response_time = max_response_time = mean_response_time = 0
            median_response_time = p90_response_time = p95_response_time = p99_response_time = 0

        # Throughput over the measured window.
        test_duration_seconds = (test_end_time - test_start_time).total_seconds()
        requests_per_second = successful_requests / test_duration_seconds if test_duration_seconds > 0 else 0

        total_response_size = sum(
            r.get('response_size_bytes', 0) for r in results if r.get('success', False)
        )
        throughput_mb_per_second = (total_response_size / (1024 * 1024)) / test_duration_seconds if test_duration_seconds > 0 else 0

        error_rate = failed_requests / total_requests if total_requests > 0 else 0

        resource_metrics = self.monitor.get_resource_metrics()

        return PerformanceMetrics(
            total_requests=total_requests,
            successful_requests=successful_requests,
            failed_requests=failed_requests,
            timeout_requests=timeout_requests,
            min_response_time=min_response_time,
            max_response_time=max_response_time,
            mean_response_time=mean_response_time,
            median_response_time=median_response_time,
            p90_response_time=p90_response_time,
            p95_response_time=p95_response_time,
            p99_response_time=p99_response_time,
            requests_per_second=requests_per_second,
            throughput_mb_per_second=throughput_mb_per_second,
            error_rate=error_rate,
            avg_cpu_usage=resource_metrics['avg_cpu_usage'],
            max_cpu_usage=resource_metrics['max_cpu_usage'],
            avg_memory_usage_gb=resource_metrics['avg_memory_usage_gb'],
            max_memory_usage_gb=resource_metrics['max_memory_usage_gb'],
            test_start_time=test_start_time,
            test_end_time=test_end_time
        )

    def generate_test_report(self, test_name: str, config: 'TestConfiguration',
                             metrics: 'PerformanceMetrics') -> Dict[str, Any]:
        """Bundle metadata, system info, config, metrics and analysis into a report dict."""
        report = {
            'test_metadata': {
                'test_name': test_name,
                'test_date': datetime.utcnow().isoformat(),
                'test_duration_seconds': (metrics.test_end_time - metrics.test_start_time).total_seconds()
            },
            'system_info': self.system_info.__dict__,
            'test_configuration': config.to_dict(),
            'performance_metrics': metrics.to_dict(),
            'analysis': {
                'performance_grade': self._grade_performance(metrics),
                'bottlenecks': self._identify_bottlenecks(metrics),
                'recommendations': self._generate_recommendations(metrics)
            }
        }

        return report

    def _grade_performance(self, metrics: 'PerformanceMetrics') -> str:
        """Map key metrics to a letter grade from GRADE_SCALE.

        FIX: the original compared grade strings with max()/min(), which orders
        "A+"/"A-"/"B" lexicographically — e.g. `min(grade, "A+")` produced "A+"
        for ANY grade including "C". Grading now works on scale indices
        (larger index = worse grade).
        """
        scale = self.GRADE_SCALE

        # Base grade from P99 latency.
        if metrics.p99_response_time > 1000:   # > 1 s
            idx = scale.index("C")
        elif metrics.p99_response_time > 500:  # > 500 ms
            idx = scale.index("B")
        elif metrics.p99_response_time > 200:  # > 200 ms
            idx = scale.index("A-")
        else:
            idx = scale.index("A")

        # Error-rate penalties degrade (never improve) the grade.
        if metrics.error_rate > 0.05:          # > 5% errors
            idx = scale.index("D")
        elif metrics.error_rate > 0.01:        # > 1% errors
            idx = max(idx, scale.index("B"))

        # Throughput adjustments.
        if metrics.requests_per_second < 100:
            idx = max(idx, scale.index("B"))
        elif metrics.requests_per_second > 1000 and idx == scale.index("A"):
            # Only a clean "A" run earns the upgrade; high throughput does not
            # offset latency or error penalties.
            idx = scale.index("A+")

        return scale[idx]

    def _identify_bottlenecks(self, metrics: 'PerformanceMetrics') -> List[str]:
        """Return human-readable bottleneck findings derived from the metrics."""
        bottlenecks = []

        if metrics.p99_response_time > 1000:
            bottlenecks.append("高延迟：P99响应时间超过1秒")

        if metrics.error_rate > 0.05:
            bottlenecks.append("高错误率：错误率超过5%")

        if metrics.max_cpu_usage > 80:
            bottlenecks.append("CPU瓶颈：CPU使用率峰值超过80%")

        if metrics.timeout_requests > metrics.total_requests * 0.01:
            bottlenecks.append("超时问题：超时请求超过1%")

        if not bottlenecks:
            bottlenecks.append("无明显性能瓶颈")

        return bottlenecks

    def _generate_recommendations(self, metrics: 'PerformanceMetrics') -> List[str]:
        """Return tuning recommendations based on threshold checks."""
        recommendations = []

        if metrics.p99_response_time > 500:
            recommendations.append("考虑添加缓存机制减少响应时间")
            recommendations.append("优化数据库查询和索引")

        if metrics.error_rate > 0.01:
            recommendations.append("增加错误处理和重试机制")
            recommendations.append("检查服务端日志定位错误原因")

        if metrics.max_cpu_usage > 70:
            recommendations.append("考虑水平扩展增加服务器实例")
            recommendations.append("优化算法复杂度降低CPU使用")

        if metrics.requests_per_second < 100:
            recommendations.append("优化并发处理能力提升吞吐量")

        if not recommendations:
            recommendations.append("系统性能表现良好，继续监控关键指标")

        return recommendations

    async def save_report(self, report: Dict[str, Any], filename: Optional[str] = None):
        """Write the report as UTF-8 JSON; auto-names the file when none given.

        NOTE: uses synchronous file I/O inside an async def — acceptable for a
        one-shot write at the end of a test run.
        """
        if not filename:
            timestamp = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
            filename = f"performance_report_{timestamp}.json"

        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(report, f, ensure_ascii=False, indent=2)

            # FIX: the original logged the literal "(unknown)" instead of the
            # actual file name.
            logger.info(f"测试报告已保存: {filename}")

        except Exception as e:
            logger.error(f"保存测试报告失败: {str(e)}")

    def print_summary(self, metrics: 'PerformanceMetrics'):
        """Print a human-readable summary of one run to stdout."""
        print("\n" + "="*60)
        print("性能测试摘要")
        print("="*60)
        print(f"总请求数: {metrics.total_requests:,}")
        print(f"成功请求: {metrics.successful_requests:,} ({(1-metrics.error_rate)*100:.1f}%)")
        print(f"失败请求: {metrics.failed_requests:,} ({metrics.error_rate*100:.1f}%)")
        print(f"超时请求: {metrics.timeout_requests:,}")
        print()
        print("响应时间 (毫秒):")
        print(f"  最小值: {metrics.min_response_time:.2f}")
        print(f"  平均值: {metrics.mean_response_time:.2f}")
        print(f"  中位数: {metrics.median_response_time:.2f}")
        print(f"  P90: {metrics.p90_response_time:.2f}")
        print(f"  P95: {metrics.p95_response_time:.2f}")
        print(f"  P99: {metrics.p99_response_time:.2f}")
        print(f"  最大值: {metrics.max_response_time:.2f}")
        print()
        print("吞吐量:")
        print(f"  请求/秒: {metrics.requests_per_second:.2f}")
        print(f"  MB/秒: {metrics.throughput_mb_per_second:.2f}")
        print()
        print("资源使用:")
        print(f"  CPU平均使用率: {metrics.avg_cpu_usage:.1f}%")
        print(f"  CPU峰值使用率: {metrics.max_cpu_usage:.1f}%")
        print(f"  内存平均使用: {metrics.avg_memory_usage_gb:.2f} GB")
        print(f"  内存峰值使用: {metrics.max_memory_usage_gb:.2f} GB")
        print("="*60)


# 使用示例
async def run_performance_benchmark():
    """Run the predefined single-agent benchmark suite and save the reports."""

    benchmark = PerformanceBenchmark("http://localhost:8000")

    # (name, users, total requests, ramp-up s, duration s, warmup s, query, task id)
    scenarios = [
        ("文本问答_低并发", 10, 100, 5, 30, 5,
         "什么是人工智能？", "perf_test_001"),
        ("文本问答_中等并发", 50, 500, 10, 60, 10,
         "机器学习和深度学习的区别是什么？", "perf_test_002"),
        ("文本问答_高并发", 100, 1000, 15, 120, 15,
         "请解释区块链技术的原理和应用", "perf_test_003"),
    ]

    test_configs = [
        TestConfiguration(
            test_name=name,
            concurrent_users=users,
            total_requests=total,
            ramp_up_time=ramp,
            test_duration=duration,
            warmup_duration=warmup,
            agent_type=AgentType.TEXT_QA.value,
            request_payload={
                "query": query,
                "user_id": "test_user",
                "task_id": task_id
            }
        )
        for name, users, total, ramp, duration, warmup, query, task_id in scenarios
    ]

    for config in test_configs:
        logger.info(f"开始测试: {config.test_name}")

        try:
            metrics = await benchmark.run_single_agent_test(config)
            report = benchmark.generate_test_report(config.test_name, config, metrics)
            await benchmark.save_report(report, f"report_{config.test_name}.json")
            benchmark.print_summary(metrics)
        except Exception as e:
            logger.error(f"测试失败: {config.test_name}, error={str(e)}")

        # Cool-down between scenarios.
        await asyncio.sleep(30)

    logger.info("所有性能测试完成")


if __name__ == "__main__":
    # `asyncio` is already imported at the top of the module; the original
    # re-import inside this guard was redundant and has been removed.
    asyncio.run(run_performance_benchmark())