"""
性能基准测试配置
定义各种性能测试的基准值和配置参数
"""

import os
import time
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class PerformanceBenchmark:
    """Performance benchmark configuration.

    Every limit table is a true dataclass field built with
    ``field(default_factory=...)`` so each instance owns its own dict.
    The previous version declared un-annotated class attributes, which a
    ``@dataclass`` ignores: all instances shared the same mutable dicts,
    so scaling one instance's limits silently rewrote every other
    instance's limits as well.
    """

    # API response-time limits (seconds), keyed by "METHOD path".
    api_response_time_limits: Dict[str, float] = field(default_factory=lambda: {
        "GET /api/v1/cameras/": 0.5,
        "POST /api/v1/cameras/": 1.0,
        "GET /api/v1/alerts/": 0.5,
        "POST /api/v1/alerts/": 0.8,
        "GET /api/v1/system/health": 0.2,
        "GET /api/v1/system/statistics": 1.0,
        "POST /api/v1/ai-algorithm/person-detection/start": 2.0,
        "GET /api/v1/ai-algorithm/person-detection/results/{id}": 0.8,
        "POST /api/v1/data-analysis/crowd-density/analyze": 1.5,
        "GET /api/v1/data-analysis/crowd-density/{id}": 0.6
    })

    # Concurrency-test parameters.
    concurrent_test_config: Dict[str, Any] = field(default_factory=lambda: {
        "max_concurrent_users": 50,
        "test_duration_seconds": 30,
        "ramp_up_time_seconds": 10,
        "acceptable_error_rate": 0.05  # 5%
    })

    # Database performance limits.
    database_performance: Dict[str, Any] = field(default_factory=lambda: {
        "max_query_time_seconds": 1.0,
        "max_connection_pool_size": 20,
        "connection_timeout_seconds": 30,
        "query_timeout_seconds": 60
    })

    # AI inference performance limits.
    ai_inference_performance: Dict[str, Any] = field(default_factory=lambda: {
        "person_detection_max_time_seconds": 2.0,
        "behavior_analysis_max_time_seconds": 3.0,
        "crowd_density_analysis_max_time_seconds": 1.5,
        "max_gpu_memory_usage_mb": 4096,
        "max_cpu_usage_percent": 80
    })

    # Memory usage limits.
    memory_usage_limits: Dict[str, Any] = field(default_factory=lambda: {
        "max_heap_size_mb": 2048,
        "max_process_memory_mb": 4096,
        "memory_leak_threshold_mb": 100  # growth must stay under 100MB per hour
    })

    # Network performance limits.
    network_performance: Dict[str, Any] = field(default_factory=lambda: {
        "max_video_stream_latency_ms": 500,
        "max_websocket_message_delay_ms": 100,
        "max_file_upload_time_seconds": 30,
        "min_throughput_mbps": 10
    })


class BenchmarkEnvironment:
    """Environment-aware benchmark configuration.

    Reads ``TEST_ENV`` (default ``"development"``) at construction time and
    derives API response-time limits: production is 20% stricter, development
    50% looser, staging uses the standard limits unchanged.
    """

    def __init__(self):
        self.environment = os.getenv("TEST_ENV", "development")
        self.benchmarks = self._get_environment_benchmarks()

    @staticmethod
    def _scaled_benchmark(factor: float) -> "PerformanceBenchmark":
        """Return a PerformanceBenchmark whose API time limits are scaled by *factor*.

        The scaled limits are assigned as a brand-new dict rather than mutated
        in place. The original code mutated the dict in place; because the
        dataclass attributes were shared class-level dicts, that leaked the
        scaling into every other instance and compounded the factor each time
        a BenchmarkEnvironment was constructed.
        """
        benchmark = PerformanceBenchmark()
        benchmark.api_response_time_limits = {
            endpoint: limit * factor
            for endpoint, limit in benchmark.api_response_time_limits.items()
        }
        return benchmark

    def _get_environment_benchmarks(self) -> PerformanceBenchmark:
        """Pick the benchmark profile matching ``self.environment``."""
        if self.environment == "production":
            # Production: tighten all API time limits by 20%.
            return self._scaled_benchmark(0.8)
        if self.environment == "staging":
            # Staging: standard limits.
            return PerformanceBenchmark()
        # Development (and any unrecognized environment): relax limits by 50%.
        return self._scaled_benchmark(1.5)

    def get_benchmark_for_test(self, test_type: str) -> Dict[str, Any]:
        """Return the benchmark table for *test_type*, or ``{}`` if unknown."""
        benchmark_map = {
            "api_response": self.benchmarks.api_response_time_limits,
            "concurrent": self.benchmarks.concurrent_test_config,
            "database": self.benchmarks.database_performance,
            "ai_inference": self.benchmarks.ai_inference_performance,
            "memory": self.benchmarks.memory_usage_limits,
            "network": self.benchmarks.network_performance
        }
        return benchmark_map.get(test_type, {})


class TestDataGenerator:
    """Factories for synthetic test fixtures (cameras, alerts, AI tasks)."""

    @staticmethod
    def generate_camera_data(count: int = 10) -> List[Dict[str, Any]]:
        """Build *count* camera records; every 4th camera (0-based) is offline."""
        def _camera(idx: int) -> Dict[str, Any]:
            host = f"192.168.1.{100 + idx}"
            return {
                "name": f"测试摄像头{idx + 1}",
                "location": f"测试位置{idx + 1}",
                "ip_address": host,
                "port": 554,
                "username": "admin",
                "password": "123456",
                "stream_url": f"rtsp://{host}:554/stream1",
                "status": "offline" if idx % 4 == 0 else "online",
                "position": {"x": float(idx * 50), "y": float(idx * 30), "z": 10.0}
            }

        return [_camera(i) for i in range(count)]

    @staticmethod
    def generate_alert_data(count: int = 20) -> List[Dict[str, Any]]:
        """Build *count* alert records, cycling through types/levels/locations."""
        alert_types = ["crowd_density", "behavior_anomaly", "equipment_failure", "security_breach"]
        levels = ["low", "medium", "high", "critical"]
        locations = ["教学楼A", "图书馆", "食堂", "宿舍楼", "体育馆"]

        return [
            {
                "title": f"测试告警{idx + 1}",
                "message": f"这是第{idx + 1}个测试告警的详细描述",
                "level": levels[idx % len(levels)],
                "location": locations[idx % len(locations)],
                "alert_type": alert_types[idx % len(alert_types)],
                "metadata": {"test_id": idx + 1, "generated": True}
            }
            for idx in range(count)
        ]

    @staticmethod
    def generate_ai_task_data(camera_ids: List[str]) -> List[Dict[str, Any]]:
        """Build one task per (camera, task type) pair, camera-major order."""
        task_types = ["person_detection", "behavior_analysis", "crowd_density", "object_detection"]

        return [
            {
                "camera_id": camera_id,
                "task_type": task_type,
                "parameters": {
                    "confidence_threshold": 0.5,
                    "analysis_interval": 1.0,
                    "enable_tracking": True
                }
            }
            for camera_id in camera_ids
            for task_type in task_types
        ]


class PerformanceMetrics:
    """Collects raw performance samples and produces aggregate summaries.

    NOTE(review): all record_* methods call ``time.time()``, so ``time`` must
    be imported at module level.
    """

    def __init__(self):
        # One sample list per metric family.
        self.metrics = {
            key: []
            for key in ("response_times", "error_rates", "throughput",
                        "resource_usage", "concurrent_users")
        }

    def record_response_time(self, endpoint: str, response_time: float):
        """Append one response-time sample (seconds) for *endpoint*."""
        sample = {
            "endpoint": endpoint,
            "response_time": response_time,
            "timestamp": time.time()
        }
        self.metrics["response_times"].append(sample)

    def record_error_rate(self, endpoint: str, error_count: int, total_requests: int):
        """Append one error-rate sample; rate is 0 when no requests were made."""
        if total_requests > 0:
            rate = error_count / total_requests
        else:
            rate = 0
        sample = {
            "endpoint": endpoint,
            "error_rate": rate,
            "error_count": error_count,
            "total_requests": total_requests,
            "timestamp": time.time()
        }
        self.metrics["error_rates"].append(sample)

    def record_throughput(self, endpoint: str, requests_per_second: float):
        """Append one throughput sample (requests per second)."""
        sample = {
            "endpoint": endpoint,
            "rps": requests_per_second,
            "timestamp": time.time()
        }
        self.metrics["throughput"].append(sample)

    def record_resource_usage(self, cpu_percent: float, memory_mb: float, gpu_memory_mb: float = 0):
        """Append one resource-usage sample; GPU memory defaults to 0."""
        sample = {
            "cpu_percent": cpu_percent,
            "memory_mb": memory_mb,
            "gpu_memory_mb": gpu_memory_mb,
            "timestamp": time.time()
        }
        self.metrics["resource_usage"].append(sample)

    def get_summary(self) -> Dict[str, Any]:
        """Aggregate the collected samples into avg/min/max-style statistics.

        Sections with no samples are omitted from the returned dict.
        """
        def average(values):
            return sum(values) / len(values)

        summary = {}

        # Response-time statistics.
        response_times = [s["response_time"] for s in self.metrics["response_times"]]
        if response_times:
            ordered = sorted(response_times)
            summary["response_time"] = {
                "avg": average(response_times),
                "min": ordered[0],
                "max": ordered[-1],
                # Same index rule as always: floor(0.95 * n) into the sorted list.
                "p95": ordered[int(len(ordered) * 0.95)]
            }

        # Error-rate statistics.
        error_rates = [s["error_rate"] for s in self.metrics["error_rates"]]
        if error_rates:
            summary["error_rate"] = {
                "avg": average(error_rates),
                "max": max(error_rates)
            }

        # Throughput statistics.
        rps_values = [s["rps"] for s in self.metrics["throughput"]]
        if rps_values:
            summary["throughput"] = {
                "avg_rps": average(rps_values),
                "max_rps": max(rps_values)
            }

        # Resource-usage statistics.
        usage_samples = self.metrics["resource_usage"]
        if usage_samples:
            cpu_values = [s["cpu_percent"] for s in usage_samples]
            memory_values = [s["memory_mb"] for s in usage_samples]
            summary["resource_usage"] = {
                "avg_cpu_percent": average(cpu_values),
                "max_cpu_percent": max(cpu_values),
                "avg_memory_mb": average(memory_values),
                "max_memory_mb": max(memory_values)
            }

        return summary

    def export_to_file(self, filename: str):
        """Dump the raw samples plus the computed summary as UTF-8 JSON."""
        import json
        payload = {
            "metrics": self.metrics,
            "summary": self.get_summary()
        }
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(payload, f, indent=2, ensure_ascii=False)


# Global performance-metrics collector instance shared across the test suite.
performance_metrics = PerformanceMetrics()

# Global benchmark-environment instance; note this reads TEST_ENV at import time.
benchmark_env = BenchmarkEnvironment()