#!/usr/bin/env python3
"""
RedFire API Gateway性能测试
负载测试、压力测试和故障恢复测试
"""

import argparse
import asyncio
import csv
import json
import random
import statistics
import time
from dataclasses import dataclass, asdict
from pathlib import Path
from typing import List, Dict, Any, Optional

import aiohttp
import structlog


# Processor pipeline for structured JSON log output: level filtering first,
# then metadata enrichment (logger name, level, ISO timestamp), exception
# rendering, and finally JSON serialization.
_LOG_PROCESSORS = [
    structlog.stdlib.filter_by_level,
    structlog.stdlib.add_logger_name,
    structlog.stdlib.add_log_level,
    structlog.stdlib.PositionalArgumentsFormatter(),
    structlog.processors.TimeStamper(fmt="iso"),
    structlog.processors.StackInfoRenderer(),
    structlog.processors.format_exc_info,
    structlog.processors.UnicodeDecoder(),
    structlog.processors.JSONRenderer(),
]

structlog.configure(
    processors=_LOG_PROCESSORS,
    context_class=dict,
    logger_factory=structlog.stdlib.LoggerFactory(),
    wrapper_class=structlog.stdlib.BoundLogger,
    cache_logger_on_first_use=True,
)

# Module-level logger used throughout this script.
logger = structlog.get_logger(__name__)


@dataclass
class RequestMetrics:
    """Outcome of a single HTTP request issued during a test run."""
    timestamp: float  # wall-clock time (time.time()) when the request was started
    response_time: float  # seconds from send until the full response body was read
    status_code: int  # HTTP status; 0 when the request failed before any response
    success: bool  # True when status_code < 400
    error_message: Optional[str] = None  # exception text for failed requests, else None


@dataclass
class TestScenario:
    """Configuration for one load-test scenario."""
    name: str  # human-readable scenario label used in logs and reports
    concurrent_users: int  # number of simulated users run concurrently
    requests_per_user: int  # requests each simulated user issues sequentially
    duration_seconds: Optional[int] = None  # NOTE(review): declared but never read by the runner — confirm intent
    ramp_up_seconds: int = 0  # user start times are staggered evenly across this window
    endpoint: str = "/api/v1/market/quotes"  # request path appended to the gateway base URL
    method: str = "GET"  # HTTP method to use
    payload: Optional[Dict] = None  # JSON request body, sent only when provided


@dataclass
class PerformanceResults:
    """Aggregated metrics for one completed scenario.

    Response-time statistics (avg/min/max and percentiles) are computed over
    successful requests only; all durations are in seconds.
    """
    scenario_name: str
    total_requests: int
    successful_requests: int  # requests that returned HTTP status < 400
    failed_requests: int  # total_requests - successful_requests
    success_rate: float  # successful_requests / total_requests (0.0 when no requests)
    total_duration: float  # wall-clock seconds for the scenario, including ramp-up
    requests_per_second: float  # total_requests / total_duration
    avg_response_time: float
    min_response_time: float
    max_response_time: float
    p50_response_time: float  # median response time
    p90_response_time: float
    p95_response_time: float
    p99_response_time: float
    error_distribution: Dict[str, int]  # error message (or "HTTP_<code>") -> occurrence count


class GatewayPerformanceTester:
    """Load-, stress- and spike-tester for the RedFire API gateway.

    Intended to be used as an async context manager::

        async with GatewayPerformanceTester("http://host:8000") as tester:
            await tester.run_performance_tests()

    Per-scenario results accumulate in ``self.results``; when the suite
    finishes, JSON and CSV reports are written next to this file.
    """

    def __init__(self, gateway_url: str = "http://localhost:8000"):
        """
        Args:
            gateway_url: Base URL of the gateway under test. A trailing
                slash is stripped so endpoint paths can be appended directly.
        """
        self.gateway_url = gateway_url.rstrip('/')
        self.session: Optional[aiohttp.ClientSession] = None
        self.results: List[PerformanceResults] = []

    async def __aenter__(self):
        # Generous connection limits so the test client itself does not
        # become the bottleneck under the 500-user spike scenario.
        connector = aiohttp.TCPConnector(limit=1000, limit_per_host=100)
        timeout = aiohttp.ClientTimeout(total=30, connect=10)
        self.session = aiohttp.ClientSession(connector=connector, timeout=timeout)
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.session:
            await self.session.close()

    @staticmethod
    def _default_scenarios() -> List[TestScenario]:
        """Built-in suite: light load through stress, spike and endurance."""
        return [
            TestScenario(
                name="Light Load",
                concurrent_users=10,
                requests_per_user=50,
                ramp_up_seconds=5
            ),
            TestScenario(
                name="Medium Load",
                concurrent_users=50,
                requests_per_user=100,
                ramp_up_seconds=10
            ),
            TestScenario(
                name="Heavy Load",
                concurrent_users=100,
                requests_per_user=50,
                ramp_up_seconds=15
            ),
            TestScenario(
                name="Stress Test",
                concurrent_users=200,
                requests_per_user=25,
                ramp_up_seconds=20
            ),
            TestScenario(
                name="Spike Test",
                concurrent_users=500,
                requests_per_user=10,
                ramp_up_seconds=5
            ),
            TestScenario(
                name="Endurance Test",
                concurrent_users=30,
                requests_per_user=200,
                ramp_up_seconds=10
            ),
            TestScenario(
                name="Mixed Workload",
                concurrent_users=50,
                requests_per_user=50,
                endpoint="/api/v1/users",
                method="POST",
                payload={"name": "test_user", "email": "test@example.com"},
                ramp_up_seconds=10
            )
        ]

    async def run_performance_tests(
        self,
        scenarios: Optional[List[TestScenario]] = None
    ):
        """Run a suite of scenarios sequentially, then generate reports.

        Args:
            scenarios: Optional custom scenario list. Defaults to the
                built-in suite, so the original zero-argument call keeps
                working unchanged. A failed scenario is logged and skipped;
                the remaining scenarios still run.
        """
        logger.info("🚀 Starting RedFire Gateway Performance Tests")

        if scenarios is None:
            scenarios = self._default_scenarios()

        for scenario in scenarios:
            logger.info(f"📊 Running scenario: {scenario.name}")
            try:
                result = await self._run_scenario(scenario)
                self.results.append(result)
                await self._log_scenario_results(result)

                # Brief pause so the gateway can recover between scenarios.
                await asyncio.sleep(5)

            except Exception as e:
                logger.error(f"❌ Scenario {scenario.name} failed: {e}", exc_info=True)

        await self._generate_performance_report()

    async def _run_scenario(self, scenario: TestScenario) -> PerformanceResults:
        """Run one scenario: spawn all users, gather their metrics, analyze.

        User start times are staggered linearly across the scenario's
        ramp-up window; a user task that dies is logged but does not abort
        the scenario.
        """
        logger.info(f"Starting {scenario.name} with {scenario.concurrent_users} users")

        start_time = time.time()
        all_metrics: List[RequestMetrics] = []

        # Spawn one task per simulated user with a linearly increasing
        # start delay so load ramps up instead of arriving all at once.
        user_tasks = []
        for user_id in range(scenario.concurrent_users):
            delay = (user_id / scenario.concurrent_users) * scenario.ramp_up_seconds

            task = asyncio.create_task(
                self._simulate_user(scenario, user_id, delay),
                name=f"user_{user_id}"
            )
            user_tasks.append(task)

        # return_exceptions=True: a crashed user task must not cancel the rest.
        user_results = await asyncio.gather(*user_tasks, return_exceptions=True)

        for result in user_results:
            if isinstance(result, list):
                all_metrics.extend(result)
            elif isinstance(result, Exception):
                logger.error(f"User task failed: {result}")

        end_time = time.time()
        total_duration = end_time - start_time

        return self._analyze_metrics(scenario.name, all_metrics, total_duration)

    async def _simulate_user(
        self,
        scenario: TestScenario,
        user_id: int,
        delay: float
    ) -> List[RequestMetrics]:
        """Simulate one user: delayed start, sequential requests, think time.

        Every request produces a RequestMetrics entry, including ones that
        raise before a response is received.
        """
        if delay > 0:
            await asyncio.sleep(delay)

        metrics = []

        for request_id in range(scenario.requests_per_user):
            try:
                metric = await self._make_request(scenario, user_id, request_id)
                metrics.append(metric)

                # Simulated user think time: uniformly random 1-100 ms.
                # (Replaces a hash()-based value that was fixed per request
                # and varied across processes due to hash randomization.)
                await asyncio.sleep(random.uniform(0.001, 0.1))

            except Exception as e:
                # Record the failure instead of losing the request.
                metric = RequestMetrics(
                    timestamp=time.time(),
                    response_time=0.0,
                    status_code=0,
                    success=False,
                    error_message=str(e)
                )
                metrics.append(metric)

        return metrics

    async def _make_request(
        self,
        scenario: TestScenario,
        user_id: int,
        request_id: int
    ) -> RequestMetrics:
        """Send one request and return its metrics (never raises for HTTP
        or transport errors — those are captured in the returned metric)."""
        if self.session is None:
            # Surfaces as a recorded failure via _simulate_user's handler.
            raise RuntimeError(
                "HTTP session not initialized; use 'async with GatewayPerformanceTester(...)'"
            )

        url = f"{self.gateway_url}{scenario.endpoint}"
        start_time = time.time()

        try:
            # Unique X-Request-ID lets individual requests be traced in
            # gateway logs.
            kwargs = {
                "headers": {
                    "User-Agent": f"PerformanceTest-User{user_id}",
                    "X-Request-ID": f"perf-{user_id}-{request_id}-{int(time.time() * 1000)}"
                }
            }

            if scenario.payload:
                kwargs["json"] = scenario.payload

            async with self.session.request(scenario.method, url, **kwargs) as response:
                # Drain the body so response_time covers the full transfer.
                await response.read()

                response_time = time.time() - start_time

                return RequestMetrics(
                    timestamp=start_time,
                    response_time=response_time,
                    status_code=response.status,
                    success=response.status < 400
                )

        except Exception as e:
            response_time = time.time() - start_time
            return RequestMetrics(
                timestamp=start_time,
                response_time=response_time,
                status_code=0,
                success=False,
                error_message=str(e)
            )

    @staticmethod
    def _percentile(sorted_times: List[float], q: float) -> float:
        """Index-method percentile of a pre-sorted list.

        Uses the same ``times[int(len * q)]`` estimator as before, but
        clamped to the last element for safety. Returns 0.0 for an empty list.
        """
        if not sorted_times:
            return 0.0
        return sorted_times[min(int(len(sorted_times) * q), len(sorted_times) - 1)]

    def _analyze_metrics(
        self,
        scenario_name: str,
        metrics: List[RequestMetrics],
        total_duration: float
    ) -> PerformanceResults:
        """Aggregate per-request metrics into a PerformanceResults.

        Response-time statistics are computed over successful requests only;
        an empty metrics list yields an all-zero result.
        """
        if not metrics:
            return PerformanceResults(
                scenario_name=scenario_name,
                total_requests=0,
                successful_requests=0,
                failed_requests=0,
                success_rate=0.0,
                total_duration=total_duration,
                requests_per_second=0.0,
                avg_response_time=0.0,
                min_response_time=0.0,
                max_response_time=0.0,
                p50_response_time=0.0,
                p90_response_time=0.0,
                p95_response_time=0.0,
                p99_response_time=0.0,
                error_distribution={}
            )

        # Basic counts.
        total_requests = len(metrics)
        successful_requests = sum(1 for m in metrics if m.success)
        failed_requests = total_requests - successful_requests
        success_rate = successful_requests / total_requests if total_requests > 0 else 0.0

        # Response-time statistics over successful requests only.
        response_times = [m.response_time for m in metrics if m.success]

        if response_times:
            avg_response_time = statistics.mean(response_times)
            min_response_time = min(response_times)
            max_response_time = max(response_times)

            sorted_times = sorted(response_times)
            p50_response_time = statistics.median(sorted_times)
            p90_response_time = self._percentile(sorted_times, 0.90)
            p95_response_time = self._percentile(sorted_times, 0.95)
            p99_response_time = self._percentile(sorted_times, 0.99)
        else:
            avg_response_time = min_response_time = max_response_time = 0.0
            p50_response_time = p90_response_time = p95_response_time = p99_response_time = 0.0

        # Group failures by error message (or synthetic HTTP_<code> key).
        error_distribution: Dict[str, int] = {}
        for metric in metrics:
            if not metric.success:
                error_key = metric.error_message or f"HTTP_{metric.status_code}"
                error_distribution[error_key] = error_distribution.get(error_key, 0) + 1

        # Throughput over the whole scenario window (including ramp-up).
        requests_per_second = total_requests / total_duration if total_duration > 0 else 0.0

        return PerformanceResults(
            scenario_name=scenario_name,
            total_requests=total_requests,
            successful_requests=successful_requests,
            failed_requests=failed_requests,
            success_rate=success_rate,
            total_duration=total_duration,
            requests_per_second=requests_per_second,
            avg_response_time=avg_response_time,
            min_response_time=min_response_time,
            max_response_time=max_response_time,
            p50_response_time=p50_response_time,
            p90_response_time=p90_response_time,
            p95_response_time=p95_response_time,
            p99_response_time=p99_response_time,
            error_distribution=error_distribution
        )

    async def _log_scenario_results(self, result: PerformanceResults):
        """Log a one-scenario summary; failures get a per-error breakdown."""
        logger.info(f"✅ {result.scenario_name} completed")
        logger.info(f"   Total Requests: {result.total_requests}")
        logger.info(f"   Success Rate: {result.success_rate:.2%}")
        logger.info(f"   RPS: {result.requests_per_second:.2f}")
        logger.info(f"   Avg Response Time: {result.avg_response_time*1000:.2f}ms")
        logger.info(f"   P95 Response Time: {result.p95_response_time*1000:.2f}ms")

        if result.failed_requests > 0:
            logger.warning(f"   Failed Requests: {result.failed_requests}")
            for error, count in result.error_distribution.items():
                logger.warning(f"     {error}: {count}")

    async def _generate_performance_report(self):
        """Write JSON + CSV reports next to this file and log a summary."""
        if not self.results:
            logger.warning("No performance test results to report")
            return

        report = {
            "test_summary": {
                "timestamp": time.time(),
                "total_scenarios": len(self.results),
                "gateway_url": self.gateway_url
            },
            "scenarios": [asdict(result) for result in self.results],
            "overall_statistics": self._calculate_overall_stats()
        }

        # Explicit UTF-8 so output is stable regardless of the platform's
        # default encoding (e.g. cp1252 on Windows).
        report_path = Path(__file__).parent / "performance_report.json"
        with open(report_path, 'w', encoding='utf-8') as f:
            json.dump(report, f, indent=2, ensure_ascii=False)

        await self._generate_csv_report()

        logger.info("📊 Performance Test Report")
        logger.info(f"   Total Scenarios: {len(self.results)}")

        for result in self.results:
            logger.info(f"   {result.scenario_name}:")
            logger.info(f"     Success Rate: {result.success_rate:.2%}")
            logger.info(f"     RPS: {result.requests_per_second:.2f}")
            logger.info(f"     Avg RT: {result.avg_response_time*1000:.2f}ms")
            logger.info(f"     P95 RT: {result.p95_response_time*1000:.2f}ms")

        logger.info(f"   Detailed report: {report_path}")

    def _calculate_overall_stats(self) -> Dict[str, Any]:
        """Cross-scenario aggregates for the JSON report ({} when empty)."""
        if not self.results:
            return {}

        total_requests = sum(r.total_requests for r in self.results)
        total_successful = sum(r.successful_requests for r in self.results)
        total_failed = sum(r.failed_requests for r in self.results)

        avg_success_rate = statistics.mean([r.success_rate for r in self.results])
        avg_rps = statistics.mean([r.requests_per_second for r in self.results])
        avg_response_time = statistics.mean([r.avg_response_time for r in self.results])

        return {
            "total_requests": total_requests,
            "total_successful": total_successful,
            "total_failed": total_failed,
            "overall_success_rate": total_successful / total_requests if total_requests > 0 else 0.0,
            "average_success_rate": avg_success_rate,
            "average_rps": avg_rps,
            "average_response_time": avg_response_time,
            "max_rps": max([r.requests_per_second for r in self.results]),
            "min_rps": min([r.requests_per_second for r in self.results])
        }

    async def _generate_csv_report(self):
        """Write a CSV summary of all scenarios.

        Uses csv.writer (instead of hand-built lines) so scenario names
        containing commas or quotes are escaped correctly.
        """
        csv_path = Path(__file__).parent / "performance_report.csv"

        with open(csv_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow([
                "Scenario", "Total Requests", "Success Rate", "RPS",
                "Avg RT(ms)", "P50 RT(ms)", "P90 RT(ms)", "P95 RT(ms)",
                "P99 RT(ms)", "Failed Requests"
            ])

            for result in self.results:
                writer.writerow([
                    result.scenario_name,
                    result.total_requests,
                    f"{result.success_rate:.4f}",
                    f"{result.requests_per_second:.2f}",
                    f"{result.avg_response_time*1000:.2f}",
                    f"{result.p50_response_time*1000:.2f}",
                    f"{result.p90_response_time*1000:.2f}",
                    f"{result.p95_response_time*1000:.2f}",
                    f"{result.p99_response_time*1000:.2f}",
                    result.failed_requests,
                ])

        logger.info(f"CSV report saved to: {csv_path}")


async def main():
    """CLI entry point: parse arguments and run the full test suite."""
    parser = argparse.ArgumentParser(description="RedFire Gateway Performance Tests")
    parser.add_argument(
        "--gateway-url",
        default="http://localhost:8000",
        help="Gateway URL (default: http://localhost:8000)",
    )
    parser.add_argument(
        "--quick",
        action="store_true",
        help="Run quick performance tests with reduced load",
    )
    args = parser.parse_args()

    async with GatewayPerformanceTester(args.gateway_url) as tester:
        if args.quick:
            # NOTE(review): quick mode currently only logs — the scenario
            # suite is not actually reduced here yet.
            logger.info("Running in quick test mode")
        await tester.run_performance_tests()


# Script entry point: run the async main() on a fresh event loop.
if __name__ == "__main__":
    asyncio.run(main())
