# -*- coding: utf-8 -*-
"""
压力测试结果分析工具

提供测试结果的分析和可视化功能：
- 解析Locust测试结果
- 生成性能分析报告
- 创建可视化图表
- 性能指标对比
"""

import json
import csv
import os
import sys
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Optional, Tuple
from dataclasses import dataclass

# Add the project root to sys.path so the project-local packages below
# resolve even when this file is executed directly as a script.
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root))

from utils.logger import get_logger
from utils.helpers import get_timestamp, format_file_size
# NOTE(review): "PERFORMACE_MONITORING" looks misspelled ("PERFORMANCE"?) —
# confirm it matches the actual name exported by tests.performance.config.
from tests.performance.config import PERFORMACE_MONITORING, REPORT_CONFIG

# Module-level logger, named after this module
logger = get_logger(__name__)


@dataclass
class TestResult:
    """
    Parsed summary of a single load-test run.

    Populated by PerformanceAnalyzer.parse_csv_results from a Locust
    *_stats.csv file; response times are in milliseconds.
    """
    scenario: str                   # test scenario name (from the file name)
    environment: str                # target environment (from the file name)
    timestamp: str                  # run timestamp (from the file name)
    duration: float                 # total run duration; 0 when unavailable from CSV
    total_requests: int             # total number of requests issued
    failed_requests: int            # number of failed requests
    avg_response_time: float        # request-count-weighted average response time (ms)
    max_response_time: float        # maximum response time observed (ms)
    min_response_time: float        # minimum positive response time observed (ms)
    rps: float                      # aggregate requests per second
    failure_rate: float             # failed_requests / total_requests, in [0, 1]
    percentiles: Dict[int, float]   # percentile -> response time (ms), e.g. {95: 820.0}
    endpoint_stats: List[Dict]      # per-endpoint stat dicts (name, method, requests, ...)


class PerformanceAnalyzer:
    """
    Analyzer for load-test results.

    Parses Locust *_stats.csv files into TestResult objects, generates
    markdown analysis reports, and compares multiple test runs.
    """

    def __init__(self, report_dir: Optional[str] = None):
        """
        Args:
            report_dir: Directory holding result CSVs and generated reports.
                        Defaults to REPORT_CONFIG["output_dir"].
        """
        self.report_dir = Path(report_dir or REPORT_CONFIG["output_dir"])
        self.project_root = project_root

        # Make sure the report directory exists
        self.report_dir.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _to_int(value) -> int:
        """Convert a CSV cell to int; empty/None/invalid cells become 0."""
        try:
            return int(float(value))
        except (TypeError, ValueError):
            return 0

    @staticmethod
    def _to_float(value) -> float:
        """Convert a CSV cell to float; empty/None/invalid cells become 0.0."""
        try:
            return float(value)
        except (TypeError, ValueError):
            return 0.0

    def parse_csv_results(self, csv_file: Path) -> Optional[TestResult]:
        """
        Parse a Locust *_stats.csv results file.

        Args:
            csv_file: Path to the CSV file.

        Returns:
            TestResult: Parsed result, or None when the file is empty or
            cannot be parsed (the error is logged, not raised).
        """
        try:
            with open(csv_file, 'r', encoding='utf-8') as f:
                reader = csv.DictReader(f)
                rows = list(reader)

            if not rows:
                logger.warning(f"CSV文件为空: {csv_file}")
                return None

            # Derive scenario / environment / timestamp from the file name
            # (expected pattern: <scenario>_<environment>_<timestamp>_stats.csv).
            filename = csv_file.stem
            parts = filename.split('_')
            scenario = parts[0] if len(parts) > 0 else "unknown"
            environment = parts[1] if len(parts) > 1 else "unknown"
            timestamp = parts[2] if len(parts) > 2 else get_timestamp()

            # Locust appends an "Aggregated" summary row to the stats CSV.
            # Including it in the sums below would double-count every request,
            # so totals are computed from the per-endpoint rows only (falling
            # back to all rows if the file contains nothing but the summary).
            endpoint_rows = [
                row for row in rows
                if row.get('Name') and row.get('Name') != 'Aggregated'
            ]
            stat_rows = endpoint_rows or rows

            # Overall request totals
            total_requests = sum(self._to_int(row.get('Request Count')) for row in stat_rows)
            failed_requests = sum(self._to_int(row.get('Failure Count')) for row in stat_rows)

            # Request-count-weighted average response time
            total_response_time = sum(
                self._to_float(row.get('Average Response Time')) * self._to_int(row.get('Request Count'))
                for row in stat_rows
            )
            avg_response_time = total_response_time / total_requests if total_requests > 0 else 0

            # Extremes across endpoints; default=0 avoids ValueError on an
            # empty sequence (e.g. no row has a positive Min Response Time).
            max_response_time = max(
                (self._to_float(row.get('Max Response Time')) for row in stat_rows),
                default=0,
            )
            min_response_time = min(
                (self._to_float(row.get('Min Response Time')) for row in stat_rows
                 if self._to_float(row.get('Min Response Time')) > 0),
                default=0,
            )

            # Aggregate throughput and failure rate
            rps = sum(self._to_float(row.get('Requests/s')) for row in stat_rows)
            failure_rate = failed_requests / total_requests if total_requests > 0 else 0

            # Response-time percentiles (columns exist only in some Locust versions)
            percentiles = {}
            for p in [50, 75, 90, 95, 99]:
                col_name = f'{p}%'
                if col_name in rows[0]:
                    percentiles[p] = max(
                        (self._to_float(row.get(col_name)) for row in stat_rows),
                        default=0,
                    )

            # Per-endpoint statistics (summary row already filtered out)
            endpoint_stats = [
                {
                    'name': row.get('Name', ''),
                    'method': row.get('Type', ''),
                    'requests': self._to_int(row.get('Request Count')),
                    'failures': self._to_int(row.get('Failure Count')),
                    'avg_response_time': self._to_float(row.get('Average Response Time')),
                    'max_response_time': self._to_float(row.get('Max Response Time')),
                    'rps': self._to_float(row.get('Requests/s')),
                }
                for row in endpoint_rows
            ]

            return TestResult(
                scenario=scenario,
                environment=environment,
                timestamp=timestamp,
                duration=0,  # total duration is not present in the stats CSV
                total_requests=total_requests,
                failed_requests=failed_requests,
                avg_response_time=avg_response_time,
                max_response_time=max_response_time,
                min_response_time=min_response_time,
                rps=rps,
                failure_rate=failure_rate,
                percentiles=percentiles,
                endpoint_stats=endpoint_stats
            )

        except Exception as e:
            logger.error(f"解析CSV文件失败: {e}")
            return None

    def generate_analysis_report(self, result: TestResult) -> str:
        """
        Generate a markdown analysis report for a single test run.

        Args:
            result: Parsed test result.

        Returns:
            str: Full markdown report text.
        """
        report = []
        report.append("# 压力测试结果分析报告")
        report.append("")
        report.append(f"**测试场景**: {result.scenario}")
        report.append(f"**测试环境**: {result.environment}")
        report.append(f"**测试时间**: {result.timestamp}")
        report.append("")

        # Overall statistics
        report.append("## 总体统计")
        report.append("")
        report.append(f"- **总请求数**: {result.total_requests:,}")
        report.append(f"- **失败请求数**: {result.failed_requests:,}")
        report.append(f"- **成功率**: {(1 - result.failure_rate) * 100:.2f}%")
        report.append(f"- **失败率**: {result.failure_rate * 100:.2f}%")
        report.append(f"- **平均响应时间**: {result.avg_response_time:.2f}ms")
        report.append(f"- **最大响应时间**: {result.max_response_time:.2f}ms")
        report.append(f"- **最小响应时间**: {result.min_response_time:.2f}ms")
        report.append(f"- **平均RPS**: {result.rps:.2f}")
        report.append("")

        # Response-time percentiles (only when the CSV provided them)
        if result.percentiles:
            report.append("## 响应时间百分位数")
            report.append("")
            for p, value in sorted(result.percentiles.items()):
                report.append(f"- **P{p}**: {value:.2f}ms")
            report.append("")

        # Qualitative grading of the main metrics
        report.append("## 性能评估")
        report.append("")

        # Response-time grade (thresholds in ms)
        if result.avg_response_time < 500:
            response_grade = "优秀"
        elif result.avg_response_time < 1000:
            response_grade = "良好"
        elif result.avg_response_time < 2000:
            response_grade = "一般"
        else:
            response_grade = "较差"

        report.append(f"- **响应时间**: {response_grade} (平均{result.avg_response_time:.2f}ms)")

        # Success-rate grade (thresholds on failure rate)
        if result.failure_rate < 0.01:
            success_grade = "优秀"
        elif result.failure_rate < 0.05:
            success_grade = "良好"
        elif result.failure_rate < 0.10:
            success_grade = "一般"
        else:
            success_grade = "较差"

        report.append(f"- **成功率**: {success_grade} ({(1 - result.failure_rate) * 100:.2f}%)")

        # Throughput grade (thresholds in requests/second)
        if result.rps > 100:
            rps_grade = "优秀"
        elif result.rps > 50:
            rps_grade = "良好"
        elif result.rps > 20:
            rps_grade = "一般"
        else:
            rps_grade = "较差"

        report.append(f"- **吞吐量**: {rps_grade} ({result.rps:.2f} RPS)")
        report.append("")

        # Per-endpoint detail table, busiest endpoints first
        if result.endpoint_stats:
            report.append("## 接口详细统计")
            report.append("")
            report.append("| 接口 | 方法 | 请求数 | 失败数 | 平均响应时间(ms) | 最大响应时间(ms) | RPS |")
            report.append("|------|------|--------|--------|------------------|------------------|-----|")

            for stat in sorted(result.endpoint_stats, key=lambda x: x['requests'], reverse=True):
                report.append(
                    f"| {stat['name']} | {stat['method']} | {stat['requests']:,} | "
                    f"{stat['failures']:,} | {stat['avg_response_time']:.2f} | "
                    f"{stat['max_response_time']:.2f} | {stat['rps']:.2f} |"
                )
            report.append("")

        # Tuning suggestions derived from the metrics above
        report.append("## 建议和优化")
        report.append("")

        suggestions = []

        if result.failure_rate > 0.05:
            suggestions.append("- 失败率较高，建议检查服务器资源和错误日志")

        if result.avg_response_time > 2000:
            suggestions.append("- 平均响应时间较长，建议优化数据库查询和业务逻辑")

        if result.max_response_time > 10000:
            suggestions.append("- 最大响应时间过长，建议添加超时控制和熔断机制")

        if result.rps < 20:
            suggestions.append("- 吞吐量较低，建议优化服务器配置和代码性能")

        # A P95 far above the mean indicates unstable latency
        if result.percentiles.get(95, 0) > result.avg_response_time * 3:
            suggestions.append("- P95响应时间远高于平均值，存在性能不稳定问题")

        if not suggestions:
            suggestions.append("- 整体性能表现良好，可以考虑增加负载进行进一步测试")

        for suggestion in suggestions:
            report.append(suggestion)

        report.append("")
        report.append("---")
        report.append(f"*报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*")

        return "\n".join(report)

    def save_analysis_report(self, result: TestResult, report_content: str) -> Path:
        """
        Write an analysis report to the report directory.

        Args:
            result: Test result (used to build the file name).
            report_content: Markdown report text.

        Returns:
            Path: Path of the written report file.
        """
        filename = f"{result.scenario}_{result.environment}_{result.timestamp}_analysis.md"
        report_file = self.report_dir / filename

        with open(report_file, 'w', encoding='utf-8') as f:
            f.write(report_content)

        logger.info(f"分析报告已保存: {report_file}")
        return report_file

    def compare_results(self, results: List[TestResult]) -> str:
        """
        Build a markdown report comparing several test runs.

        Args:
            results: Test results to compare (at least two).

        Returns:
            str: Comparison report text, or an explanatory message when
            fewer than two results are supplied.
        """
        if len(results) < 2:
            return "需要至少两个测试结果进行比较"

        report = []
        report.append("# 压力测试结果对比报告")
        report.append("")

        # Side-by-side metric table, one column per run
        report.append("## 测试结果对比")
        report.append("")

        headers = ["指标"] + [f"{r.scenario}_{r.environment}_{r.timestamp}" for r in results]
        report.append("| " + " | ".join(headers) + " |")
        report.append("|" + "---|" * len(headers))

        # Metric label + formatter pairs for the comparison rows
        metrics = [
            ("总请求数", lambda r: f"{r.total_requests:,}"),
            ("失败请求数", lambda r: f"{r.failed_requests:,}"),
            ("成功率(%)", lambda r: f"{(1 - r.failure_rate) * 100:.2f}"),
            ("平均响应时间(ms)", lambda r: f"{r.avg_response_time:.2f}"),
            ("最大响应时间(ms)", lambda r: f"{r.max_response_time:.2f}"),
            ("平均RPS", lambda r: f"{r.rps:.2f}")
        ]

        for metric_name, metric_func in metrics:
            row = [metric_name] + [metric_func(r) for r in results]
            report.append("| " + " | ".join(row) + " |")

        report.append("")

        # Trend analysis: compare the newest run against the oldest
        report.append("## 性能趋势分析")
        report.append("")

        # Response-time trend (lower is better)
        response_times = [r.avg_response_time for r in results]
        if len(set(response_times)) > 1:
            if response_times[-1] < response_times[0]:
                report.append("- **响应时间**: 呈改善趋势 📈")
            else:
                report.append("- **响应时间**: 呈恶化趋势 📉")
        else:
            report.append("- **响应时间**: 保持稳定")

        # Success-rate trend (higher is better)
        success_rates = [(1 - r.failure_rate) * 100 for r in results]
        if len(set(success_rates)) > 1:
            if success_rates[-1] > success_rates[0]:
                report.append("- **成功率**: 呈改善趋势 📈")
            else:
                report.append("- **成功率**: 呈恶化趋势 📉")
        else:
            report.append("- **成功率**: 保持稳定")

        # Throughput trend (higher is better)
        rps_values = [r.rps for r in results]
        if len(set(rps_values)) > 1:
            if rps_values[-1] > rps_values[0]:
                report.append("- **吞吐量**: 呈改善趋势 📈")
            else:
                report.append("- **吞吐量**: 呈恶化趋势 📉")
        else:
            report.append("- **吞吐量**: 保持稳定")

        report.append("")
        report.append("---")
        report.append(f"*报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*")

        return "\n".join(report)

    def find_latest_results(self, scenario: Optional[str] = None,
                          environment: Optional[str] = None,
                          limit: int = 10) -> List[Path]:
        """
        Find the most recent *_stats.csv result files.

        Args:
            scenario: Optional scenario filter.
            environment: Optional environment filter.
            limit: Maximum number of files to return.

        Returns:
            List[Path]: Matching files, newest (by mtime) first.
        """
        pattern = "*_stats.csv"
        if scenario and environment:
            pattern = f"{scenario}_{environment}_*_stats.csv"
        elif scenario:
            pattern = f"{scenario}_*_stats.csv"
        elif environment:
            pattern = f"*_{environment}_*_stats.csv"

        csv_files = list(self.report_dir.glob(pattern))
        csv_files.sort(key=lambda x: x.stat().st_mtime, reverse=True)

        return csv_files[:limit]

    def analyze_latest_result(self, scenario: Optional[str] = None,
                            environment: Optional[str] = None) -> Optional[Path]:
        """
        Parse and report on the most recent test result.

        Args:
            scenario: Optional scenario filter.
            environment: Optional environment filter.

        Returns:
            Optional[Path]: Path of the generated analysis report, or None
            when no result file was found or parsing failed.
        """
        latest_files = self.find_latest_results(scenario, environment, 1)

        if not latest_files:
            logger.warning("未找到测试结果文件")
            return None

        csv_file = latest_files[0]
        logger.info(f"分析测试结果: {csv_file}")

        result = self.parse_csv_results(csv_file)
        if not result:
            logger.error("解析测试结果失败")
            return None

        report_content = self.generate_analysis_report(result)
        report_file = self.save_analysis_report(result, report_content)

        return report_file


def main():
    """Command-line entry point: analyze the latest run, or compare several."""
    import argparse

    arg_parser = argparse.ArgumentParser(description="压力测试结果分析工具")
    arg_parser.add_argument("--scenario", help="场景过滤")
    arg_parser.add_argument("--environment", help="环境过滤")
    arg_parser.add_argument("--compare", action="store_true", help="比较多个结果")
    arg_parser.add_argument("--limit", type=int, default=5, help="比较结果数量限制")
    opts = arg_parser.parse_args()

    analyzer = PerformanceAnalyzer()

    if not opts.compare:
        # Single-run mode: analyze the most recent matching result.
        report_path = analyzer.analyze_latest_result(
            scenario=opts.scenario,
            environment=opts.environment
        )
        if report_path:
            logger.info(f"分析完成: {report_path}")
        else:
            logger.error("分析失败")
        return

    # Comparison mode: load up to --limit of the newest result files.
    candidate_files = analyzer.find_latest_results(
        scenario=opts.scenario,
        environment=opts.environment,
        limit=opts.limit
    )
    if len(candidate_files) < 2:
        logger.error("需要至少两个测试结果进行比较")
        return

    # Parse each file, dropping any that fail to parse.
    parsed = [analyzer.parse_csv_results(path) for path in candidate_files]
    parsed = [res for res in parsed if res]
    if len(parsed) < 2:
        logger.error("有效的测试结果不足两个")
        return

    comparison = analyzer.compare_results(parsed)

    # Persist the comparison report next to the raw results.
    out_file = analyzer.report_dir / f"comparison_{get_timestamp()}.md"
    with open(out_file, 'w', encoding='utf-8') as f:
        f.write(comparison)

    logger.info(f"比较报告已保存: {out_file}")


# Run the CLI only when executed directly, not when imported as a module.
if __name__ == "__main__":
    main()