import json
from datetime import datetime, timedelta
from collections import defaultdict, Counter
from app.models.database import db, Vulnerability, Scan, TargetRange
from app.utils.logger import logger

class VulnerabilityStatsService:
    """
    Vulnerability statistics and analysis service.

    Provides aggregate vulnerability statistics, trend analysis, risk
    assessment and JSON export over the ``Vulnerability`` and ``Scan``
    models. All methods are stateless ``@staticmethod``s that query the
    database directly; on any error they log and re-raise.
    """

    @staticmethod
    def get_vulnerability_summary(start_date=None, end_date=None, user_id=None):
        """
        Build an aggregate summary of vulnerabilities.

        Args:
            start_date: optional lower bound on ``Scan.created_at``.
            end_date: optional upper bound on ``Scan.created_at``.
            user_id: optional ``Scan.created_by`` filter; falsy means "all users".

        Returns:
            dict with the total count, per-type and per-severity breakdowns,
            a monthly trend, the five most common vulnerability types and
            the average number of vulnerabilities per scan.

        Raises:
            Re-raises any underlying database error after logging it.
        """
        try:
            vuln_query = Vulnerability.query.join(Scan)
            # Mirror every filter onto the scan query so the per-scan
            # average is computed over the same scan population as the
            # vulnerability count. (Previously the scan count was taken
            # with created_by == user_id even when user_id was None —
            # matching no scans — and the date range was ignored.)
            scan_query = Scan.query

            if user_id:
                vuln_query = vuln_query.filter(Scan.created_by == user_id)
                scan_query = scan_query.filter(Scan.created_by == user_id)

            if start_date:
                vuln_query = vuln_query.filter(Scan.created_at >= start_date)
                scan_query = scan_query.filter(Scan.created_at >= start_date)

            if end_date:
                vuln_query = vuln_query.filter(Scan.created_at <= end_date)
                scan_query = scan_query.filter(Scan.created_at <= end_date)

            vulnerabilities = vuln_query.all()
            total_count = len(vulnerabilities)

            # Frequency breakdowns by type and severity.
            type_counter = Counter(v.vuln_type for v in vulnerabilities)
            severity_counter = Counter(v.severity for v in vulnerabilities)

            # Monthly trend keyed by "YYYY-MM". Severities outside the four
            # known buckets still count toward the month total, just not
            # toward a severity bucket (matches prior behavior).
            monthly_stats = defaultdict(lambda: {
                "total": 0,
                "by_severity": {"low": 0, "medium": 0, "high": 0, "critical": 0}
            })

            for vuln in vulnerabilities:
                month_key = vuln.created_at.strftime("%Y-%m")
                bucket = monthly_stats[month_key]
                bucket["total"] += 1
                if vuln.severity in bucket["by_severity"]:
                    bucket["by_severity"][vuln.severity] += 1

            monthly_trend = [
                {
                    "month": month,
                    "total": data["total"],
                    "severity_breakdown": data["by_severity"]
                }
                for month, data in sorted(monthly_stats.items())
            ]

            return {
                "total_vulnerabilities": total_count,
                "by_type": [
                    {"type": vuln_type, "count": count}
                    for vuln_type, count in type_counter.items()
                ],
                "by_severity": [
                    {"severity": severity, "count": count}
                    for severity, count in severity_counter.items()
                ],
                "monthly_trend": monthly_trend,
                # Counter.most_common(5) replaces the manual sort/slice.
                "top_vulnerabilities": [
                    {"type": vuln_type, "count": count}
                    for vuln_type, count in type_counter.most_common(5)
                ],
                "average_vulnerabilities_per_scan":
                    VulnerabilityStatsService._calculate_average_per_scan(
                        vulnerabilities, scan_query.count()
                    )
            }

        except Exception as e:
            logger.error(f"Error getting vulnerability summary: {str(e)}")
            raise

    @staticmethod
    def _calculate_average_per_scan(vulnerabilities, scan_count):
        """
        Return the average number of vulnerabilities found per scan,
        rounded to two decimals; 0 when no scans exist (avoids division
        by zero).
        """
        if scan_count == 0:
            return 0
        return round(len(vulnerabilities) / scan_count, 2)

    @staticmethod
    def get_target_vulnerability_stats(target_url, user_id=None):
        """
        Build vulnerability statistics for a single scan target.

        Args:
            target_url: value matched against ``Scan.target``.
            user_id: optional ``Scan.created_by`` filter.

        Returns:
            dict with total scans/vulnerabilities, the latest scan time and
            a per-scan history (each entry: date, total, severity/type
            breakdowns), ordered chronologically.

        Raises:
            Re-raises any underlying database error after logging it.
        """
        try:
            query = Vulnerability.query.join(Scan).filter(Scan.target == target_url)

            if user_id:
                query = query.filter(Scan.created_by == user_id)

            vulnerabilities = query.all()

            # Group vulnerabilities by their originating scan. Grouping by
            # scan id (instead of comparing created_at timestamps, as
            # before) prevents two scans sharing a timestamp from being
            # merged/double-counted, and removes the per-vulnerability
            # Scan.query.get() round trips (previously O(scans * vulns)
            # queries).
            vulns_by_scan = defaultdict(list)
            for vuln in vulnerabilities:
                vulns_by_scan[vuln.scan_id].append(vuln)

            if not vulns_by_scan:
                return {
                    "target_url": target_url,
                    "total_scans": 0,
                    "total_vulnerabilities": 0,
                    "vulnerability_history": []
                }

            # One primary-key lookup per distinct scan to resolve its date.
            scan_dates = {
                scan_id: Scan.query.get(scan_id).created_at
                for scan_id in vulns_by_scan
            }

            # History entries in chronological order of scan date.
            vulnerability_history = []
            for scan_id, scan_date in sorted(scan_dates.items(),
                                             key=lambda item: item[1]):
                scan_vulns = vulns_by_scan[scan_id]
                vulnerability_history.append({
                    "scan_date": scan_date.isoformat(),
                    "total_vulnerabilities": len(scan_vulns),
                    "by_severity": dict(Counter(v.severity for v in scan_vulns)),
                    "by_type": dict(Counter(v.vuln_type for v in scan_vulns))
                })

            return {
                "target_url": target_url,
                "total_scans": len(scan_dates),
                "total_vulnerabilities": len(vulnerabilities),
                "latest_scan": max(scan_dates.values()).isoformat(),
                "vulnerability_history": vulnerability_history
            }

        except Exception as e:
            logger.error(f"Error getting target vulnerability stats: {str(e)}")
            raise

    @staticmethod
    def get_risk_assessment_summary(user_id=None):
        """
        Build a risk-assessment summary across all vulnerabilities.

        Args:
            user_id: optional ``Scan.created_by`` filter.

        Returns:
            dict with the overall average risk score, high-risk count
            (score >= 0.7 or severity high/critical), a monthly risk trend,
            per-severity distribution and per-type risk analysis.

        Raises:
            Re-raises any underlying database error after logging it.

        NOTE(review): assumes ``risk_score`` is never None — a NULL score
        would raise here; confirm against the model definition.
        """
        try:
            query = Vulnerability.query.join(Scan)

            if user_id:
                query = query.filter(Scan.created_by == user_id)

            vulnerabilities = query.all()

            # Overall average risk score (0 when there are no findings).
            risk_scores = [v.risk_score for v in vulnerabilities]
            average_risk = sum(risk_scores) / len(risk_scores) if risk_scores else 0

            # High risk: numeric threshold OR severity label.
            high_risk_vulnerabilities = [
                v for v in vulnerabilities
                if v.risk_score >= 0.7 or v.severity in ['high', 'critical']
            ]

            # Risk evolution per month ("YYYY-MM").
            risk_by_month = defaultdict(lambda: {
                "total": 0,
                "high_risk": 0,
                "average_score": 0,
                "scores": []
            })

            for vuln in vulnerabilities:
                month_key = vuln.created_at.strftime("%Y-%m")
                risk_by_month[month_key]["total"] += 1
                risk_by_month[month_key]["scores"].append(vuln.risk_score)
                if vuln.risk_score >= 0.7:
                    risk_by_month[month_key]["high_risk"] += 1

            # Collapse raw score lists into a monthly average.
            for month, data in risk_by_month.items():
                if data["scores"]:
                    data["average_score"] = sum(data["scores"]) / len(data["scores"])
                del data["scores"]  # drop the raw list from the output

            # Group once by severity and by type (the previous version
            # re-filtered the whole list for every distinct type).
            severity_distribution = defaultdict(list)
            vulns_by_type = defaultdict(list)
            for vuln in vulnerabilities:
                severity_distribution[vuln.severity].append(vuln)
                vulns_by_type[vuln.vuln_type].append(vuln)

            type_risk_analysis = {
                vuln_type: {
                    "count": len(type_vulns),
                    "average_risk_score": round(
                        sum(v.risk_score for v in type_vulns) / len(type_vulns), 3
                    ),
                    "high_risk_count": sum(
                        1 for v in type_vulns if v.risk_score >= 0.7
                    )
                }
                for vuln_type, type_vulns in vulns_by_type.items()
            }

            return {
                "average_risk_score": round(average_risk, 3),
                "total_high_risk_vulnerabilities": len(high_risk_vulnerabilities),
                "risk_trend": sorted([
                    {"month": month, **data}
                    for month, data in risk_by_month.items()
                ], key=lambda x: x["month"]),
                "severity_distribution": {
                    severity: {
                        "count": len(vulns),
                        "average_risk_score": round(
                            sum(v.risk_score for v in vulns) / len(vulns),
                            3
                        ) if vulns else 0
                    }
                    for severity, vulns in severity_distribution.items()
                },
                "vulnerability_type_risk": type_risk_analysis
            }

        except Exception as e:
            logger.error(f"Error getting risk assessment summary: {str(e)}")
            raise

    @staticmethod
    def get_remediation_progress(start_date=None, end_date=None, user_id=None):
        """
        Build remediation-progress statistics.

        Args:
            start_date: optional lower bound on ``Scan.created_at``.
            end_date: optional upper bound on ``Scan.created_at``.
            user_id: optional ``Scan.created_by`` filter.

        Returns:
            dict with total/fixed/in-progress counts, per-severity progress
            and static estimated fix times.

        NOTE(review): the data model has no remediation-status field, so
        this method intentionally reports every vulnerability as
        unremediated (fixed = 0, progress = 0%). Revisit once a status
        column exists.

        Raises:
            Re-raises any underlying database error after logging it.
        """
        try:
            query = Vulnerability.query.join(Scan)

            if user_id:
                query = query.filter(Scan.created_by == user_id)

            if start_date:
                query = query.filter(Scan.created_at >= start_date)

            if end_date:
                query = query.filter(Scan.created_at <= end_date)

            all_vulnerabilities = query.all()

            # Placeholder semantics: no fix-tracking yet, so everything is
            # considered "in progress".
            fixed_count = 0
            ongoing_count = len(all_vulnerabilities)

            # Count per severity bucket once, then report zero progress
            # for each known severity level.
            severity_totals = Counter(v.severity for v in all_vulnerabilities)
            severity_progress = {
                severity: {
                    "total": severity_totals.get(severity, 0),
                    "fixed": 0,
                    "in_progress": severity_totals.get(severity, 0),
                    "progress_percentage": 0
                }
                for severity in ['low', 'medium', 'high', 'critical']
            }

            return {
                "total_vulnerabilities": ongoing_count,
                "fixed_count": fixed_count,
                "in_progress_count": ongoing_count,
                "total_progress_percentage": 0,
                "severity_progress": severity_progress,
                # Static SLA-style estimates (Chinese labels are part of
                # the API payload and kept verbatim).
                "estimated_fix_time": {
                    "low": "1-2周",
                    "medium": "2-4周",
                    "high": "1-2周",
                    "critical": "24-48小时"
                }
            }

        except Exception as e:
            logger.error(f"Error getting remediation progress: {str(e)}")
            raise

    @staticmethod
    def export_statistics_to_json(start_date=None, end_date=None, user_id=None):
        """
        Export the combined statistics (summary, risk assessment and
        remediation progress) as a pretty-printed JSON string.

        Args:
            start_date: optional lower bound on ``Scan.created_at``.
            end_date: optional upper bound on ``Scan.created_at``.
            user_id: optional ``Scan.created_by`` filter.

        Returns:
            str: UTF-8 friendly JSON (``ensure_ascii=False``), 2-space
            indented, with a ``generated_at`` UTC timestamp.

        Raises:
            Re-raises any error from the underlying statistics calls.
        """
        try:
            statistics = {
                # utcnow() kept for payload compatibility (no "+00:00"
                # offset in the timestamp).
                "generated_at": datetime.utcnow().isoformat(),
                "time_range": {
                    "start_date": start_date.isoformat() if start_date else None,
                    "end_date": end_date.isoformat() if end_date else None
                },
                "summary": VulnerabilityStatsService.get_vulnerability_summary(
                    start_date, end_date, user_id
                ),
                "risk_assessment": VulnerabilityStatsService.get_risk_assessment_summary(user_id),
                "remediation_progress": VulnerabilityStatsService.get_remediation_progress(
                    start_date, end_date, user_id
                )
            }

            return json.dumps(statistics, ensure_ascii=False, indent=2)

        except Exception as e:
            logger.error(f"Error exporting statistics: {str(e)}")
            raise