import xgboost as xgb
import lightgbm as lgb
from sklearn.preprocessing import LabelEncoder, StandardScaler, PolynomialFeatures
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, cross_val_score
from sklearn.feature_selection import SelectFromModel, mutual_info_classif, SelectKBest
import numpy as np
import pandas as pd
import pickle
import os
import re
import joblib
import random
from collections import defaultdict, Counter
from datetime import datetime, timedelta
from app.utils.logger import logger
from app.utils.http_utils import detect_tech_stack
import shutil
import json
import warnings

# Suppress specific warning categories to keep the logs clean
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)

# Optional dependency: SHAP for model interpretation (if installed)
try:
    import shap
    SHAP_AVAILABLE = True
except ImportError:
    SHAP_AVAILABLE = False
    logger.warning("SHAP library not available, model interpretation features will be limited")

# Model configuration - advanced enhanced edition with additional XGBoost
# features and parameter-tuning options
MODEL_CONFIG = {
    'type': 'xgboost',  # use XGBoost by default
    'update_interval_days': 7,  # shorter update interval to adapt to new threats faster
    'retrain_sample_size': 15000,  # larger retraining sample size
    'evaluation_metric': 'f1',  # F1 score is the primary evaluation metric
    'enable_hyperparameter_tuning': True,  # enable hyperparameter tuning
    'enable_gpu_acceleration': False,  # GPU acceleration (enable per environment)
    'enable_model_interpretation': True,  # enable model interpretation
    'enable_feature_selection': True,  # enable feature selection
    'enable_model_ensembling': True,  # enable model ensembling
    'ensemble_size': 3,  # number of models in the ensemble
    'enable_shap_interpretation': True,  # enable SHAP-based model interpretation
    'lightgbm_params': {
        'boosting_type': 'gbdt',
        'objective': 'multiclass',
        'num_class': 4,  # 4 classes: low, medium, high, critical
        'metric': ['multi_logloss', 'multi_error'],
        'learning_rate': 0.03,
        'num_leaves': 63,
        'feature_fraction': 0.7,
        'bagging_fraction': 0.7,
        'bagging_freq': 3,
        'verbose': 0,
        'random_state': 42,
        'max_depth': 12,
        'min_child_samples': 20,
        'lambda_l1': 0.1,
        'lambda_l2': 0.1,
        'extra_trees': True,
        'early_stopping_round': 100
    },
    'xgboost_params': {
        'n_estimators': 300,
        'max_depth': 8,
        'learning_rate': 0.05,
        'subsample': 0.8,
        'colsample_bytree': 0.8,
        'colsample_bylevel': 0.7,
        'colsample_bynode': 0.8,
        'use_label_encoder': False,
        'eval_metric': ['mlogloss', 'merror'],
        'random_state': 42,
        'min_child_weight': 5,
        'gamma': 0.2,
        'reg_alpha': 0.1,
        'reg_lambda': 0.1,
        'max_delta_step': 0,
        'scale_pos_weight': 1,
        'base_score': 0.5,
        'grow_policy': 'depthwise',
        'tree_method': 'exact',  # exact tree method for higher accuracy
        'single_precision_histogram': True,
        'max_bin': 256  # larger histogram bin count for better precision
    },
    'hyperparameter_search_space': {
        'n_estimators': [100, 200, 300, 500],
        'max_depth': [4, 6, 8, 10, 12],
        'learning_rate': [0.01, 0.03, 0.05, 0.1],
        'subsample': [0.7, 0.8, 0.9, 1.0],
        'colsample_bytree': [0.6, 0.7, 0.8, 0.9],
        'min_child_weight': [1, 3, 5, 7],
        'gamma': [0.0, 0.1, 0.2, 0.3],
        'reg_alpha': [0.0, 0.1, 0.2, 0.3],
        'reg_lambda': [0.0, 0.1, 0.2, 0.3]
    },
    'advanced_features': {
        'enable_feature_interaction': True,  # enable feature interaction
        'enable_feature_importance_tracking': True,
        'enable_early_stopping': True,
        'early_stopping_rounds': 50,
        'cross_validation_folds': 5,
        'shuffle_training_data': True,
        'stratified_sampling': True,
        'enable_feature_crossing': True,  # enable feature crossing
        'crossing_degree': 2,  # degree used for feature crossing
        'enable_statistical_features': True,  # enable statistical features
        'enable_drift_detection': True,  # enable data drift detection
        'drift_threshold': 0.2,  # drift detection threshold
        'enable_ab_testing': False  # enable A/B testing
    }
}

# File used to track model performance over time
MODEL_PERFORMANCE_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'models', 'model_performance.json')

# Path where the serialized model is stored
MODEL_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'models', f'risk_model_{MODEL_CONFIG["type"]}.pkl')

# Path of the model metadata file
MODEL_METADATA_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'models', 'model_metadata.json')

# Module-level model singletons (populated by init_risk_model)
risk_model = None
model_metadata = None

# Predefined vulnerability-type weights (advanced enhanced edition)
VULN_TYPE_WEIGHTS = {
    'sql_injection': 0.9,
    'xss': 0.7,
    'info_leak': 0.6,
    'command_injection': 0.95,
    'csrf': 0.5,
    'file_upload': 0.85,
    'path_traversal': 0.75,
    'ldap_injection': 0.8,
    'ssrf': 0.85,
    'unauthorized_access': 0.8,
    'security_headers': 0.65,
    'insecure_cookies': 0.7,
    'missing_authentication': 0.9,
    'weak_password_policy': 0.8,
    'insecure_deserialization': 0.9,
    'ssti': 0.85,  # server-side template injection
    'deserialization': 0.92,
    'xxe': 0.88,  # XML external entity injection
    'authentication_bypass': 0.93,
    'authorization_bypass': 0.87,
    'dos': 0.78,  # denial of service
    'crypto_issue': 0.82,  # cryptography weakness
    'dependency_vulnerability': 0.8,  # vulnerable dependency component
    'clickjacking': 0.6,
    'open_redirect': 0.65,
    'remote_code_execution': 0.98,  # remote code execution
    'privilege_escalation': 0.91,  # privilege escalation
    'insecure_configuration': 0.75,  # insecure configuration
    'buffer_overflow': 0.94,  # buffer overflow
    'race_condition': 0.83,  # race condition
    'side_channel_attack': 0.86  # side-channel attack
}

# Technology-stack risk weights
TECH_STACK_RISK_WEIGHTS = {
    'PHP': 1.2,  # PHP historically has a large number of vulnerabilities
    'ASP': 1.3,
    'ASP.NET': 0.9,
    'Java': 0.85,
    'Node.js': 0.95,
    'Python': 0.8,
    'Apache': 1.0,
    'Nginx': 0.9,
    'IIS': 1.1,
    'WordPress': 1.3,
    'Joomla': 1.2,
    'Drupal': 1.1,
    'jQuery': 1.05,
    'React': 0.85,
    'Angular': 0.9,
    'Vue.js': 0.9
}

# Parameter-location risk weights
PARAMETER_LOCATION_RISK_WEIGHTS = {
    'cookie': 1.2,     # cookies are often overlooked
    'header': 1.3,     # header parameters are even easier to miss
    'path': 1.1,       # path parameters
    'query': 1.0,      # query parameters (the baseline)
    'form': 0.95,      # form parameters (usually get basic validation)
    'json': 0.9        # JSON parameters (usually get format validation)
}

# Predefined path risk weights
PATH_RISK_WEIGHTS = {
    '/login': 0.9,
    '/admin': 0.95,
    '/api': 0.8,
    '/database': 0.98,
    '/config': 0.92,
    '/user': 0.75
}


def init_risk_model():
    """Initialize the risk assessment model, with an automatic-update check.

    Populates the module-level ``risk_model`` and ``model_metadata``
    globals. On any failure, falls back to a freshly built default model.
    """
    global risk_model, model_metadata

    try:
        # Make sure the models directory exists before any load/save.
        os.makedirs(os.path.dirname(MODEL_PATH), exist_ok=True)

        model_metadata = load_model_metadata()

        if should_update_model():
            # Stale or mismatched model: retrain, refresh metadata, persist.
            logger.info(f"Model needs update, retraining...")
            risk_model = retrain_model()
            update_model_metadata()
            save_model(risk_model)
            return

        if os.path.exists(MODEL_PATH):
            # A trained model exists on disk; reuse it.
            risk_model = load_model()
            logger.info(f"Loaded {MODEL_CONFIG['type']} risk model from {MODEL_PATH}")
        else:
            # Nothing persisted yet: build a default model and store it.
            risk_model = create_default_model()
            logger.info(f"Created default {MODEL_CONFIG['type']} risk model")

            save_model(risk_model)
            update_model_metadata()

    except Exception as e:
        logger.error(f"Error initializing risk model: {str(e)}")
        # Last resort: keep an in-memory default model so callers still work.
        risk_model = create_default_model()

def load_model_metadata():
    """Load model metadata from disk, falling back to fresh defaults.

    Returns:
        dict: the stored metadata, or a default metadata dict when the
        file is missing or unreadable.
    """
    try:
        if os.path.exists(MODEL_METADATA_PATH):
            with open(MODEL_METADATA_PATH, 'r') as f:
                return json.load(f)
    except Exception as e:
        logger.warning(f"Error loading model metadata: {str(e)}")

    # Default metadata used when no file exists or reading failed.
    return {
        'created_at': datetime.now().isoformat(),
        'last_updated': datetime.now().isoformat(),
        'model_type': MODEL_CONFIG['type'],
        'version': '1.0',
        'training_sample_size': MODEL_CONFIG['retrain_sample_size']
    }

def save_model_metadata(metadata):
    """Write the model metadata dict to MODEL_METADATA_PATH as pretty JSON.

    Failures are logged rather than raised, so a metadata write error
    never aborts model training or initialization.
    """
    try:
        serialized = json.dumps(metadata, indent=4)
        with open(MODEL_METADATA_PATH, 'w') as f:
            f.write(serialized)
    except Exception as e:
        logger.error(f"Error saving model metadata: {str(e)}")

def update_model_metadata():
    """Refresh the global model metadata after a (re)train and persist it.

    Updates the last-updated timestamp and recorded model type, bumps the
    minor version number, then writes the metadata file to disk.
    """
    global model_metadata
    if model_metadata is None:
        model_metadata = load_model_metadata()

    model_metadata['last_updated'] = datetime.now().isoformat()
    model_metadata['model_type'] = MODEL_CONFIG['type']

    # Bump the minor version number.
    # BUG FIX: the original code assumed the version string always had a
    # numeric "major.minor" shape and crashed with IndexError/ValueError on
    # values such as "1" or "2.x"; reset to "1.1" in that case instead.
    version_parts = model_metadata.get('version', '1.0').split('.')
    try:
        major = version_parts[0]
        minor_version = int(version_parts[1]) + 1
    except (IndexError, ValueError):
        major, minor_version = '1', 1
    model_metadata['version'] = f"{major}.{minor_version}"

    save_model_metadata(model_metadata)

def should_update_model():
    """Decide whether the persisted model should be retrained.

    Returns:
        bool: True when the configured model type differs from the stored
        one, or the model is older than the update interval; False when no
        model file exists (creation is handled elsewhere) or it is fresh.
    """
    global model_metadata
    if model_metadata is None:
        model_metadata = load_model_metadata()

    # No persisted model: init_risk_model creates one; nothing to retrain.
    if not os.path.exists(MODEL_PATH):
        return False

    # A change of the configured model type forces a retrain.
    recorded_type = model_metadata.get('model_type')
    if recorded_type != MODEL_CONFIG['type']:
        logger.info(f"Model type changed from {recorded_type} to {MODEL_CONFIG['type']}")
        return True

    # Retrain when the stored model is older than the configured interval.
    last_updated_str = model_metadata.get('last_updated')
    if last_updated_str:
        try:
            last_updated = datetime.fromisoformat(last_updated_str)
            age_days = (datetime.now() - last_updated).days
        except Exception as e:
            logger.warning(f"Error parsing last_updated date: {str(e)}")
        else:
            if age_days >= MODEL_CONFIG['update_interval_days']:
                logger.info(f"Model hasn't been updated in {age_days} days, needs update")
                return True

    return False

def load_model():
    """Unpickle and return the persisted model from MODEL_PATH.

    Raises:
        Exception: re-raises any failure after logging it, so the caller
        can fall back to building a default model.
    """
    try:
        # NOTE: pickle is acceptable here only because the file is written
        # by this module itself; never point MODEL_PATH at untrusted data.
        with open(MODEL_PATH, 'rb') as f:
            model = pickle.load(f)
    except Exception as e:
        logger.error(f"Error loading model: {str(e)}")
        raise
    return model

def save_model(model):
    """Pickle the model to MODEL_PATH, keeping a .bak copy of any old file.

    Raises:
        Exception: re-raises any backup/write failure after logging it.
    """
    try:
        if os.path.exists(MODEL_PATH):
            # Preserve the previous model so a bad save can be rolled back.
            backup_path = f"{MODEL_PATH}.bak"
            shutil.copy2(MODEL_PATH, backup_path)
            logger.info(f"Created model backup at {backup_path}")

        with open(MODEL_PATH, 'wb') as f:
            pickle.dump(model, f)
    except Exception as e:
        logger.error(f"Error saving model: {str(e)}")
        raise
    logger.info(f"Saved model to {MODEL_PATH}")

def retrain_model(training_data=None):
    """Retrain the risk model (enhanced) - supports real training data.

    Args:
        training_data: optional list of vulnerability dicts. Used only when
            it holds more than 100 items; otherwise synthetic samples are
            generated via generate_sample_data().

    Returns:
        The trained model: a LightGBM Booster, an XGBClassifier, or a
        soft VotingClassifier when ensembling is enabled.
    """
    logger.info(f"Retraining {MODEL_CONFIG['type']} model with {MODEL_CONFIG['retrain_sample_size']} samples")

    # Prefer real data when enough of it was supplied; otherwise simulate.
    if training_data and len(training_data) > 100:
        logger.info(f"Using provided real training data with {len(training_data)} samples")
        X_train_full, y_train_full = preprocess_real_training_data(training_data)
    else:
        X_train_full, y_train_full = generate_sample_data(sample_size=MODEL_CONFIG['retrain_sample_size'])

    advanced = MODEL_CONFIG['advanced_features']

    # Train/validation split - optionally stratified on the labels.
    stratify = y_train_full if advanced.get('stratified_sampling', True) else None
    X_train, X_val, y_train, y_val = train_test_split(
        X_train_full, y_train_full,
        test_size=0.2,
        random_state=42,
        shuffle=advanced.get('shuffle_training_data', True),
        stratify=stratify
    )

    start_time = datetime.now()

    if MODEL_CONFIG['type'] == 'lightgbm':
        params = MODEL_CONFIG['lightgbm_params'].copy()

        train_data = lgb.Dataset(X_train, label=y_train)
        val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)

        model = lgb.train(
            params,
            train_data,
            num_boost_round=1000,
            valid_sets=[train_data, val_data],
            valid_names=['train', 'validation'],
            callbacks=[lgb.early_stopping(stopping_rounds=advanced.get('early_stopping_rounds', 50))]
        )
    else:
        params = MODEL_CONFIG['xgboost_params'].copy()

        model = xgb.XGBClassifier(**params)

        # Quick cross-validated quality estimate of the base configuration.
        if advanced.get('cross_validation_folds', 0) > 1:
            try:
                cv_folds = advanced['cross_validation_folds']
                scoring = MODEL_CONFIG['evaluation_metric']
                cv_results = cross_val_score(model, X_train, y_train, cv=cv_folds, scoring=scoring)
                logger.info(f"Cross-validation {scoring}: {np.mean(cv_results):.4f} ± {np.std(cv_results):.4f}")
            except Exception as e:
                logger.warning(f"Cross-validation skipped: {str(e)}")

        # Hyperparameter tuning (randomized search keeps this affordable).
        if MODEL_CONFIG.get('enable_hyperparameter_tuning', True):
            logger.info("Performing hyperparameter tuning...")
            search_space = MODEL_CONFIG.get('hyperparameter_search_space', {})

            # A reduced search space for efficiency.
            simplified_space = {
                'max_depth': search_space.get('max_depth', [6, 8, 10]),
                'learning_rate': search_space.get('learning_rate', [0.03, 0.05, 0.07]),
                'n_estimators': search_space.get('n_estimators', [200, 300, 400]),
                'subsample': search_space.get('subsample', [0.7, 0.8, 0.9]),
                'colsample_bytree': search_space.get('colsample_bytree', [0.7, 0.8, 0.9]),
                'gamma': search_space.get('gamma', [0.1, 0.2, 0.3])
            }

            random_search = RandomizedSearchCV(
                estimator=model,
                param_distributions=simplified_space,
                n_iter=10,  # bounded number of sampled configurations
                cv=3,
                scoring='f1_weighted',
                random_state=42,
                verbose=1
            )

            random_search.fit(X_train, y_train)
            logger.info(f"Best hyperparameters found: {random_search.best_params_}")
            logger.info(f"Best score: {random_search.best_score_:.4f}")
            model = random_search.best_estimator_

        # Importance-based feature selection.
        if MODEL_CONFIG.get('enable_feature_selection', True):
            logger.info("Performing feature selection...")
            selector = SelectFromModel(model, threshold='median')
            X_train_selected = selector.fit_transform(X_train, y_train)
            X_val_selected = selector.transform(X_val)

            selected_indices = selector.get_support(indices=True)
            selected_features = X_train.columns[selected_indices].tolist()
            logger.info(f"Selected {len(selected_features)} features: {selected_features}")

            # Refit on the reduced feature set.
            # NOTE(review): `early_stopping_rounds` as a fit() kwarg was
            # removed in xgboost >= 2.0 (it moved to the constructor) -
            # confirm the pinned xgboost version still supports it.
            model.fit(X_train_selected, y_train,
                     eval_set=[(X_val_selected, y_val)],
                     early_stopping_rounds=advanced.get('early_stopping_rounds', 50),
                     verbose=False)

            # Remember the selection so prediction code can reapply it.
            model.selected_features = selected_features
            model.feature_selector = selector
        else:
            # Encourage feature interactions via deeper trees / wider sampling.
            if advanced.get('enable_feature_interaction', True):
                model.set_params(
                    max_depth=min(params['max_depth'] + 2, 12),
                    colsample_bytree=min(params['colsample_bytree'] + 0.1, 0.9),
                    subsample=min(params['subsample'] + 0.1, 0.9)
                )

            fit_params = {
                'eval_set': [(X_val, y_val)],
                'verbose': False
            }

            # Early stopping against the validation split.
            if advanced.get('enable_early_stopping', True):
                fit_params['early_stopping_rounds'] = advanced.get('early_stopping_rounds', 50)

            model.fit(X_train, y_train, **fit_params)

        # Optional ensembling of several perturbed XGBoost models.
        if MODEL_CONFIG.get('enable_model_ensembling', True) and MODEL_CONFIG.get('type') == 'xgboost':
            logger.info("Creating ensemble of XGBoost models...")
            # BUG FIX: capture the feature-selection state BEFORE `model` is
            # replaced by the VotingClassifier below. The original checked
            # hasattr(model, 'selected_features') AFTER the reassignment,
            # which was always False, so the ensemble was fitted on the full
            # feature matrix even when its members were trained on the
            # selected subset, and the selection metadata was dropped.
            use_selected = hasattr(model, 'selected_features')
            selected_features_saved = getattr(model, 'selected_features', None)
            feature_selector_saved = getattr(model, 'feature_selector', None)

            ensemble_models = []
            for i in range(min(MODEL_CONFIG.get('ensemble_size', 3), 5)):
                # Vary the seed and sampling slightly for ensemble diversity.
                ensemble_params = params.copy()
                ensemble_params['random_state'] = 42 + i
                ensemble_params['colsample_bytree'] = 0.7 + (i * 0.05)
                ensemble_params['subsample'] = 0.75 + (i * 0.05)

                ensemble_model = xgb.XGBClassifier(**ensemble_params)

                if use_selected:
                    # Train on the selected feature subset.
                    ensemble_model.fit(X_train_selected, y_train,
                                     eval_set=[(X_val_selected, y_val)],
                                     early_stopping_rounds=30,
                                     verbose=False)
                else:
                    ensemble_model.fit(X_train, y_train,
                                     eval_set=[(X_val, y_val)],
                                     early_stopping_rounds=30,
                                     verbose=False)

                ensemble_models.append((f'xgb_model_{i}', ensemble_model))

            # Soft-voting over the ensemble becomes the final model.
            from sklearn.ensemble import VotingClassifier
            model = VotingClassifier(estimators=ensemble_models, voting='soft')

            if use_selected:
                model.fit(X_train_selected, y_train)
                # Carry the selection metadata over to the new final model.
                model.selected_features = selected_features_saved
                model.feature_selector = feature_selector_saved
            else:
                model.fit(X_train, y_train)

    end_time = datetime.now()
    training_time = (end_time - start_time).total_seconds()

    logger.info(f"Model retraining completed in {training_time:.2f} seconds")

    # NOTE(review): when feature selection is active the model was trained on
    # the selected columns, but X_val here still has all columns - confirm
    # track_model_performance applies model.feature_selector when present.
    track_model_performance(model, X_val, y_val)

    # Enhanced feature-importance analysis.
    if advanced.get('enable_feature_importance_tracking', True):
        try:
            analyze_feature_importance(model, X_train, training_time)
        except Exception as e:
            logger.warning(f"Feature importance analysis failed: {str(e)}")

    return model

def preprocess_real_training_data(training_data):
    """Turn raw vulnerability dicts into a scaled feature matrix and labels.

    Args:
        training_data: iterable of vulnerability dicts; items that fail
            feature extraction are logged and skipped.

    Returns:
        (X, y): a pandas DataFrame of standardized features and an int
        label array (0=low, 1=medium, 2=high, 3=critical) derived from
        each item's 'risk_score'.
    """
    logger.info(f"Preprocessing real training data with {len(training_data)} samples")

    features_list = []
    labels = []

    for item in training_data:
        try:
            features = extract_features(item)

            # Bucket the continuous risk score into four severity classes.
            score = item.get('risk_score', 0.5)
            if score >= 0.9:
                severity = 3  # critical
            elif score >= 0.7:
                severity = 2  # high
            elif score >= 0.4:
                severity = 1  # medium
            else:
                severity = 0  # low
        except Exception as e:
            logger.warning(f"Error processing training item: {str(e)}")
            continue

        features_list.append(features)
        labels.append(severity)

    valid_count = len(labels)
    logger.info(f"Successfully processed {valid_count} valid training samples")

    X = pd.DataFrame(features_list)

    # Standardize every numeric column; the encoded vuln type stays raw.
    numeric_features = [col for col in X.columns if col != 'vuln_type_encoded']
    X[numeric_features] = StandardScaler().fit_transform(X[numeric_features])

    return X, np.array(labels)

def analyze_feature_importance(model, X_train, training_time):
    """Analyze, log, and persist feature importance for the trained model.

    Args:
        model: the trained LightGBM Booster or XGBoost classifier.
        X_train: training feature DataFrame (used for column names).
        training_time: wall-clock training duration in seconds (logged).
    """
    try:
        if MODEL_CONFIG['type'] == 'lightgbm':
            importance_split = model.feature_importance(importance_type='split')
            importance_gain = model.feature_importance(importance_type='gain')
            feature_names = model.feature_name()

            importance_df = pd.DataFrame({
                'Feature': feature_names,
                'Split_Importance': importance_split,
                'Gain_Importance': importance_gain
            })
        else:
            # XGBoost exposes several importance flavours.
            importance_gain = model.feature_importances_  # default is gain
            # Split-count ("weight") importance from the booster.
            importance_weight = model.get_booster().get_score(importance_type='weight')
            # Cover importance from the booster.
            importance_cover = model.get_booster().get_score(importance_type='cover')

            # Include every feature even if a booster score is missing for it.
            feature_names = X_train.columns.tolist()
            importance_df = pd.DataFrame({
                'Feature': feature_names,
                'Gain_Importance': [importance_gain[i] for i in range(len(feature_names))],
                'Weight_Importance': [importance_weight.get(f, 0) for f in feature_names],
                'Cover_Importance': [importance_cover.get(f, 0) for f in feature_names]
            })

        # Log the ten most important features by gain.
        top_features = importance_df.sort_values('Gain_Importance', ascending=False).head(10)
        logger.info(f"\nTop 10 important features based on Gain:")
        for idx, row in top_features.iterrows():
            logger.info(f"  {row['Feature']}: {row['Gain_Importance']:.4f}")

        # Persist the full importance table next to the model file.
        importance_file = os.path.join(os.path.dirname(MODEL_PATH), 'feature_importance.csv')
        importance_df.to_csv(importance_file, index=False)
        logger.info(f"Feature importance saved to {importance_file}")

        # Summarize how concentrated the importance is.
        total_features = len(feature_names)
        top_3_contribution = importance_df['Gain_Importance'].nlargest(3).sum() / importance_df['Gain_Importance'].sum()

        logger.info(f"\nFeature Importance Summary:")
        logger.info(f"  Total features: {total_features}")
        logger.info(f"  Top 3 features contribution: {top_3_contribution:.2%}")
        logger.info(f"  Training time: {training_time:.2f} seconds")

    except Exception as e:
        logger.error(f"Enhanced feature importance analysis failed: {str(e)}")
        # Fall back to a basic single-metric analysis.
        try:
            if MODEL_CONFIG['type'] == 'lightgbm':
                importance = model.feature_importance(importance_type='split')
                feature_names = model.feature_name()
            else:
                importance = model.feature_importances_
                feature_names = X_train.columns

            importance_df = pd.DataFrame({
                'Feature': feature_names,
                'Importance': importance
            }).sort_values('Importance', ascending=False)

            logger.info(f"Basic feature importance - Top 5:\n{importance_df.head(5).to_string(index=False)}")
        except Exception as fallback_error:
            # BUG FIX: was a bare `except: pass`, which also swallowed
            # KeyboardInterrupt/SystemExit; narrow to Exception and log.
            logger.warning(f"Basic feature importance analysis also failed: {str(fallback_error)}")


def create_default_model():
    """Build a baseline risk model and fit it on synthetic sample data.

    Returns:
        A fitted (or, on training failure, unfitted) LGBMClassifier or
        XGBClassifier, depending on MODEL_CONFIG['type'].
    """
    # Pick the estimator class according to configuration.
    model_type = MODEL_CONFIG['type']
    if model_type == 'lightgbm':
        estimator = lgb.LGBMClassifier(**MODEL_CONFIG['lightgbm_params'])
    else:
        estimator = xgb.XGBClassifier(**MODEL_CONFIG['xgboost_params'])

    # Demo-quality training on simulated vulnerability samples.
    sample_X, sample_y = generate_sample_data()

    try:
        estimator.fit(sample_X, sample_y)
        logger.info("Trained model with sample data")

        # Record baseline metrics (evaluated on the training data itself).
        track_model_performance(estimator, sample_X, sample_y)
    except Exception as e:
        logger.error(f"Error training model: {str(e)}")

    return estimator

def generate_sample_data(sample_size=1000):
    """Generate an enhanced synthetic training set.

    Args:
        sample_size: number of simulated vulnerability samples to create.

    Returns:
        (X_train, y_train): a DataFrame of standardized features and an int
        label array (0=low, 1=medium, 2=high, 3=critical).

    Note: uses numpy's global RNG without a fixed seed, so output differs
    between calls.
    """
    # Build simulated data.

    # Randomly draw categorical feature values from the known weight tables.
    vuln_types = list(VULN_TYPE_WEIGHTS.keys())
    paths = list(PATH_RISK_WEIGHTS.keys())
    param_locations = list(PARAMETER_LOCATION_RISK_WEIGHTS.keys())
    tech_stacks = list(TECH_STACK_RISK_WEIGHTS.keys())

    # Base categorical features (paths include a few non-sensitive extras).
    vuln_type_samples = np.random.choice(vuln_types, sample_size)
    path_samples = np.random.choice(paths + ['/random/path', '/api/data', '/user/profile'], sample_size)
    param_location_samples = np.random.choice(param_locations, sample_size)

    # Enhanced numeric features.
    exploitability = np.random.uniform(0, 1, sample_size)  # continuous in [0, 1]
    evidence_quality = np.random.uniform(0, 1, sample_size)
    evidence_length = np.random.randint(10, 2000, sample_size)
    exposure_surface = np.random.randint(1, 4, sample_size)  # integers 1-3
    historical_exploit_frequency = np.random.randint(1, 6, sample_size)  # integers 1-5
    pattern_match_score = np.random.uniform(0, 1, sample_size)

    # Simulated technology-stack risk scores.
    tech_risk_scores = np.random.uniform(0.8, 1.4, sample_size)

    # Compute risk scores with an enhanced multi-factor model.
    risk_scores = []
    for i in range(sample_size):
        vuln_type = vuln_type_samples[i]
        path = path_samples[i]
        param_location = param_location_samples[i]

        # Base score comes from the vulnerability type.
        base_score = VULN_TYPE_WEIGHTS.get(vuln_type, 0.5)

        # Path risk: highest matching sensitive-path weight, else a default.
        path_risk = 0
        for risky_path, weight in PATH_RISK_WEIGHTS.items():
            if risky_path in path:
                path_risk = max(path_risk, weight)
        if path_risk == 0:
            path_risk = 0.4  # default path risk

        # Parameter-location risk adjustment.
        param_location_risk = PARAMETER_LOCATION_RISK_WEIGHTS.get(param_location, 1.0)

        # Additive adjustments from the remaining factors.
        exploit_adj = 0.2 * exploitability[i]
        quality_adj = 0.15 * evidence_quality[i]
        exposure_adj = 0.1 * (exposure_surface[i] - 1)
        pattern_adj = 0.2 * pattern_match_score[i]
        hist_adj = min(0.2, 0.04 * (historical_exploit_frequency[i] - 1))

        # Technology-stack risk adjustment.
        tech_adj = tech_risk_scores[i] * 0.1

        # Composite score: multiplicative base, then additive adjustments.
        score = base_score * path_risk * param_location_risk
        score += exploit_adj + quality_adj + exposure_adj + pattern_adj + hist_adj + tech_adj
        score = min(1.0, score)

        # Small random jitter to mimic real-world uncertainty.
        score *= np.random.uniform(0.9, 1.1)
        score = min(1.0, max(0.0, score))

        risk_scores.append(score)

    # Convert scores to severity labels (including a critical level).
    y_train = []
    for score in risk_scores:
        if score >= 0.9:
            y_train.append(3)  # critical
        elif score >= 0.7:
            y_train.append(2)  # high
        elif score >= 0.4:
            y_train.append(1)  # medium
        else:
            y_train.append(0)  # low

    # Assemble the enhanced feature matrix.
    X_data = {
        'vuln_type_encoded': LabelEncoder().fit_transform(vuln_type_samples),
        'exploitability': exploitability,
        'evidence_quality': evidence_quality,
        'evidence_length_norm': (evidence_length - np.mean(evidence_length)) / np.std(evidence_length),
        'exposure_surface': exposure_surface,
        'historical_exploit_frequency': historical_exploit_frequency,
        'param_location_risk': [PARAMETER_LOCATION_RISK_WEIGHTS.get(loc, 1.0) for loc in param_location_samples],
        'tech_risk_score': tech_risk_scores,
        'pattern_match_score': pattern_match_score,
        'path_risk_score': [max([weight for risky_path, weight in PATH_RISK_WEIGHTS.items() if risky_path in path], default=0.4) for path in path_samples]
    }

    X_train = pd.DataFrame(X_data)

    # Standardize the numeric features.
    scaler = StandardScaler()
    numeric_features = ['exploitability', 'evidence_quality', 'evidence_length_norm', 
                        'exposure_surface', 'historical_exploit_frequency', 
                        'param_location_risk', 'tech_risk_score', 'pattern_match_score', 'path_risk_score']
    X_train[numeric_features] = scaler.fit_transform(X_train[numeric_features])

    return X_train, np.array(y_train)

def extract_features(vulnerability):
    """Extract an enriched feature set from a vulnerability record.

    Produces the 18 numeric features consumed by the risk model, covering
    exploitability, evidence quality, exposure surface, technology-stack
    risk, data sensitivity, business impact, remediation difficulty and
    historical frequency signals.

    Args:
        vulnerability: dict describing a single finding; recognised keys
            include 'vuln_type', 'path', 'evidence', 'response', 'payload',
            'parameter', 'parameter_location', 'tech_stack', 'http_method'
            and a nested 'details' dict (optional keys: 'cvss_score',
            'historical_vuln_count', 'requires_auth', 'exploit_available').

    Returns:
        dict[str, float | int]: feature name -> value, suitable for
        building a single-row pandas DataFrame.
    """
    # Basic fields; every key is optional so missing data degrades gracefully.
    vuln_type = vulnerability.get('vuln_type', 'unknown')
    path = vulnerability.get('path', '')
    evidence = vulnerability.get('evidence', '')
    response = vulnerability.get('response', '')
    payload = vulnerability.get('payload', '')
    parameter = vulnerability.get('parameter', '')
    parameter_location = vulnerability.get('parameter_location', 'query')
    tech_stack = vulnerability.get('tech_stack', [])
    details = vulnerability.get('details', {})
    
    # Combined lowercase text used by all content-based heuristics below.
    combined_content = f"{payload} {response} {evidence}".lower()
    
    # 14. Attack-surface complexity: deeper paths, multi-parameter requests
    # and state-changing HTTP methods widen the attack surface.
    attack_surface_complexity = 0.5  # baseline
    if len(path.split('/')) > 5:
        attack_surface_complexity += 0.2
    if parameter and len(parameter.split('&')) > 3:
        attack_surface_complexity += 0.1
    http_method = vulnerability.get('http_method', 'GET')
    if http_method in ['POST', 'PUT', 'DELETE']:
        attack_surface_complexity += 0.15
    attack_surface_complexity = min(1.0, attack_surface_complexity)
    
    # 15. CVSS score (if supplied), normalised from 0-10 to 0-1.
    cvss_score = details.get('cvss_score', 0)
    try:
        cvss_score = float(cvss_score) / 10.0
    except (TypeError, ValueError):
        cvss_score = 0.0
    
    # 16. Historical vulnerability frequency for this path, capped at 1.0.
    historical_vuln_freq = details.get('historical_vuln_count', 0)
    historical_vuln_freq = min(1.0, historical_vuln_freq / 10.0)
    
    # 1. Path risk: take the strongest match among known risky path
    # fragments, then among the broader regex patterns below.
    path_risk = 0
    for risky_path, weight in PATH_RISK_WEIGHTS.items():
        if risky_path in path:
            path_risk = max(path_risk, weight)
    # Additional sensitive-path regex patterns.
    sensitive_patterns = {
        r'/admin.*': 0.95,
        r'/login.*': 0.9,
        r'/api/.*?/auth': 0.92,
        r'/config': 0.93,
        r'/database': 0.98,
        r'/backup': 0.94,
        r'/restore': 0.94,
        r'/upload': 0.88,
        r'/file': 0.85,
        r'/system': 0.87,
        r'/settings': 0.86,
        r'/user/.*?/profile': 0.75
    }
    for pattern, weight in sensitive_patterns.items():
        if re.search(pattern, path, re.IGNORECASE):
            path_risk = max(path_risk, weight)
    if path_risk == 0:
        path_risk = 0.4  # default path risk when nothing matched
    
    # 2. Exploitability: error leakage, payload/response richness and CVSS.
    exploitability = 0
    error_keywords = ['error', 'exception', 'warning', 'fail', 'denied', 'unexpected', 'stack trace', 'traceback']
    if any(keyword in combined_content for keyword in error_keywords):
        exploitability += 0.4
    # Detailed error messages leak more to an attacker.
    detailed_errors = ['sql syntax', 'database error', 'file not found', 'permission denied']
    if any(err in combined_content for err in detailed_errors):
        exploitability += 0.2
    # Payload and response richness.
    if payload and len(payload) > 8:
        exploitability += 0.2
    if response and len(response) > 100:
        exploitability += 0.2
    # Read the raw CVSS into its own local so the normalised cvss_score
    # feature computed in section 15 is not clobbered (the original code
    # reused the same name and exported the raw 0-10 value, or None).
    raw_cvss = details.get('cvss_score')
    if raw_cvss:
        try:
            exploitability += min(0.3, float(raw_cvss) / 30.0)
        except (TypeError, ValueError):
            pass
    exploitability = min(1.0, exploitability)
    
    # 3. Evidence quality: content length plus structured error/debug signals.
    evidence_quality = 0
    evidence_length = len(str(combined_content))
    if evidence_length > 200:
        evidence_quality += 0.3
    elif evidence_length > 50:
        evidence_quality += 0.15
    # Structured error messages.
    error_patterns = [r'error\s*:', r'exception\s*:', r'warning\s*:', r'fatal\s*:', r'critical\s*:']
    if any(re.search(pattern, combined_content, re.IGNORECASE) for pattern in error_patterns):
        evidence_quality += 0.4
    # Database-specific error strings.
    db_errors = ['mysql', 'postgresql', 'oracle', 'mssql', 'sqlite', 'jdbc', 'odbc']
    if any(db in combined_content for db in db_errors):
        evidence_quality += 0.3
    # Stack traces or debug output.
    debug_patterns = [r'stack\s*trace', r'traceback', r'line\s+\d+', r'file\s+".+?"']
    if any(re.search(pattern, combined_content, re.IGNORECASE) for pattern in debug_patterns):
        evidence_quality += 0.25
    evidence_quality = min(1.0, evidence_quality)
    
    # 4. Exposure surface: graded 1-3 by how sensitive the endpoint is.
    exposure_surface = 1
    sensitive_paths = {
        '/admin': 3, '/login': 3, '/register': 2, '/api': 2, 
        '/database': 3, '/config': 3, '/user': 2, '/auth': 2,
        '/upload': 3, '/backup': 3, '/restore': 3,
        '/payment': 3, '/checkout': 3, '/billing': 3,
        '/settings': 2, '/profile': 2, '/password': 3
    }
    for sensitive_path, score in sensitive_paths.items():
        if sensitive_path in path:
            exposure_surface = max(exposure_surface, score)
    # Public APIs are fully exposed.
    if '/api/' in path and 'public' in path:
        exposure_surface = max(exposure_surface, 3)
    # Administrative functionality is always high exposure.
    admin_patterns = [r'/admin', r'/manage', r'/control', r'/dashboard', r'/panel']
    if any(pattern in path for pattern in admin_patterns):
        exposure_surface = max(exposure_surface, 3)
    
    # 5. Historical exploit frequency keyed on vulnerability type (1-5 scale).
    historical_exploit_frequency = {
        'sql_injection': 5,
        'xss': 4,
        'command_injection': 5,
        'file_upload': 4,
        'path_traversal': 3,
        'ssrf': 4,
        'csrf': 2,
        'ssti': 4,
        'deserialization': 5,
        'xxe': 3,
        'authentication_bypass': 5,
        'authorization_bypass': 4,
        'dos': 3,
        'crypto_issue': 2,
        'dependency_vulnerability': 3
    }.get(vuln_type, 1)
    
    # 6. Risk weight of where the vulnerable parameter lives (query/body/...).
    param_location_risk = PARAMETER_LOCATION_RISK_WEIGHTS.get(parameter_location, 1.0)
    
    # 7. Technology-stack risk: highest known multiplier among declared,
    # detected, or content-inferred technologies.
    tech_risk_score = 1.0
    if tech_stack:
        for tech in tech_stack:
            if tech in TECH_STACK_RISK_WEIGHTS:
                tech_risk_score = max(tech_risk_score, TECH_STACK_RISK_WEIGHTS[tech])
    # Fall back to detecting the stack from the raw response.
    if not tech_stack or tech_risk_score == 1.0:
        try:
            detected_tech = detect_tech_stack(response)
            for tech in detected_tech:
                if tech in TECH_STACK_RISK_WEIGHTS:
                    tech_risk_score = max(tech_risk_score, TECH_STACK_RISK_WEIGHTS[tech])
        except Exception as e:
            logger.debug(f"Tech stack detection failed: {str(e)}")
    # Framework fingerprints in the combined content.
    framework_patterns = {
        'wordpress': 1.3,
        'joomla': 1.2,
        'drupal': 1.1,
        'magento': 1.25,
        'laravel': 0.9,
        'django': 0.85,
        'spring': 0.9,
        'struts': 1.4,
        'node.js': 0.95
    }
    for framework, weight in framework_patterns.items():
        if framework in combined_content:
            tech_risk_score = max(tech_risk_score, weight)
    
    # 8. Pattern matching: +0.2 per type-specific signature found, +0.1 for
    # any generic error marker, capped at 1.0.
    pattern_match_score = 0
    patterns = {
        'sql_injection': [
            r'sql syntax error', r'database error', r'table.*doesn\'t exist',
            r'column.*not found', r'mysql.*error', r'postgresql.*error',
            r'sqlite.*error', r'ora-\d+', r'syntax error',
            r'you have an error in your sql syntax'
        ],
        'xss': [
            r'<script', r'javascript:', r'onerror=', r'alert\(',
            r'onload=', r'onclick=', r'onmouseover=', r'<iframe',
            r'document\.write', r'eval\(', r'expression\(',
            r'<object', r'<embed', r'<link'
        ],
        'command_injection': [
            r'command not found', r'bash:', r'windows cmd',
            r'cmd\.exe', r'shell', r'/bin/bash', r'/bin/sh',
            r'powershell', r'process', r'execution',
            r'permission denied', r'access denied'
        ],
        'info_leak': [
            r'password', r'secret', r'config', r'backup',
            r'private key', r'api key', r'token', r'credential',
            r'database', r'connection string', r'ini file',
            r'environment variable', r'.env', r'passwd'
        ],
        'path_traversal': [
            r'parent directory', r'../', r'file not found',
            r'no such file', r'cannot access', r'permission denied',
            r'index of', r'directory listing', r'file://'
        ],
        'ssti': [
            r'template error', r'template exception', r'rendering error',
            r'mustache', r'jinja', r'twig', r'freemarker',
            r'cannot compile', r'template syntax error'
        ]
    }
    
    if vuln_type in patterns:
        for pattern in patterns[vuln_type]:
            if re.search(pattern, combined_content, re.IGNORECASE):
                pattern_match_score += 0.2
    # Generic error markers.
    common_errors = [r'error', r'exception', r'warning', r'fatal']
    if any(re.search(error, combined_content, re.IGNORECASE) for error in common_errors):
        pattern_match_score += 0.1
    pattern_match_score = min(1.0, pattern_match_score)
    
    # 9. Data sensitivity: +0.15 per sensitive-data pattern in the content,
    # plus a bump for data-heavy paths.
    data_sensitivity = 0.5  # baseline
    sensitive_data_patterns = [
        r'password', r'passwd', r'secret', r'token', r'api[_-]?key',
        r'credential', r'private[_-]?key', r'public[_-]?key', r'auth',
        r'credit[_-]?card', r'payment', r'financial', r'bank', r'social[_-]?security',
        r'personal[_-]?info', r'pii', r'user[_-]?data', r'customer[_-]?data'
    ]
    for pattern in sensitive_data_patterns:
        if re.search(pattern, combined_content, re.IGNORECASE):
            data_sensitivity += 0.15
    if any(sensitive_path in path for sensitive_path in ['/payment', '/billing', '/user', '/profile']):
        data_sensitivity += 0.2
    data_sensitivity = min(1.0, data_sensitivity)
    
    # 10. Whether the endpoint requires authentication (1/0 flag).
    requires_auth = details.get('requires_auth', False)
    auth_required = 1 if requires_auth else 0
    
    # 11. Whether a public exploit is known to exist (1/0 flag).
    exploit_available = details.get('exploit_available', False)
    has_exploit = 1 if exploit_available else 0
    
    # 12. Business impact: critical business paths and high data
    # sensitivity raise the stakes.
    business_impact = 0.5  # baseline
    critical_functions = ['login', 'payment', 'checkout', 'billing', 'user', 'profile', 'admin']
    if any(func in path for func in critical_functions):
        business_impact += 0.3
    if data_sensitivity > 0.8:
        business_impact += 0.2
    business_impact = min(1.0, business_impact)
    
    # 13. Remediation difficulty estimate (0.3 easy .. 0.8+ hard).
    remediation_difficulty = 0.5  # medium by default
    complex_vulns = ['remote_code_execution', 'insecure_deserialization', 'sql_injection', 'authentication_bypass']
    simple_vulns = ['security_headers', 'info_leak', 'missing_csp', 'clickjacking']
    
    if vuln_type in complex_vulns:
        remediation_difficulty = 0.8
    elif vuln_type in simple_vulns:
        remediation_difficulty = 0.3
    # Riskier stacks tend to be harder to patch safely.
    if tech_risk_score > 1.2:
        remediation_difficulty += 0.1
    remediation_difficulty = min(1.0, remediation_difficulty)
    
    # Assemble the 18-feature vector.
    features = {
        # NOTE(review): this encoder sorts VULN_TYPE_WEIGHTS keys, which may
        # not match the encoder fitted on the training samples — confirm the
        # two label spaces agree.
        'vuln_type_encoded': LabelEncoder().fit(list(VULN_TYPE_WEIGHTS.keys())).transform([vuln_type])[0] if vuln_type in VULN_TYPE_WEIGHTS else 0,
        'exploitability': exploitability,
        'evidence_quality': evidence_quality,
        'evidence_length_norm': (evidence_length - 500) / 500,  # rough standardisation
        'exposure_surface': exposure_surface,
        'historical_exploit_frequency': historical_exploit_frequency,
        'param_location_risk': param_location_risk,
        'tech_risk_score': tech_risk_score,
        'pattern_match_score': pattern_match_score,
        'path_risk_score': path_risk,
        'data_sensitivity': data_sensitivity,
        'auth_required': auth_required,
        'has_exploit': has_exploit,
        'business_impact': business_impact,
        'remediation_difficulty': remediation_difficulty,
        'attack_surface_complexity': attack_surface_complexity,
        'cvss_score': cvss_score,
        'historical_vuln_freq': historical_vuln_freq
    }
    
    return features

def predict_risk(vulnerability):
    """Predict the risk of a single vulnerability (enhanced version).

    Tries the trained ML model first (optionally attaching a SHAP
    explanation) and falls back to an enhanced rule-based multi-factor
    score when the model path fails.

    Args:
        vulnerability: dict describing the finding (see extract_features).

    Returns:
        dict with 'risk_score' (0-1), 'severity'
        ('low'|'medium'|'high'|'critical'), 'confidence', 'risk_factors',
        'recommendations' and, when available, 'model_interpretation'.
        On unrecoverable errors a default medium-risk result with
        'error': True is returned.
    """
    try:
        # Lazily initialise the module-level model.
        global risk_model
        if risk_model is None:
            init_risk_model()
        
        # Extract the enriched feature set and build a one-row matrix.
        features = extract_features(vulnerability)
        feature_df = pd.DataFrame([features])
        
        # Bind these before the model attempt: previously they were only
        # assigned inside the try/except branches, so a successful model
        # prediction hit a NameError on 'path' (and a failed one on
        # 'shap_explanation') while assembling the result below.
        path = vulnerability.get('path', '')
        shap_explanation = None
        used_fallback = False
        
        try:
            # Model prediction; some saved models carry a feature selector.
            if hasattr(risk_model, 'selected_features'):
                if hasattr(risk_model, 'estimators_'):
                    # Ensemble models apply selection internally.
                    severity_pred = risk_model.predict(feature_df)[0]
                else:
                    feature_selector = getattr(risk_model, 'feature_selector', None)
                    if feature_selector:
                        feature_df_selected = feature_selector.transform(feature_df)
                        severity_pred = risk_model.predict(feature_df_selected)[0]
                    else:
                        severity_pred = risk_model.predict(feature_df)[0]
            else:
                severity_pred = risk_model.predict(feature_df)[0]
                
            # Class probabilities yield both confidence and a weighted score.
            if hasattr(risk_model, 'predict_proba'):
                probabilities = risk_model.predict_proba(feature_df)[0]
                risk_score_pred = max(probabilities)
                # Expected class index normalised by the max index (3) -> 0-1.
                weighted_score = sum(i * prob for i, prob in enumerate(probabilities)) / 3.0
            else:
                # Fallback: map the 4 severity classes linearly onto (0, 1].
                risk_score_pred = min(1.0, max(0.0, (severity_pred + 1) / 4.0))
                weighted_score = risk_score_pred
            
            confidence = risk_score_pred
            
            # Optional SHAP interpretation for tree models. Compare by class
            # name rather than isinstance so this check cannot raise a
            # NameError when VotingClassifier is not imported (which would
            # silently discard a successful model prediction).
            if (MODEL_CONFIG.get('enable_shap_interpretation', False) and SHAP_AVAILABLE
                    and type(risk_model).__name__ != 'VotingClassifier'):
                try:
                    # Build a SHAP explainer for XGBoost-style models.
                    if hasattr(risk_model, 'get_booster'):
                        explainer = shap.TreeExplainer(risk_model.get_booster())
                        # Apply the same feature selection the model saw.
                        if hasattr(risk_model, 'selected_features'):
                            feature_selector = getattr(risk_model, 'feature_selector', None)
                            if feature_selector:
                                shap_input = feature_selector.transform(feature_df)
                            else:
                                shap_input = feature_df[risk_model.selected_features]
                        else:
                            shap_input = feature_df
                        
                        # Compute SHAP values for the single sample.
                        shap_values = explainer(shap_input)
                        
                        # Resolve the feature names that match shap_input.
                        if hasattr(risk_model, 'selected_features'):
                            feature_names = risk_model.selected_features
                        else:
                            feature_names = feature_df.columns.tolist()
                        
                        sample_shap_values = shap_values[0].values
                        
                        # Keep only features with a material contribution.
                        feature_contributions = []
                        for feature_name, shap_value in zip(feature_names, sample_shap_values):
                            if abs(shap_value) > 0.01:
                                feature_contributions.append({
                                    'feature': feature_name,
                                    'contribution': float(shap_value),
                                    'impact': 'positive' if shap_value > 0 else 'negative'
                                })
                        
                        # Largest absolute contribution first.
                        feature_contributions.sort(key=lambda x: abs(x['contribution']), reverse=True)
                        shap_explanation = {
                            'top_contributing_features': feature_contributions[:5],  # top-5 drivers
                            'explanation_available': True
                        }
                except Exception as e:
                    logger.warning(f"SHAP interpretation failed: {str(e)}")
                    shap_explanation = {
                        'explanation_available': False,
                        'error': str(e)
                    }
        except Exception as e:
            logger.warning(f"Error using model prediction: {str(e)}, falling back to enhanced rule-based scoring")
            # Enhanced rule-based fallback scoring.
            used_fallback = True
            vuln_type = vulnerability.get('vuln_type', 'unknown')
            
            # Base score from the vulnerability type.
            base_score = VULN_TYPE_WEIGHTS.get(vuln_type, 0.5)
            
            # Reuse values already computed during feature extraction.
            path_risk = features.get('path_risk_score', 0.4)
            exploitability = features.get('exploitability', 0.5)
            evidence_quality = features.get('evidence_quality', 0.7)
            data_sensitivity = features.get('data_sensitivity', 0.5)
            business_impact = features.get('business_impact', 0.5)
            
            # A known public exploit raises risk...
            has_exploit = features.get('has_exploit', 0)
            exploit_factor = 1.2 if has_exploit else 1.0
            
            # ...and so does requiring no authentication.
            auth_required = features.get('auth_required', 1)
            auth_factor = 1.3 if auth_required == 0 else 1.0
            
            # Multi-factor weighted score.
            risk_score_pred = min(1.0, base_score * path_risk * exploit_factor * auth_factor)
            risk_score_pred = risk_score_pred * (0.7 + 0.3 * exploitability) * (0.8 + 0.2 * evidence_quality)
            risk_score_pred = risk_score_pred * (0.6 + 0.4 * max(data_sensitivity, business_impact))
            
            weighted_score = risk_score_pred
            confidence = 0.5 + (risk_score_pred * 0.3)  # score-based confidence estimate
            
            # Threshold the score into the four severity classes.
            if risk_score_pred >= 0.9:
                severity_pred = 3  # critical
            elif risk_score_pred >= 0.7:
                severity_pred = 2  # high
            elif risk_score_pred >= 0.4:
                severity_pred = 1  # medium
            else:
                severity_pred = 0  # low
        
        # Map the predicted class onto the four severity labels.
        severity_map = {0: 'low', 1: 'medium', 2: 'high', 3: 'critical'}
        severity = severity_map.get(severity_pred, 'medium')
        
        # Clamp the final score into [0, 1].
        risk_score = min(1.0, max(0.0, weighted_score))
        
        # Human-readable summary of the dominant risk drivers.
        top_risk_factors = []
        if features['exploitability'] > 0.7:
            top_risk_factors.append('高可利用性')
        if features['data_sensitivity'] > 0.8:
            top_risk_factors.append('涉及敏感数据')
        if features['has_exploit']:
            top_risk_factors.append('存在已知利用方式')
        if features['auth_required'] == 0:
            top_risk_factors.append('无需身份验证')
        if features['path_risk_score'] > 0.9:
            top_risk_factors.append('位于关键路径')
        if features['business_impact'] > 0.8:
            top_risk_factors.append('高业务影响')
        if features['tech_risk_score'] > 1.2:
            top_risk_factors.append('高风险技术栈')
        
        # Remediation priority labels per severity.
        priority_map = {
            'critical': '立即修复',
            'high': '高优先级',
            'medium': '中等优先级',
            'low': '低优先级'
        }
        
        # Assemble the detailed assessment.
        result = {
            'risk_score': round(risk_score, 4),
            'severity': severity,
            'confidence': round(confidence, 4),
            'risk_factors': {
                'vulnerability_type': vulnerability.get('vuln_type', 'unknown'),
                'path': path,
                # Report the scoring path that actually produced the result
                # (the original checked 'if risk_model', which is always
                # truthy once the model is initialised).
                'detection_method': 'enhanced_rules' if used_fallback else 'machine_learning',
                'top_factors': top_risk_factors[:5],  # at most 5 drivers
                'exploitability_score': round(features.get('exploitability', 0.5), 2),
                'business_impact': round(features.get('business_impact', 0.5), 2),
                'remediation_difficulty': round(features.get('remediation_difficulty', 0.5), 2),
                'remediation_priority': priority_map[severity],
                'attack_surface_complexity': round(features.get('attack_surface_complexity', 0.5), 2),
                'cvss_score': round(features.get('cvss_score', 0.0), 2),
                'historical_vulnerability_frequency': round(features.get('historical_vuln_freq', 0.0), 2)
            },
            'recommendations': {
                'priority': priority_map[severity],
                'estimated_effort': '高' if features.get('remediation_difficulty', 0.5) > 0.7 else \
                                   '中' if features.get('remediation_difficulty', 0.5) > 0.4 else '低',
                'validation_needed': True if severity in ['high', 'critical'] else False
            }
        }
        
        # Attach the SHAP explanation when one was produced.
        if shap_explanation:
            result['model_interpretation'] = shap_explanation
            
        return result
        
    except Exception as e:
        logger.error(f"Error predicting risk: {str(e)}")
        # Safe fallback: medium risk, flagged for manual review.
        return {
            'risk_score': 0.5,  # default medium risk
            'severity': 'medium',
            'confidence': 0.5,
            'risk_factors': {
                'error': str(e),
                'vulnerability_type': vulnerability.get('vuln_type', 'unknown')
            },
            'recommendations': {
                'priority': '需要手动评估',
                'validation_needed': True
            },
            'error': True
        }

def batch_predict_risks(vulnerabilities):
    """Predict risk scores for many vulnerabilities in one model call.

    Extracts features for every finding, runs a single batched model
    prediction, and falls back to per-item predict_risk() calls when the
    batch path fails for any reason.

    Args:
        vulnerabilities: list of vulnerability dicts (see extract_features).

    Returns:
        list[dict]: one entry per finding with 'vuln_id', 'risk_score'
        and 'severity' ('low'|'medium'|'high'|'critical'). On fallback the
        richer predict_risk() result dicts are returned instead.
    """
    results = []
    
    try:
        if not vulnerabilities:
            return results
        
        # Lazily initialise the module-level model.
        global risk_model
        if risk_model is None:
            init_risk_model()
        
        # Feature matrix: one row per vulnerability.
        features_list = [extract_features(vuln) for vuln in vulnerabilities]
        X = pd.DataFrame(features_list)
        
        try:
            severity_preds = risk_model.predict(X)
            probabilities = risk_model.predict_proba(X) if hasattr(risk_model, 'predict_proba') else None
        except Exception as e:
            logger.warning(f"Batch prediction failed: {str(e)}, falling back to individual predictions")
            # Fall back to scoring each finding individually.
            return [predict_risk(vuln) for vuln in vulnerabilities]
        
        severity_map = {0: 'low', 1: 'medium', 2: 'high', 3: 'critical'}
        for i, vuln in enumerate(vulnerabilities):
            severity_pred = severity_preds[i]
            
            if probabilities is not None:
                # Expected class index, normalised by the max index (3).
                weighted_score = sum(j * prob for j, prob in enumerate(probabilities[i])) / 3.0
            else:
                # Simple linear mapping of the 4 classes onto (0, 1].
                weighted_score = (severity_pred + 1) / 4.0
            
            results.append({
                'vuln_id': vuln.get('id', f'vuln_{i}'),
                'risk_score': round(min(1.0, max(0.0, weighted_score)), 4),
                'severity': severity_map.get(severity_pred, 'medium')
            })
        
        logger.info(f"Batch risk prediction completed for {len(vulnerabilities)} vulnerabilities")
    except Exception as e:
        logger.error(f"Batch risk prediction failed: {str(e)}")
        # Replace (not extend) any partial results so a mid-loop failure
        # cannot produce duplicate entries for the same vulnerability.
        results = [predict_risk(vuln) for vuln in vulnerabilities]
    
    return results

def calculate_overall_risk(vulnerabilities, tech_stack=None):
    """Compute an aggregate 0-1 risk score for a set of vulnerabilities.

    Each finding is weighted by its severity level and vulnerability type,
    scaled by its detection confidence, and the weighted mean is then
    adjusted upward for vulnerability-type diversity and for any known
    high-risk technology in the stack.

    Args:
        vulnerabilities: list of finding dicts with optional 'severity',
            'vuln_type', 'risk_score' and 'confidence' keys.
        tech_stack: optional list of detected technology names.

    Returns:
        float: overall risk rounded to 3 decimals, 0.0 for empty input
        or on error.
    """
    try:
        if not vulnerabilities:
            return 0.0

        # Per-severity multipliers.
        severity_weights = {'critical': 4.0, 'high': 3.0, 'medium': 2.0, 'low': 1.0}

        # Per-type multipliers reflecting typical exploit impact.
        type_weights = {
            'sql_injection': 1.2,
            'xss': 1.1,
            'info_leak': 0.9,
            'command_injection': 1.3,
            'csrf': 0.8,
            'security_headers': 0.7,
            'file_upload': 1.1,
            'path_traversal': 1.0,
            'ssrf': 1.2
        }

        # Accumulate the weighted sum and total weight in one pass.
        score_sum = 0.0
        total_weight = 0.0
        for item in vulnerabilities:
            weight = (severity_weights.get(item.get('severity', 'low'), 1.0)
                      * type_weights.get(item.get('vuln_type', 'unknown'), 1.0))
            confidence_scale = 0.6 + 0.4 * item.get('confidence', 0.5)
            score_sum += item.get('risk_score', 0.5) * weight * confidence_scale
            total_weight += weight

        if total_weight <= 0:
            return 0.0

        average = score_sum / total_weight

        # More distinct vulnerability classes imply a broader attack
        # surface (capped at a 25% uplift).
        distinct_types = {item.get('vuln_type') for item in vulnerabilities}
        diversity = 1.0 + min(len(distinct_types) - 1, 5) * 0.05

        # Highest known risk multiplier among the supplied technologies.
        tech_multiplier = 1.0
        for tech in (tech_stack or []):
            if tech in TECH_STACK_RISK_WEIGHTS:
                tech_multiplier = max(tech_multiplier, TECH_STACK_RISK_WEIGHTS[tech])

        return round(min(average * diversity * tech_multiplier, 1.0), 3)

    except Exception as e:
        logger.error(f"计算整体风险失败: {str(e)}")
        return 0.0

def generate_risk_report(vulnerabilities, target_info=None):
    """Generate an enhanced risk report for a scan's findings.

    Aggregates severity/type distributions, computes the overall score via
    calculate_overall_risk(), highlights the dominant risk factors and
    suggests a fix order.

    Args:
        vulnerabilities: list of finding dicts.
        target_info: optional dict with 'tech_stack', 'response' and/or
            'scan_time' about the scanned target.

    Returns:
        dict: full report; a minimal report for empty input; an error
        report (with 'error') if aggregation fails.
    """
    try:
        total_vulns = len(vulnerabilities)
        
        # Empty scan: return a minimal all-clear report.
        if total_vulns == 0:
            return {
                'overall_score': 0.0,
                'overall_level': 'low',
                'severity_distribution': {
                    'critical': 0,
                    'high': 0,
                    'medium': 0,
                    'low': 0
                },
                'type_distribution': {},
                'total_vulnerabilities': 0,
                'summary': '未发现漏洞',
                'risk_factors': []
            }
        
        # Count findings per severity level.
        severity_count = Counter()
        for vuln in vulnerabilities:
            severity = vuln.get('severity', 'low')
            severity_count[severity] += 1
        
        # All four levels are always present in the output.
        severity_distribution = {
            'critical': severity_count.get('critical', 0),
            'high': severity_count.get('high', 0),
            'medium': severity_count.get('medium', 0),
            'low': severity_count.get('low', 0)
        }
        
        # Count findings per vulnerability type.
        type_count = Counter()
        for vuln in vulnerabilities:
            vuln_type = vuln.get('vuln_type', 'unknown')
            type_count[vuln_type] += 1
        
        # Technology stack: prefer the declared stack, else best-effort
        # detection from the raw response.
        tech_stack = []
        if target_info and 'tech_stack' in target_info:
            tech_stack = target_info['tech_stack']
        elif target_info and 'response' in target_info:
            try:
                tech_stack = detect_tech_stack(target_info['response'])
            except Exception as e:
                # Detection is best-effort; log instead of swallowing silently.
                logger.debug(f"Tech stack detection failed: {str(e)}")
        
        # Aggregate score across all findings.
        overall_score = calculate_overall_risk(vulnerabilities, tech_stack)
        
        # Bucket the overall score into a level.
        if overall_score >= 0.8:
            overall_level = 'critical'
        elif overall_score >= 0.6:
            overall_level = 'high'
        elif overall_score >= 0.4:
            overall_level = 'medium'
        else:
            overall_level = 'low'
        
        # Dominant risk factors, by severity counts...
        top_risk_factors = []
        if severity_count.get('critical', 0) > 0:
            top_risk_factors.append(f"发现 {severity_count['critical']} 个严重级别漏洞")
        if severity_count.get('high', 0) > 2:
            top_risk_factors.append(f"发现 {severity_count['high']} 个高危漏洞")
        
        # ...by particularly dangerous vulnerability types...
        if type_count.get('command_injection', 0) > 0:
            top_risk_factors.append("存在命令注入漏洞风险")
        if type_count.get('sql_injection', 0) > 0:
            top_risk_factors.append("存在SQL注入漏洞风险")
        
        # ...and by known-risky technologies.
        high_risk_tech = []
        for tech in tech_stack:
            if tech in TECH_STACK_RISK_WEIGHTS and TECH_STACK_RISK_WEIGHTS[tech] > 1.1:
                high_risk_tech.append(tech)
        
        if high_risk_tech:
            top_risk_factors.append(f"使用高风险技术栈: {', '.join(high_risk_tech)}")
        
        # One-line summary per level.
        if overall_level == 'critical':
            summary = f"系统存在严重安全风险！发现多个高危漏洞，需要立即修复。"
        elif overall_level == 'high':
            summary = f"系统存在较高安全风险，发现多个重要漏洞，建议尽快修复。"
        elif overall_level == 'medium':
            summary = f"系统存在中等安全风险，发现一些漏洞，建议在合理时间内修复。"
        else:
            summary = f"系统安全状况良好，只发现少量低风险漏洞。"
        
        # Rank vulnerability types by (type weight, count) for fix ordering.
        prioritized_vuln_types = sorted(
            type_count.items(),
            key=lambda x: (
                VULN_TYPE_WEIGHTS.get(x[0], 0.5),
                x[1]
            ),
            reverse=True
        )
        
        return {
            'overall_score': overall_score,
            'overall_level': overall_level,
            'severity_distribution': severity_distribution,
            'type_distribution': dict(type_count),
            'total_vulnerabilities': total_vulns,
            'top_risk_factors': top_risk_factors[:5],  # at most 5 factors
            'summary': summary,
            'tech_stack_analysis': {
                'detected_technologies': tech_stack,
                'high_risk_components': high_risk_tech
            },
            'recommended_fix_order': [v[0] for v in prioritized_vuln_types[:3]],
            'confidence_score': round(np.mean([v.get('confidence', 0.5) for v in vulnerabilities]), 2),
            # Guard against target_info=None (the default) — previously this
            # raised AttributeError and diverted every report to the error
            # branch below.
            'scan_timestamp': (target_info or {}).get('scan_time', datetime.now().isoformat())
        }
        
    except Exception as e:
        logger.error(f"生成风险报告失败: {str(e)}")
        # Minimal error report so callers always get a dict back.
        return {
            'overall_score': 0.0,
            'overall_level': 'unknown',
            'severity_distribution': {},
            'type_distribution': {},
            'total_vulnerabilities': len(vulnerabilities),
            'summary': f"生成报告时出错: {str(e)}",
            'error': str(e)
        }

def get_risk_distribution(vulnerabilities):
    """Count vulnerabilities per severity level.

    Includes the 'critical' bucket so the distribution matches the
    four-level scale used by predict_risk (previously critical findings
    were silently folded into 'medium').

    Args:
        vulnerabilities: list of finding dicts with an optional
            'severity' key.

    Returns:
        dict: {'low': n, 'medium': n, 'high': n, 'critical': n}
    """
    distribution = {'low': 0, 'medium': 0, 'high': 0, 'critical': 0}
    if not vulnerabilities:
        return distribution
    
    for vuln in vulnerabilities:
        severity = vuln.get('severity', 'medium')
        if severity in distribution:
            distribution[severity] += 1
        else:
            # Unknown labels default to the middle of the scale.
            distribution['medium'] += 1
    
    return distribution

def get_risk_timeline(vulnerabilities):
    """Build a per-day timeline of vulnerability counts.

    Args:
        vulnerabilities: iterable of vulnerability dicts; each may carry a
            'created_at' key holding either an ISO-format string or an
            object with a strftime method (e.g. datetime).

    Returns:
        list[dict]: entries of the form {'date': 'YYYY-MM-DD', 'count': int},
        in first-seen order. Unparseable timestamps are grouped under the
        'unknown' date; entries without 'created_at' are skipped.
    """
    if not vulnerabilities:
        return []

    # Count vulnerabilities per calendar day.
    timeline = Counter()

    for vuln in vulnerabilities:
        created_at = vuln.get('created_at')
        if not created_at:
            continue

        if isinstance(created_at, str):
            try:
                # Parse ISO-format date strings and normalize to YYYY-MM-DD.
                date = datetime.fromisoformat(created_at).strftime('%Y-%m-%d')
            except ValueError:
                # Fix: was a bare `except:`; only parse failures should be
                # downgraded to 'unknown', not arbitrary exceptions.
                date = 'unknown'
        else:
            try:
                date = created_at.strftime('%Y-%m-%d')
            except AttributeError:
                # Robustness: a non-datetime value previously crashed here.
                date = 'unknown'

        timeline[date] += 1

    # Convert to the list-of-dicts shape expected by the frontend.
    return [{'date': date, 'count': count} for date, count in timeline.items()]

# Initialize the risk model at import time so the module is ready for use.
init_risk_model()


class EnhancedRuleBasedRiskModel:
    """Rule-driven risk model used as a fallback when no trained ML model exists.

    Exposes the same predict / predict_proba interface as the sklearn-style
    classifiers, scoring each feature row with hand-tuned weights.
    """

    @staticmethod
    def _base_weight(encoded_idx):
        """Map an encoded vulnerability-type index back to its base weight."""
        type_names = list(VULN_TYPE_WEIGHTS.keys())
        name = type_names[min(int(encoded_idx), len(type_names) - 1)]
        return VULN_TYPE_WEIGHTS.get(name, 0.5)

    def predict(self, X):
        """Predict a severity class per row (0=low, 1=medium, 2=high, 3=critical).

        Args:
            X: pandas DataFrame of engineered vulnerability features.

        Returns:
            numpy array of int class labels, one per row.
        """
        labels = []
        for _, features in X.iterrows():
            base = self._base_weight(features.get('vuln_type_encoded', 0))

            # Multiplicative context factors (path, parameter location, tech
            # stack — tech risk capped at 1.3), then additive adjustments.
            score = (
                base
                * features.get('path_risk_score', 0.4)
                * features.get('param_location_risk', 1.0)
                * min(1.3, features.get('tech_risk_score', 1.0))
            )
            score += 0.2 * features.get('exploitability', 0)
            score += 0.15 * features.get('evidence_quality', 0.5)
            score += 0.1 * (features.get('exposure_surface', 1) - 1)
            score += 0.2 * features.get('pattern_match_score', 0)
            score = min(1.0, score)

            # Threshold the composite score into four severity levels.
            if score >= 0.9:
                labels.append(3)  # critical
            elif score >= 0.7:
                labels.append(2)  # high
            elif score >= 0.4:
                labels.append(1)  # medium
            else:
                labels.append(0)  # low

        return np.array(labels)

    def predict_proba(self, X):
        """Return a fixed 4-class probability distribution per row.

        Probabilities are canned per severity band rather than calibrated;
        each row sums to 1.0 over (low, medium, high, critical).
        """
        rows = []
        for _, features in X.iterrows():
            base = self._base_weight(features.get('vuln_type_encoded', 0))

            # Lighter-weight score than predict(): only exploitability and
            # evidence quality modulate the base weight here.
            score = min(
                1.0,
                base * (
                    0.8
                    + 0.2 * features.get('exploitability', 0)
                    + 0.1 * features.get('evidence_quality', 0.5)
                ),
            )

            if score >= 0.9:
                rows.append([0.02, 0.03, 0.15, 0.8])   # critical
            elif score >= 0.7:
                rows.append([0.05, 0.1, 0.7, 0.15])    # high
            elif score >= 0.4:
                rows.append([0.15, 0.7, 0.15, 0])      # medium
            else:
                rows.append([0.8, 0.18, 0.02, 0])      # low

        return np.array(rows)

# Factory for the fallback rule-based model (replaces the plain default).
def create_default_model():
    """Create the default risk model (enhanced rule-based fallback).

    Returns:
        EnhancedRuleBasedRiskModel: rule-driven scorer exposing the same
        predict / predict_proba interface as the trained classifiers.
    """
    logger.info("Creating enhanced rule-based default model")
    fallback = EnhancedRuleBasedRiskModel()
    return fallback


def track_model_performance(model, X, y):
    """Evaluate *model* on (X, y) and append the metrics to the on-disk history.

    Computes accuracy / precision / recall / F1 (weighted) plus ROC AUC,
    appends a timestamped record to the JSON file at MODEL_PERFORMANCE_PATH,
    and truncates the history to the 10 most recent records. Best-effort:
    any failure is logged, never raised.

    Args:
        model: object with predict(X) and predict_proba(X).
        X: feature matrix (DataFrame/array).
        y: true class labels.
    """
    try:
        # Fix: os.makedirs('') raises when MODEL_PERFORMANCE_PATH is a bare
        # filename — only create the directory when there is one.
        perf_dir = os.path.dirname(MODEL_PERFORMANCE_PATH)
        if perf_dir:
            os.makedirs(perf_dir, exist_ok=True)

        y_pred = model.predict(X)
        y_pred_proba = model.predict_proba(X)

        # zero_division=0 yields the same values as the default but suppresses
        # UndefinedMetricWarning for classes absent from y_pred.
        accuracy = accuracy_score(y, y_pred)
        precision = precision_score(y, y_pred, average='weighted', zero_division=0)
        recall = recall_score(y, y_pred, average='weighted', zero_division=0)
        f1 = f1_score(y, y_pred, average='weighted', zero_division=0)

        # ROC AUC needs one-vs-rest handling in the multi-class case.
        try:
            if len(np.unique(y)) > 2:
                roc_auc = roc_auc_score(y, y_pred_proba, multi_class='ovr', average='macro')
            else:
                roc_auc = roc_auc_score(y, y_pred_proba[:, 1])
        except Exception as e:
            logger.warning(f"Could not compute ROC AUC: {str(e)}")
            roc_auc = 0.5  # neutral fallback (AUC of a random classifier)

        performance_record = {
            'timestamp': datetime.now().isoformat(),
            'model_type': MODEL_CONFIG['type'],
            'model_version': model_metadata.get('version', '1.0') if 'model_metadata' in globals() else '1.0',
            'sample_size': len(X),
            'metrics': {
                'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f1_score': f1,
                'roc_auc': roc_auc
            },
            # NOTE: placeholder — callers may substitute the actual training time.
            'training_time': datetime.now().isoformat()
        }

        # Load the existing history; fix: a corrupt or non-list JSON payload
        # previously crashed on .append() — start fresh instead.
        performance_history = []
        if os.path.exists(MODEL_PERFORMANCE_PATH):
            try:
                with open(MODEL_PERFORMANCE_PATH, 'r') as f:
                    loaded = json.load(f)
                if isinstance(loaded, list):
                    performance_history = loaded
                else:
                    logger.warning("Performance history file is not a list, starting fresh")
            except Exception as e:
                logger.warning(f"Error loading performance history: {str(e)}")

        performance_history.append(performance_record)

        # Keep only the 10 most recent records.
        performance_history = performance_history[-10:]

        with open(MODEL_PERFORMANCE_PATH, 'w') as f:
            json.dump(performance_history, f, indent=4)

        logger.info(f"Model performance tracked: accuracy={accuracy:.4f}, f1={f1:.4f}")

    except Exception as e:
        logger.error(f"Error tracking model performance: {str(e)}")


# Risk-assessment service class — used for dependency injection and service management
class RiskAssessmentService:
    """Facade offering a unified risk-assessment API.

    Holds a handle on the shared global risk model and delegates each
    operation to the corresponding module-level function.
    """

    def __init__(self):
        self.model = None  # populated by _init_service from the module global
        self._init_service()

    def _init_service(self):
        """Initialize the underlying model and cache a reference to it."""
        init_risk_model()
        # init_risk_model() populates the module-level `risk_model`; reading a
        # module global needs no `global` declaration.
        self.model = risk_model

    def assess_vulnerability(self, vulnerability):
        """Assess the risk of a single vulnerability."""
        return predict_risk(vulnerability)

    def assess_batch_vulnerabilities(self, vulnerabilities):
        """Assess the risk of a batch of vulnerabilities."""
        return batch_predict_risks(vulnerabilities)

    def calculate_overall_risk(self, vulnerabilities):
        """Compute the aggregate risk score across vulnerabilities."""
        return calculate_overall_risk(vulnerabilities)

    def generate_risk_report(self, vulnerabilities, scan_info=None):
        """Produce a full risk-assessment report."""
        return generate_risk_report(vulnerabilities, scan_info)

    def retrain_model(self, force_retrain=False):
        """Trigger a model retrain (optionally forced)."""
        return retrain_model(force_retrain)

# Create the module-level singleton service instance.
risk_assessment_service = RiskAssessmentService()