"""
风险预警模型
基于历史督导数据预测高发问题区域和整改不力单位
"""

import pandas as pd
import numpy as np
import logging
import joblib
from typing import Dict, List, Tuple, Optional
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score
import xgboost as xgb
import lightgbm as lgb
import catboost as cb
from config.model_config import config
import optuna
import matplotlib.pyplot as plt
import seaborn as sns

class RiskPredictionModel:
    """Risk early-warning model.

    Learns from historical supervision data to predict high-risk areas and
    to score how difficult rectification is likely to be for each unit.
    """

    # Identifier / target columns. They are excluded from the feature matrix
    # during training, so they must also be dropped at prediction time to
    # keep the columns aligned with what the model was fitted on.
    _NON_FEATURE_COLUMNS = ['area_code', 'date', 'future_problems', 'risk_level']

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.model = None  # fitted classifier; set by train() or load_model()
        self.feature_importance: Optional[Dict[str, float]] = None
        self.model_config = config.risk_prediction

    def train(self, features: pd.DataFrame, targets: pd.DataFrame) -> Dict:
        """Train the risk model end-to-end.

        Args:
            features: per-area feature rows, keyed by ``area_code``.
            targets: per-area targets containing a ``risk_level`` column.

        Returns:
            Evaluation metrics on the held-out test split (see _evaluate_model).
        """
        self.logger.info("开始训练风险预警模型...")

        # 1. Prepare training data (merge, impute, split X/y)
        X, y = self._prepare_training_data(features, targets)

        # 2. Stratified split to preserve the class balance in both sets
        X_train, X_test, y_train, y_test = train_test_split(
            X, y,
            test_size=self.model_config.test_size,
            random_state=self.model_config.random_state,
            stratify=y
        )

        # 3. Hyperparameter optimization (search space follows model_type)
        best_params = self._optimize_hyperparameters(X_train, y_train)

        # 4. Fit the final model on the full training split
        self.model = self._create_model(best_params)
        self.model.fit(X_train, y_train)

        # 5. Evaluate on the held-out test split
        evaluation_results = self._evaluate_model(X_test, y_test)

        # 6. Feature-importance analysis
        self._analyze_feature_importance(list(X.columns))

        # 7. Persist the model to disk
        self._save_model()

        self.logger.info("风险预警模型训练完成")
        return evaluation_results

    def predict_risk_areas(self, features: pd.DataFrame, top_k: int = 10) -> List[Dict]:
        """Return the ``top_k`` areas ranked by predicted risk probability.

        Raises:
            ValueError: if the model has not been trained/loaded yet.
        """
        if self.model is None:
            raise ValueError("模型尚未训练，请先调用train()方法")

        # Drop identifier columns so the matrix matches the training layout
        # (they were excluded in _prepare_training_data). Previously the raw
        # frame — including area_code — was fed to predict_proba, which
        # mismatches the fitted feature set.
        feature_matrix = features.drop(columns=self._NON_FEATURE_COLUMNS, errors='ignore')

        # Probability of the positive ("risky") class
        risk_probs = self.model.predict_proba(feature_matrix)[:, 1]

        results = []
        for i, prob in enumerate(risk_probs):
            results.append({
                'area_code': features.iloc[i]['area_code'] if 'area_code' in features.columns else i,
                'risk_probability': float(prob),
                'risk_level': self._classify_risk_level(prob)
            })

        # Highest risk first; keep only the top K
        results.sort(key=lambda x: x['risk_probability'], reverse=True)
        return results[:top_k]

    def predict_rectification_difficulty(self, area_features: pd.DataFrame) -> List[Dict]:
        """Score each area's rectification difficulty (heuristic, no ML model).

        Returns one dict per row, sorted hardest-first.
        """
        rectification_scores = []

        for _, row in area_features.iterrows():
            # Heuristic difficulty score in [0, 1]
            difficulty_score = self._calculate_rectification_difficulty(row)

            rectification_scores.append({
                'area_code': row.get('area_code', ''),
                'difficulty_score': difficulty_score,
                'predicted_success_rate': 1 - difficulty_score,
                'risk_factors': self._identify_risk_factors(row)
            })

        # Hardest-to-rectify areas first
        rectification_scores.sort(key=lambda x: x['difficulty_score'], reverse=True)
        return rectification_scores

    def get_feature_importance(self) -> Dict[str, float]:
        """Return the feature-importance mapping computed during training.

        Raises:
            ValueError: if the model has not been trained yet.
        """
        if self.feature_importance is None:
            raise ValueError("特征重要性尚未计算，请先训练模型")
        return self.feature_importance

    def _prepare_training_data(self, features: pd.DataFrame, targets: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Series]:
        """Join features with targets on ``area_code`` and split into X / y.

        Missing numeric values are imputed with the column mean.
        """
        merged_data = pd.merge(features, targets, on='area_code', how='inner')

        # Everything except identifiers and target columns is a feature
        feature_columns = [col for col in merged_data.columns
                           if col not in self._NON_FEATURE_COLUMNS]
        X = merged_data[feature_columns]
        y = merged_data['risk_level']

        # numeric_only avoids a TypeError when object-dtype columns slip in
        # (plain .mean() raises on non-numeric columns in pandas >= 2)
        X = X.fillna(X.mean(numeric_only=True))

        self.logger.info(f"训练数据维度: X={X.shape}, y={y.shape}")
        self.logger.info(f"正负样本比例: {y.value_counts().to_dict()}")

        return X, y

    def _optimize_hyperparameters(self, X_train: pd.DataFrame, y_train: pd.Series) -> Dict:
        """Search hyperparameters with Optuna (3-fold CV, ROC-AUC objective).

        Both the search space and the candidate model follow the configured
        ``model_type``, so the returned parameters are valid for the final
        model. (Previously an XGBClassifier was always tuned regardless of
        model_type, and its XGBoost-specific params then crashed or silently
        mismatched the LightGBM/CatBoost model built in _create_model.)
        """
        self.logger.info("开始超参数优化...")

        def objective(trial):
            params = self._suggest_params(trial)
            model = self._create_model(params)
            return cross_val_score(model, X_train, y_train, cv=3, scoring='roc_auc').mean()

        study = optuna.create_study(direction='maximize')
        study.optimize(objective, n_trials=50)

        self.logger.info(f"最佳参数: {study.best_params}")
        self.logger.info(f"最佳得分: {study.best_value}")

        return study.best_params

    def _suggest_params(self, trial) -> Dict:
        """Build an Optuna trial parameter dict matching the configured model type."""
        # Knobs accepted by XGBoost, LightGBM and CatBoost alike
        params = {
            'n_estimators': trial.suggest_int('n_estimators', 100, 2000),
            'max_depth': trial.suggest_int('max_depth', 3, 15),
            'learning_rate': trial.suggest_float('learning_rate', 0.01, 0.3),
        }
        if self.model_config.model_type in ('xgboost', 'lightgbm'):
            params.update({
                'subsample': trial.suggest_float('subsample', 0.6, 1.0),
                'colsample_bytree': trial.suggest_float('colsample_bytree', 0.6, 1.0),
                'reg_alpha': trial.suggest_float('reg_alpha', 0, 10),
                'reg_lambda': trial.suggest_float('reg_lambda', 0, 10),
            })
        elif self.model_config.model_type == 'catboost':
            # CatBoost has no colsample_bytree/reg_alpha; subsample requires
            # a Bernoulli bootstrap. suggest_categorical keeps bootstrap_type
            # in study.best_params so _create_model receives it too.
            params.update({
                'bootstrap_type': trial.suggest_categorical('bootstrap_type', ['Bernoulli']),
                'subsample': trial.suggest_float('subsample', 0.6, 1.0),
                'l2_leaf_reg': trial.suggest_float('l2_leaf_reg', 0, 10),
            })
        return params

    def _create_model(self, params: Dict):
        """Instantiate the classifier selected by ``model_config.model_type``.

        Raises:
            ValueError: for an unrecognized model type.
        """
        if self.model_config.model_type == 'xgboost':
            return xgb.XGBClassifier(**params, random_state=self.model_config.random_state)
        elif self.model_config.model_type == 'lightgbm':
            return lgb.LGBMClassifier(**params, random_state=self.model_config.random_state)
        elif self.model_config.model_type == 'catboost':
            return cb.CatBoostClassifier(**params, random_state=self.model_config.random_state, verbose=False)
        else:
            raise ValueError(f"不支持的模型类型: {self.model_config.model_type}")

    def _evaluate_model(self, X_test: pd.DataFrame, y_test: pd.Series) -> Dict:
        """Evaluate on the test split and render diagnostic plots.

        Returns AUC, per-class metrics for the positive class, accuracy
        and the confusion matrix (as a nested list).
        """
        # Hard predictions and positive-class probabilities
        y_pred = self.model.predict(X_test)
        y_pred_proba = self.model.predict_proba(X_test)[:, 1]

        auc_score = roc_auc_score(y_test, y_pred_proba)
        classification_rep = classification_report(y_test, y_pred, output_dict=True)

        cm = confusion_matrix(y_test, y_pred)

        # Per-class entries are keyed by the string form of each label;
        # fall back to the largest label when the positive class is not "1".
        pos_key = '1' if '1' in classification_rep else str(max(y_test.unique()))

        results = {
            'auc_score': auc_score,
            'classification_report': classification_rep,
            'confusion_matrix': cm.tolist(),
            'accuracy': classification_rep['accuracy'],
            'precision': classification_rep[pos_key]['precision'],
            'recall': classification_rep[pos_key]['recall'],
            'f1_score': classification_rep[pos_key]['f1-score']
        }

        self.logger.info(f"模型评估结果: AUC={auc_score:.4f}, Accuracy={results['accuracy']:.4f}")

        # Save diagnostic figures (confusion matrix, ROC, calibration, ...)
        self._plot_evaluation_results(y_test, y_pred, y_pred_proba, cm)

        return results

    def _analyze_feature_importance(self, feature_names: List[str]):
        """Record and plot feature importances, if the model exposes them."""
        if hasattr(self.model, 'feature_importances_'):
            importances = self.model.feature_importances_
            # Cast to plain float: importances arrive as numpy float32,
            # which the stdlib json encoder cannot serialize.
            self.feature_importance = {
                name: float(score) for name, score in zip(feature_names, importances)
            }

            sorted_features = sorted(self.feature_importance.items(), key=lambda x: x[1], reverse=True)

            self.logger.info("特征重要性排序:")
            for feature, importance in sorted_features[:10]:
                self.logger.info(f"{feature}: {importance:.4f}")

            # Visualize the 20 most important features
            self._plot_feature_importance(sorted_features[:20])

    def _classify_risk_level(self, probability: float) -> str:
        """Map a risk probability to a human-readable risk tier."""
        if probability >= 0.8:
            return "极高"
        elif probability >= 0.6:
            return "高"
        elif probability >= 0.4:
            return "中"
        else:
            return "低"

    def _calculate_rectification_difficulty(self, row: pd.Series) -> float:
        """Heuristic rectification-difficulty score in [0, 1].

        Weighted sum: 0.4 * (1 - historical rectification rate)
        + 0.3 * problem-type complexity + 0.3 * severity, each capped at 1.
        Missing components simply contribute nothing.
        """
        difficulty_score = 0.0

        # Historical rectification rate (lower rate -> harder)
        if 'avg_rectification_rate' in row:
            difficulty_score += (1 - row['avg_rectification_rate']) * 0.4

        # Problem complexity, normalized against 5 distinct types
        if 'problem_type_count' in row:
            difficulty_score += min(row['problem_type_count'] / 5.0, 1.0) * 0.3

        # Severity, normalized against a 10-point scale
        if 'severity_score' in row:
            difficulty_score += min(row['severity_score'] / 10.0, 1.0) * 0.3

        return min(difficulty_score, 1.0)

    def _identify_risk_factors(self, row: pd.Series) -> List[str]:
        """List the human-readable risk factors triggered by this row."""
        risk_factors = []

        if row.get('avg_rectification_rate', 1.0) < 0.5:
            risk_factors.append("历史整改率低")

        if row.get('problem_type_count', 0) > 3:
            risk_factors.append("问题类型复杂")

        if row.get('severity_score', 0) > 5:
            risk_factors.append("问题严重程度高")

        if row.get('problem_count', 0) > 10:
            risk_factors.append("问题数量多")

        return risk_factors

    def _plot_evaluation_results(self, y_test, y_pred, y_pred_proba, cm):
        """Render the 2x2 evaluation figure and save it to the log directory."""
        fig, axes = plt.subplots(2, 2, figsize=(12, 10))

        # Confusion matrix
        sns.heatmap(cm, annot=True, fmt='d', ax=axes[0, 0])
        axes[0, 0].set_title('混淆矩阵')
        axes[0, 0].set_xlabel('预测值')
        axes[0, 0].set_ylabel('真实值')

        # ROC curve with the diagonal chance line
        from sklearn.metrics import roc_curve
        fpr, tpr, _ = roc_curve(y_test, y_pred_proba)
        axes[0, 1].plot(fpr, tpr, label=f'ROC曲线 (AUC = {roc_auc_score(y_test, y_pred_proba):.2f})')
        axes[0, 1].plot([0, 1], [0, 1], 'k--')
        axes[0, 1].set_xlabel('假正率')
        axes[0, 1].set_ylabel('真正率')
        axes[0, 1].set_title('ROC曲线')
        axes[0, 1].legend()

        # Predicted-probability distribution, split by true class
        axes[1, 0].hist(y_pred_proba[y_test == 0], alpha=0.5, label='负样本', bins=20)
        axes[1, 0].hist(y_pred_proba[y_test == 1], alpha=0.5, label='正样本', bins=20)
        axes[1, 0].set_xlabel('预测概率')
        axes[1, 0].set_ylabel('频率')
        axes[1, 0].set_title('预测概率分布')
        axes[1, 0].legend()

        # Calibration curve vs. the perfectly-calibrated diagonal
        from sklearn.calibration import calibration_curve
        fraction_of_positives, mean_predicted_value = calibration_curve(y_test, y_pred_proba, n_bins=10)
        axes[1, 1].plot(mean_predicted_value, fraction_of_positives, "s-", label="模型")
        axes[1, 1].plot([0, 1], [0, 1], "k:", label="完美校准")
        axes[1, 1].set_xlabel('平均预测概率')
        axes[1, 1].set_ylabel('正样本比例')
        axes[1, 1].set_title('校准曲线')
        axes[1, 1].legend()

        plt.tight_layout()
        plt.savefig(f'{config.data.log_dir}/risk_model_evaluation.png')
        plt.close()

    def _plot_feature_importance(self, sorted_features: List[Tuple[str, float]]):
        """Render a horizontal bar chart of feature importances and save it."""
        features, importances = zip(*sorted_features)

        plt.figure(figsize=(10, 8))
        plt.barh(range(len(features)), importances)
        plt.yticks(range(len(features)), features)
        plt.xlabel('重要性')
        plt.title('特征重要性排序')
        plt.tight_layout()
        plt.savefig(f'{config.data.log_dir}/feature_importance.png')
        plt.close()

    def _save_model(self):
        """Persist the model, its feature importances and config via joblib."""
        model_path = f"{config.data.model_dir}/{self.model_config.model_name}.pkl"
        joblib.dump({
            'model': self.model,
            'feature_importance': self.feature_importance,
            'config': self.model_config
        }, model_path)

        self.logger.info(f"模型已保存到: {model_path}")

    def load_model(self, model_path: Optional[str] = None):
        """Load a previously saved model bundle.

        Args:
            model_path: explicit path to the .pkl; defaults to the path
                _save_model writes to.
        """
        if model_path is None:
            model_path = f"{config.data.model_dir}/{self.model_config.model_name}.pkl"

        model_data = joblib.load(model_path)
        self.model = model_data['model']
        self.feature_importance = model_data['feature_importance']

        self.logger.info(f"模型已从 {model_path} 加载")

# 模型训练器
class RiskModelTrainer:
    """Orchestrates RiskPredictionModel training and training-report generation."""

    def __init__(self):
        self.logger = logging.getLogger(__name__)
        self.model = RiskPredictionModel()

    def train_and_evaluate(self, features: pd.DataFrame, targets: pd.DataFrame) -> Dict:
        """Train the risk model, then persist and return a training report.

        Returns:
            Dict with the trained model wrapper, its evaluation metrics,
            and the generated training report.
        """
        self.logger.info("开始风险预警模型训练流程...")

        # Full training pipeline (split, tune, fit, evaluate, save)
        evaluation_results = self.model.train(features, targets)

        # Summarize the run and write it to disk as JSON
        report = self._generate_training_report(evaluation_results)

        return {
            'model': self.model,
            'evaluation_results': evaluation_results,
            'training_report': report
        }

    def _generate_training_report(self, evaluation_results: Dict) -> Dict:
        """Assemble the training report and write it to the log directory as JSON."""
        report = {
            'model_performance': {
                'auc_score': evaluation_results['auc_score'],
                'accuracy': evaluation_results['accuracy'],
                'precision': evaluation_results['precision'],
                'recall': evaluation_results['recall'],
                'f1_score': evaluation_results['f1_score']
            },
            'feature_importance': self.model.get_feature_importance(),
            'model_config': self.model.model_config.__dict__,
            'training_timestamp': pd.Timestamp.now().isoformat()
        }

        report_path = f"{config.data.log_dir}/risk_model_training_report.json"
        import json
        with open(report_path, 'w', encoding='utf-8') as f:
            # default= is required: feature importances arrive as numpy
            # float32 scalars (model.feature_importances_), which the stdlib
            # json encoder rejects with a TypeError; config values may also
            # be non-serializable objects.
            json.dump(report, f, ensure_ascii=False, indent=2,
                      default=self._json_default)

        self.logger.info(f"训练报告已保存到: {report_path}")

        return report

    @staticmethod
    def _json_default(obj):
        """json.dump fallback: unwrap numpy scalars/arrays, stringify the rest."""
        if isinstance(obj, np.generic):
            return obj.item()  # np.float32 / np.int64 -> native Python scalar
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Last resort for opaque config values — deliberate, not silent:
        # the report is informational, so a readable repr beats a crash.
        return str(obj)