""" 数据/概念漂移检测（PSI/KS指标） """
"""
模型漂移检测器
用于检测输入数据分布和模型性能的变化
"""
import numpy as np
from scipy import stats
from sklearn.metrics import confusion_matrix
from typing import Dict, List, Any
import logging
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)


class DataDriftDetector:
    """Detects drift in the input-data distribution.

    A reference distribution is captured with :meth:`fit`; later batches are
    compared against it with two per-feature statistics:

    * PSI (Population Stability Index) over equal-width bins spanning
      +/- 3 standard deviations of both distributions.
    * The two-sample Kolmogorov-Smirnov statistic.

    Drift is reported when any feature's PSI or KS statistic exceeds the
    configured threshold.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the data drift detector.

        Args:
            config: Configuration dict. Recognized keys:
                - 'threshold': per-feature PSI/KS drift threshold (default 0.2)
                - 'max_history': max retained drift records (default 100)
        """
        self.config = config
        self.reference_distribution = None  # populated by fit()
        self.threshold = config.get('threshold', 0.2)
        self.drift_history = []

    def fit(self, reference_data: np.ndarray) -> None:
        """
        Fit the reference data distribution.

        Args:
            reference_data: Reference data of shape [n_samples, n_features].
        """
        logger.info(f"拟合参考数据分布，样本数: {reference_data.shape[0]}")
        reference_data = np.asarray(reference_data)
        self.reference_distribution = {
            'mean': np.mean(reference_data, axis=0),
            'std': np.std(reference_data, axis=0),
            'shape': reference_data.shape,
            # Keep the raw samples so PSI/KS are computed against the actual
            # reference distribution. The previous implementation re-sampled
            # a Gaussian approximation with np.random on every check, which
            # was non-deterministic and wrong for non-normal features.
            'data': reference_data.copy(),
        }

    def detect_drift(self, current_data: np.ndarray) -> Dict[str, Any]:
        """
        Detect data drift against the fitted reference distribution.

        Args:
            current_data: Current data of shape [n_samples, n_features].

        Returns:
            Drift detection result dict; always contains 'drift_detected'.
        """
        if self.reference_distribution is None:
            logger.warning("参考数据分布未拟合，无法检测漂移")
            return {'drift_detected': False, 'reason': 'reference_distribution_not_fitted'}

        # A feature-count mismatch is treated as drift outright.
        if current_data.shape[1] != self.reference_distribution['shape'][1]:
            logger.warning(f"数据形状不匹配: 当前 {current_data.shape[1]} vs 参考 {self.reference_distribution['shape'][1]}")
            return {'drift_detected': True, 'reason': 'shape_mismatch'}

        # Per-feature summary of the current batch.
        current_mean = np.mean(current_data, axis=0)
        current_std = np.std(current_data, axis=0)

        # Per-feature drift statistics.
        psi_values = self._calculate_psi(current_data, current_mean, current_std)
        ks_statistics = self._calculate_ks(current_data)

        # Any single feature exceeding the threshold flags drift.
        # bool() cast: keep a plain Python bool (JSON-serializable), not np.bool_.
        drift_detected = bool(
            np.any(psi_values > self.threshold) or np.any(ks_statistics > self.threshold)
        )

        # Record the observation for trend inspection.
        drift_record = {
            'timestamp': datetime.now(),
            'drift_detected': drift_detected,
            'psi_values': psi_values.tolist(),
            'ks_statistics': ks_statistics.tolist(),
            'current_mean': current_mean.tolist(),
            'current_std': current_std.tolist()
        }
        self.drift_history.append(drift_record)

        # Bound the history so long-running processes don't grow unboundedly.
        max_history = self.config.get('max_history', 100)
        if len(self.drift_history) > max_history:
            self.drift_history = self.drift_history[-max_history:]

        logger.info(f"数据漂移检测结果: {drift_detected}")

        return {
            'drift_detected': drift_detected,
            'psi_values': psi_values.tolist(),
            'ks_statistics': ks_statistics.tolist(),
            'threshold': self.threshold,
            'current_mean': current_mean.tolist(),
            'current_std': current_std.tolist(),
            'reference_mean': self.reference_distribution['mean'].tolist(),
            'reference_std': self.reference_distribution['std'].tolist()
        }

    def _reference_samples(self) -> np.ndarray:
        """Return reference samples of shape [n_samples, n_features].

        Falls back to sampling a Gaussian from the stored mean/std for
        reference_distribution dicts created before raw data was retained
        (pre-fix behavior; non-deterministic, kept only for compatibility).
        """
        data = self.reference_distribution.get('data')
        if data is None:
            mean = np.atleast_1d(self.reference_distribution['mean'])
            std = np.atleast_1d(self.reference_distribution['std'])
            data = mean + std * np.random.randn(1000, mean.shape[0])
        return data

    def _calculate_psi(self, current_data: np.ndarray, current_mean: np.ndarray, current_std: np.ndarray) -> np.ndarray:
        """
        Compute the Population Stability Index (PSI) per feature.

        Args:
            current_data: Current data of shape [n_samples, n_features].
            current_mean: Per-feature mean of the current data.
            current_std: Per-feature std of the current data.

        Returns:
            Array of one PSI value per feature (0.0 on per-feature error).
        """
        psi_values = []
        ref_mean = self.reference_distribution['mean']
        ref_std = self.reference_distribution['std']
        ref_samples = self._reference_samples()

        for i in range(current_data.shape[1]):
            try:
                # Equal-width bins covering +/- 3 sigma of both distributions.
                lo = min(ref_mean[i] - 3 * ref_std[i], current_mean[i] - 3 * current_std[i])
                hi = max(ref_mean[i] + 3 * ref_std[i], current_mean[i] + 3 * current_std[i])
                bins = np.linspace(lo, hi, 10)

                # Bin frequencies of the actual reference and current data.
                ref_counts, _ = np.histogram(ref_samples[:, i], bins=bins)
                curr_counts, _ = np.histogram(current_data[:, i], bins=bins)

                # Normalize to probabilities; epsilon guards empty inputs.
                ref_probs = ref_counts / (np.sum(ref_counts) + 1e-10)
                curr_probs = curr_counts / (np.sum(curr_counts) + 1e-10)

                # PSI = sum((p_curr - p_ref) * ln(p_curr / p_ref)), smoothed
                # with epsilon so empty bins don't produce log(0).
                psi = np.sum((curr_probs - ref_probs) * np.log((curr_probs + 1e-10) / (ref_probs + 1e-10)))
                psi_values.append(psi)
            except Exception as e:
                logger.error(f"计算PSI时出错: {str(e)}")
                psi_values.append(0.0)

        return np.array(psi_values)

    def _calculate_ks(self, current_data: np.ndarray) -> np.ndarray:
        """
        Compute the two-sample Kolmogorov-Smirnov statistic per feature.

        Args:
            current_data: Current data of shape [n_samples, n_features].

        Returns:
            Array of one KS statistic per feature (0.0 on per-feature error).
        """
        ks_statistics = []
        ref_samples = self._reference_samples()

        for i in range(current_data.shape[1]):
            try:
                # Compare the actual reference samples to the current batch.
                ks_stat, _ = stats.ks_2samp(ref_samples[:, i], current_data[:, i])
                ks_statistics.append(ks_stat)
            except Exception as e:
                logger.error(f"计算KS统计量时出错: {str(e)}")
                ks_statistics.append(0.0)

        return np.array(ks_statistics)

    def get_drift_history(self, days: int = 7) -> List[Dict[str, Any]]:
        """
        Return drift records from the last `days` days.

        Args:
            days: Lookback window in days.

        Returns:
            List of drift history records within the window.
        """
        cutoff_time = datetime.now() - timedelta(days=days)
        return [record for record in self.drift_history if record['timestamp'] >= cutoff_time]


class ModelDriftDetector:
    """Detects drift in model performance metrics.

    Reference metrics are registered with :meth:`set_reference_metrics`;
    later metric snapshots are compared against them, and a relative change
    beyond the per-metric threshold flags drift. For 'ppl' and 'loss' an
    increase is bad; for all other metrics a drop is bad.
    """

    def __init__(self, config: Dict[str, Any]):
        """
        Initialize the model performance drift detector.

        Args:
            config: Configuration dict. Recognized keys:
                - 'metric_thresholds': mapping of '<metric>_drop' /
                  'ppl_increase' to relative-change thresholds
                - 'max_history': max retained performance records (default 100)
        """
        self.config = config
        self.reference_metrics = None  # set by set_reference_metrics()
        self.metric_thresholds = config.get('metric_thresholds', {
            'f1_drop': 0.05,
            'precision_drop': 0.05,
            'recall_drop': 0.05,
            'ppl_increase': 0.1
        })
        self.performance_history = []

    def set_reference_metrics(self, metrics: Dict[str, float]) -> None:
        """
        Set the reference performance metrics.

        Args:
            metrics: Baseline metric values to compare against.
        """
        logger.info(f"设置参考性能指标: {metrics}")
        self.reference_metrics = metrics

    def detect_performance_drift(self, current_metrics: Dict[str, float]) -> Dict[str, Any]:
        """
        Detect model performance drift against the reference metrics.

        Args:
            current_metrics: Current metric values; only metrics also present
                in the reference set are compared.

        Returns:
            Drift detection result dict; always contains 'drift_detected'.
        """
        if self.reference_metrics is None:
            logger.warning("参考性能指标未设置，无法检测性能漂移")
            return {'drift_detected': False, 'reason': 'reference_metrics_not_set'}

        metric_changes = {}
        drift_metrics = []

        for metric_name, current_value in current_metrics.items():
            if metric_name not in self.reference_metrics:
                continue
            ref_value = self.reference_metrics[metric_name]

            if metric_name in ['ppl', 'loss']:
                # Perplexity/loss: higher is worse, so measure the increase.
                change = (current_value - ref_value) / (ref_value + 1e-10)
                threshold_key = 'ppl_increase' if metric_name == 'ppl' else 'loss_increase'
            else:
                # All other metrics: higher is better, so measure the drop.
                change = (ref_value - current_value) / (ref_value + 1e-10)
                threshold_key = f'{metric_name}_drop' if f'{metric_name}_drop' in self.metric_thresholds else 'default'

            metric_changes[metric_name] = change

            # Flag the metric when the relative degradation exceeds its
            # threshold (0.05 fallback for unknown threshold keys).
            threshold = self.metric_thresholds.get(threshold_key, 0.05)
            if change > threshold:
                drift_metrics.append({
                    'metric': metric_name,
                    'current': current_value,
                    'reference': ref_value,
                    'change': change,
                    'threshold': threshold
                })

        drift_detected = len(drift_metrics) > 0

        # Record the observation for trend inspection.
        performance_record = {
            'timestamp': datetime.now(),
            'metrics': current_metrics,
            'changes': metric_changes,
            'drift_detected': drift_detected,
            'drift_metrics': drift_metrics
        }
        self.performance_history.append(performance_record)

        # Bound the history so long-running processes don't grow unboundedly.
        max_history = self.config.get('max_history', 100)
        if len(self.performance_history) > max_history:
            self.performance_history = self.performance_history[-max_history:]

        logger.info(f"模型性能漂移检测结果: {len(drift_metrics) > 0}")

        return {
            'drift_detected': drift_detected,
            'drift_metrics': drift_metrics,
            'metric_changes': metric_changes,
            'current_metrics': current_metrics,
            'reference_metrics': self.reference_metrics
        }

    def get_performance_history(self, days: int = 7) -> List[Dict[str, Any]]:
        """
        Return performance records from the last `days` days.

        Args:
            days: Lookback window in days.

        Returns:
            List of performance history records within the window.
        """
        cutoff_time = datetime.now() - timedelta(days=days)
        return [record for record in self.performance_history if record['timestamp'] >= cutoff_time]

    def calculate_confusion_matrix_metrics(self, y_true: np.ndarray, y_pred: np.ndarray) -> Dict[str, float]:
        """
        Compute binary classification metrics from the confusion matrix.

        Assumes binary 0/1 labels (the tp/tn layout below requires it —
        TODO confirm against callers).

        Args:
            y_true: Ground-truth labels.
            y_pred: Predicted labels.

        Returns:
            Dict with accuracy/precision/recall/f1_score (plus tp/tn/fp/fn
            on success); all-zero metrics on error or multi-class input.
        """
        try:
            cm = confusion_matrix(y_true, y_pred)
            if cm.size == 4:
                # Standard binary 2x2 matrix.
                tn, fp, fn, tp = cm.ravel()
            elif cm.size == 1:
                # Degenerate case: only one class present, and every
                # prediction matches the truth. The old code mapped this to
                # all zeros, i.e. accuracy 0 for a perfectly accurate batch.
                n = int(cm[0, 0])
                only_label = np.unique(np.asarray(y_true).ravel())[0]
                tp, tn, fp, fn = (n, 0, 0, 0) if only_label == 1 else (0, n, 0, 0)
            else:
                # Multi-class input: these binary metrics are undefined.
                tn, fp, fn, tp = 0, 0, 0, 0

            # Epsilon guards division by zero on empty/degenerate counts.
            accuracy = (tp + tn) / (tp + tn + fp + fn + 1e-10)
            precision = tp / (tp + fp + 1e-10)
            recall = tp / (tp + fn + 1e-10)
            f1_score = 2 * (precision * recall) / (precision + recall + 1e-10)

            return {
                'accuracy': accuracy,
                'precision': precision,
                'recall': recall,
                'f1_score': f1_score,
                'tp': tp,
                'tn': tn,
                'fp': fp,
                'fn': fn
            }
        except Exception as e:
            logger.error(f"计算混淆矩阵指标时出错: {str(e)}")
            return {
                'accuracy': 0.0,
                'precision': 0.0,
                'recall': 0.0,
                'f1_score': 0.0
            }


# Global detector instances, wired from the project configuration at import
# time. NOTE(review): this import has a side effect (loads config on module
# import); missing 'monitor.model.*' sections fall back to empty dicts, so
# the detectors use their built-in defaults.
from src.utils.config_loader import get_config

config = get_config()
data_drift_detector = DataDriftDetector(config.get('monitor', {}).get('model', {}).get('drift_detection', {}))
model_drift_detector = ModelDriftDetector(config.get('monitor', {}).get('model', {}).get('performance_tracking', {}))