import torch
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from typing import Dict, List, Any, Tuple
import torch.nn.functional as F
from scipy.stats import entropy


class ExplainabilityMetrics:
    """Computes interpretability metrics over a model's intermediate activations.

    Registers forward hooks on every Conv2d/Linear layer, runs one forward
    pass to collect their outputs, and derives per-layer statistics
    (entropy, sparsity, effective rank, saturation, ...) plus an aggregate
    interpretability score. Plots are written under ``save_dir``.
    """

    def __init__(self, save_dir: Path):
        # Accept either str or Path; create the output directory eagerly.
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def analyze_information_flow(self, model: torch.nn.Module,
                                 input1: torch.Tensor, input2: torch.Tensor) -> Dict[str, Dict[str, float]]:
        """Run one forward pass and compute activation metrics per Conv2d/Linear layer.

        Args:
            model: a two-input model, invoked as ``model(input1, input2)``.
            input1: first forward-pass input.
            input2: second forward-pass input.

        Returns:
            Mapping from layer name to its metric dict
            (see ``_compute_activation_metrics``).
        """
        activations: Dict[str, torch.Tensor] = {}

        def get_activation(name):
            # Capture only tensor outputs; some modules return tuples/None.
            def hook(module, inputs, output):
                if isinstance(output, torch.Tensor):
                    activations[name] = output.detach().cpu()
            return hook

        # Instrument every Conv2d/Linear layer with a forward hook.
        hooks = []
        for name, module in model.named_modules():
            if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
                hooks.append(module.register_forward_hook(get_activation(name)))

        try:
            with torch.no_grad():
                _ = model(input1, input2)
        finally:
            # Remove hooks even if the forward pass raises, so the model is
            # never left permanently instrumented.
            for hook in hooks:
                hook.remove()

        info_metrics = {}
        for name, activation in activations.items():
            # Scalar/1-D outputs carry no per-feature structure; skip them.
            if activation.dim() >= 2:
                info_metrics[name] = self._compute_activation_metrics(activation)
        return info_metrics

    def _compute_activation_metrics(self, activation: torch.Tensor) -> Dict[str, float]:
        """Compute summary statistics for one layer's activation tensor."""
        act_flat = activation.flatten().numpy()
        if act_flat.size == 0:
            # Degenerate empty activation: report zeros instead of NaN means.
            return {key: 0.0 for key in ('mean', 'std', 'sparsity', 'entropy',
                                         'effective_rank', 'activation_ratio',
                                         'saturation_ratio')}
        return {
            'mean': float(np.mean(act_flat)),
            'std': float(np.std(act_flat)),
            'sparsity': float(np.mean(act_flat == 0)),         # fraction of exact zeros
            'entropy': self._compute_entropy(act_flat),
            'effective_rank': self._compute_effective_rank(activation),
            'activation_ratio': float(np.mean(act_flat > 0)),  # fraction of positive values
            'saturation_ratio': self._compute_saturation_ratio(act_flat),
        }

    def _compute_entropy(self, data: np.ndarray, bins: int = 50) -> float:
        """Shannon entropy (bits) of the value histogram of *data*."""
        if data.size == 0:
            return 0.0
        hist, _ = np.histogram(data, bins=bins, density=True)
        hist = hist[hist > 0]  # entropy is defined only over non-zero mass
        if hist.size == 0:
            return 0.0
        # scipy.stats.entropy normalizes its input to sum to 1, so passing
        # density values (equal-width bins) is equivalent to passing counts.
        return float(entropy(hist, base=2))

    def _compute_effective_rank(self, tensor: torch.Tensor) -> float:
        """Effective rank of the (batch, features) activation matrix.

        Defined as exp of the Shannon entropy of the normalized
        singular-value spectrum. Returns 0.0 for tensors with fewer than
        two dimensions, for all-zero activations, and on SVD failure.
        """
        if tensor.dim() < 2:
            return 0.0
        # Flatten everything except the batch dimension; reshape (unlike
        # view) also works on non-contiguous tensors, and this generalizes
        # to 3-D Linear outputs (batch, seq, features) as well.
        tensor_2d = tensor.reshape(tensor.shape[0], -1)
        try:
            # Only singular values are needed; svdvals avoids computing U/V
            # (torch.svd is deprecated).
            s = torch.linalg.svdvals(tensor_2d.float())
            total = s.sum()
            if total <= 0:
                # All-zero activation: spectrum carries no information.
                return 0.0
            s_normalized = s / total
            entropy_s = -(s_normalized * torch.log(s_normalized + 1e-12)).sum()
            return float(torch.exp(entropy_s))
        except RuntimeError:
            # SVD can fail to converge on ill-conditioned inputs.
            return 0.0

    def _compute_saturation_ratio(self, data: np.ndarray, threshold: float = 0.99) -> float:
        """Fraction of values whose magnitude exceeds *threshold* times the max magnitude."""
        if data.size == 0:
            return 0.0
        max_val = np.max(np.abs(data))
        if max_val == 0:
            return 0.0
        saturated = np.sum(np.abs(data) > threshold * max_val)
        return float(saturated / data.size)

    def visualize_information_metrics(self, info_metrics: Dict[str, Dict[str, float]]):
        """Save a 2x2 bar-chart grid of per-layer metrics to ``save_dir``.

        Silently returns when *info_metrics* is empty.
        """
        if not info_metrics:
            return

        layer_names = list(info_metrics.keys())
        metrics = ['entropy', 'sparsity', 'effective_rank', 'activation_ratio']

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle('Information Flow Metrics', fontsize=16)

        for idx, metric in enumerate(metrics):
            row = idx // 2
            col = idx % 2

            # Missing metric keys default to 0 so one bad layer can't crash the plot.
            values = [info_metrics[name].get(metric, 0) for name in layer_names]

            axes[row, col].bar(range(len(layer_names)), values)
            axes[row, col].set_title(f'{metric.replace("_", " ").title()} by Layer')
            axes[row, col].set_xticks(range(len(layer_names)))
            # Shorten labels to the last qualified-name component, max 8 chars.
            axes[row, col].set_xticklabels([name.split('.')[-1][:8] for name in layer_names],
                                           rotation=45)
            axes[row, col].tick_params(axis='x', labelsize=8)

        plt.tight_layout()
        save_path = self.save_dir / 'information_metrics.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"信息流指标图已保存: {save_path}")

    def compute_model_interpretability_score(self, info_metrics: Dict[str, Dict[str, float]]) -> float:
        """Aggregate the per-layer metrics into a single score in [0, 1].

        Heuristic: high entropy, moderate sparsity (~30%) and high effective
        rank are treated as more interpretable. Returns 0.0 for empty input.
        """
        if not info_metrics:
            return 0.0

        # Layer-wise averages of the three contributing dimensions.
        avg_entropy = np.mean([metrics['entropy'] for metrics in info_metrics.values()])
        avg_sparsity = np.mean([metrics['sparsity'] for metrics in info_metrics.values()])
        avg_effective_rank = np.mean([metrics['effective_rank'] for metrics in info_metrics.values()])

        # Normalize each dimension to [0, 1] with fixed heuristic scales.
        entropy_score = min(avg_entropy / 10.0, 1.0)      # entropy saturates at 10 bits
        sparsity_score = 1.0 - abs(avg_sparsity - 0.3)    # 30% sparsity is optimal
        rank_score = min(avg_effective_rank / 100.0, 1.0)  # rank saturates at 100

        interpretability_score = (entropy_score * 0.4 + sparsity_score * 0.3 + rank_score * 0.3)

        return float(interpretability_score)
