import torch
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from typing import Dict, List, Any
import seaborn as sns


class LayerAnalyzer:
    """Layer-level activation analyzer.

    Captures intermediate layer outputs via forward hooks, computes per-layer
    scalar statistics, and renders visualizations (bar charts and a normalized
    heatmap) as PNG files under ``save_dir``.
    """

    def __init__(self, save_dir: Path):
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.hooks = []          # live hook handles; released by remove_hooks()
        self.layer_outputs = {}  # layer name -> detached CPU tensor from the last forward pass
        self.layer_stats = {}    # layer name -> dict of scalar statistics

    def register_hooks(self, model: torch.nn.Module, target_layers: List[str]):
        """Register forward hooks to capture layer outputs.

        Hooks are attached to every submodule whose qualified name (as produced
        by ``model.named_modules()``) appears in ``target_layers``; captured
        outputs are stored in ``self.layer_outputs``.
        """

        def get_activation(name):
            def hook(model, input, output):
                if isinstance(output, torch.Tensor):
                    self.layer_outputs[name] = output.detach().cpu()
                elif isinstance(output, (tuple, list)):
                    # Multi-output modules: keep only the first element, which
                    # is assumed to be the primary activation tensor.
                    self.layer_outputs[name] = output[0].detach().cpu()
            return hook

        targets = set(target_layers)  # O(1) membership instead of O(n) list scans
        for name, module in model.named_modules():
            if name in targets:
                self.hooks.append(module.register_forward_hook(get_activation(name)))

    def remove_hooks(self):
        """Remove all registered hooks and clear the handle list."""
        for hook in self.hooks:
            hook.remove()
        self.hooks.clear()

    def analyze_layer_statistics(self) -> Dict[str, Dict[str, float]]:
        """Compute scalar statistics for every captured layer output.

        Populates ``self.layer_stats``, renders the visualizations, and
        returns ``{layer_name: {metric: value}}``.
        """
        for layer_name, output in self.layer_outputs.items():
            if isinstance(output, torch.Tensor):
                output_flat = output.flatten().numpy()

                self.layer_stats[layer_name] = {
                    'mean': float(np.mean(output_flat)),
                    'std': float(np.std(output_flat)),
                    'min': float(np.min(output_flat)),
                    'max': float(np.max(output_flat)),
                    'median': float(np.median(output_flat)),
                    # Fraction of exactly-zero activations (e.g. after ReLU).
                    'sparsity': float(np.mean(output_flat == 0)),
                    'l1_norm': float(np.mean(np.abs(output_flat))),
                    'l2_norm': float(np.sqrt(np.mean(output_flat ** 2))),
                    'entropy': self._compute_entropy(output_flat)
                }

        # Render bar charts + heatmap for the collected statistics.
        self._visualize_layer_statistics()
        return self.layer_stats

    def _compute_entropy(self, data: np.ndarray, bins: int = 50) -> float:
        """Estimate the Shannon entropy (in bits) of ``data`` via a histogram.

        Bin counts are normalized to probabilities before applying
        ``-sum(p * log2(p))``.  (The previous implementation plugged
        ``density=True`` histogram values straight into the formula; densities
        are per-unit-width, can exceed 1, and make the result depend on the
        data's scale, so it was not a valid entropy.)  Empty bins are dropped,
        which keeps the logarithm well-defined, and empty input yields 0.0.
        """
        counts, _ = np.histogram(data, bins=bins)
        total = counts.sum()
        if total == 0:  # no samples at all -> define entropy as 0
            return 0.0
        probs = counts[counts > 0] / total
        return float(-np.sum(probs * np.log2(probs)))

    def _visualize_layer_statistics(self):
        """Save a 2x2 grid of bar charts (mean/std/sparsity/entropy per layer)
        to ``layer_statistics.png``, then delegate to the heatmap plot.

        No-op when no statistics have been computed yet.
        """
        if not self.layer_stats:
            return

        layer_names = list(self.layer_stats.keys())
        metrics = ['mean', 'std', 'sparsity', 'entropy']

        fig, axes = plt.subplots(2, 2, figsize=(16, 12))
        fig.suptitle('Layer Statistics Analysis', fontsize=16)

        for idx, metric in enumerate(metrics):
            row = idx // 2
            col = idx % 2

            values = [self.layer_stats[name][metric] for name in layer_names]

            axes[row, col].bar(range(len(layer_names)), values)
            axes[row, col].set_title(f'{metric.capitalize()} by Layer')
            axes[row, col].set_xticks(range(len(layer_names)))
            # Shorten tick labels to the last dotted name component, max 8 chars.
            axes[row, col].set_xticklabels([name.split('.')[-1][:8] for name in layer_names],
                                           rotation=45)
            axes[row, col].tick_params(axis='x', labelsize=8)

        plt.tight_layout()
        save_path = self.save_dir / 'layer_statistics.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"层统计分析图已保存: {save_path}")

        # Also produce the detailed per-metric heatmap.
        self._create_statistics_heatmap()

    def _create_statistics_heatmap(self):
        """Save a normalized (z-scored per metric) statistics heatmap across
        layers to ``layer_statistics_heatmap.png``.

        No-op when no statistics have been computed yet.
        """
        if not self.layer_stats:
            return

        layer_names = list(self.layer_stats.keys())
        metrics = ['mean', 'std', 'sparsity', 'entropy', 'l1_norm', 'l2_norm']

        # Rows = layers, columns = metrics.
        data_matrix = np.array(
            [[self.layer_stats[layer_name][metric] for metric in metrics]
             for layer_name in layer_names]
        )

        # Z-score each metric column so differently-scaled metrics are
        # comparable; the epsilon guards against zero-variance columns.
        normalized_data = (data_matrix - data_matrix.mean(axis=0)) / (data_matrix.std(axis=0) + 1e-8)

        plt.figure(figsize=(10, max(8, len(layer_names) * 0.5)))
        sns.heatmap(normalized_data,
                    xticklabels=metrics,
                    yticklabels=[name.split('.')[-1][:15] for name in layer_names],
                    cmap='RdBu_r', center=0, annot=False)

        plt.title('Layer Statistics Heatmap (Normalized)')
        plt.xlabel('Metrics')
        plt.ylabel('Layers')
        plt.tight_layout()

        save_path = self.save_dir / 'layer_statistics_heatmap.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"层统计热图已保存: {save_path}")
