import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from pathlib import Path
from typing import Dict, List, Tuple, Any
import pandas as pd


class ModelInterpreter:
    """Whole-model interpretation utilities: parameter/complexity statistics
    and an architecture visualization saved to disk."""

    def __init__(self, model, save_dir: str = "explainability_results"):
        """
        Args:
            model: the ``torch.nn.Module`` to analyze.
            save_dir: directory where reports and figures are written
                (created, including parents, if missing).
        """
        self.model = model
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)

    def analyze_model_complexity(self) -> Dict[str, Any]:
        """Collect parameter counts and an accurate on-disk size estimate.

        Returns:
            dict with keys:
                'total_parameters': total parameter element count,
                'trainable_parameters': count of elements with requires_grad,
                'layer_parameters': leaf-module name -> parameter count,
                'model_size_mb': size in MiB using each parameter's real dtype width.
        """
        total_params = sum(p.numel() for p in self.model.parameters())
        trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)

        # Per-layer counts over leaf modules only, so parameters owned by
        # containers (Sequential, etc.) are not double-counted.
        layer_params = {}
        for name, module in self.model.named_modules():
            if len(list(module.children())) == 0:  # leaf module
                params = sum(p.numel() for p in module.parameters())
                if params > 0:
                    layer_params[name] = params

        # Fix: use each parameter's actual element size instead of assuming
        # float32 (4 bytes), so fp16/bf16/float64 models report correctly.
        # For float32 models this matches the previous numel * 4 value.
        size_bytes = sum(p.numel() * p.element_size() for p in self.model.parameters())

        complexity_info = {
            'total_parameters': total_params,
            'trainable_parameters': trainable_params,
            'layer_parameters': layer_params,
            'model_size_mb': size_bytes / (1024 * 1024),
        }

        return complexity_info

    def visualize_model_architecture(self):
        """Render a horizontal bar chart of per-layer parameter counts and
        write a plain-text complexity report.

        Saves 'model_architecture.png' and 'model_complexity.txt' under
        ``save_dir``.

        Returns:
            The complexity dict from :meth:`analyze_model_complexity`.
        """
        complexity = self.analyze_model_complexity()

        layer_names = list(complexity['layer_parameters'].keys())
        param_counts = list(complexity['layer_parameters'].values())

        # Keep the chart readable: show only the 20 largest layers.
        if len(layer_names) > 20:
            sorted_indices = np.argsort(param_counts)[-20:]
            layer_names = [layer_names[i] for i in sorted_indices]
            param_counts = [param_counts[i] for i in sorted_indices]

        plt.figure(figsize=(15, 8))
        bars = plt.barh(range(len(layer_names)), param_counts)
        plt.yticks(range(len(layer_names)), [name.split('.')[-1] for name in layer_names])
        plt.xlabel('Parameter Count')
        plt.title('Model Layer Parameter Distribution')

        # Annotate each bar with its exact parameter count.
        for bar, count in zip(bars, param_counts):
            plt.text(bar.get_width(), bar.get_y() + bar.get_height() / 2,
                     f'{count:,}', ha='left', va='center')

        # Fix: run layout after ALL artists (including the text labels above)
        # exist, so tight_layout accounts for them.
        plt.tight_layout()

        save_path = self.save_dir / 'model_architecture.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        # Detailed report; explicit encoding so output is portable.
        with open(self.save_dir / 'model_complexity.txt', 'w', encoding='utf-8') as f:
            f.write(f"Total Parameters: {complexity['total_parameters']:,}\n")
            f.write(f"Trainable Parameters: {complexity['trainable_parameters']:,}\n")
            f.write(f"Model Size: {complexity['model_size_mb']:.2f} MB\n\n")
            f.write("Layer Parameter Distribution:\n")
            for name, params in complexity['layer_parameters'].items():
                f.write(f"{name}: {params:,}\n")

        print(f"Model architecture analysis saved to {save_path}")
        return complexity


class LayerAnalyzer:
    """Records per-layer activation statistics via forward hooks and
    exports them as a CSV table plus a summary figure."""

    def __init__(self, save_dir: str = "explainability_results"):
        """
        Args:
            save_dir: directory where the CSV and figure are written
                (created, including parents, if missing).
        """
        self.save_dir = Path(save_dir)
        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.layer_outputs = {}  # layer name -> latest stats dict
        self.hooks = []          # live hook handles, removed by remove_hooks()

    def register_hooks(self, model, layer_names: List[str]):
        """Attach forward hooks recording activation stats for the named layers.

        Each forward pass overwrites the previous stats for a layer.
        Non-tensor outputs (e.g. tuples) are silently skipped.

        Args:
            model: module whose named submodules are matched against layer_names.
            layer_names: exact names (as in ``model.named_modules()``) to watch.
        """
        def get_stats(name):
            def hook(module, inputs, output):
                if isinstance(output, torch.Tensor):
                    # Fix: detach before computing stats so the reductions do
                    # not build/retain autograd graph nodes when grad is on.
                    out = output.detach()
                    self.layer_outputs[name] = {
                        'shape': out.shape,
                        'mean': out.mean().item(),
                        'std': out.std().item(),
                        'min': out.min().item(),
                        'max': out.max().item(),
                        'zeros_ratio': (out == 0).float().mean().item()
                    }
            return hook

        for name, module in model.named_modules():
            if name in layer_names:
                hook = module.register_forward_hook(get_stats(name))
                self.hooks.append(hook)

    def remove_hooks(self):
        """Detach all registered hooks and clear the handle list."""
        for hook in self.hooks:
            hook.remove()
        self.hooks = []

    def analyze_layer_statistics(self):
        """Summarize recorded activation stats as a CSV table and bar charts.

        Requires at least one forward pass after :meth:`register_hooks`;
        otherwise prints a hint and returns None.

        Returns:
            pandas.DataFrame with one formatted row per recorded layer.
        """
        if not self.layer_outputs:
            print("No layer outputs recorded. Make sure to run a forward pass after registering hooks.")
            return

        # Human-readable table (values pre-formatted as strings).
        stats_data = []
        for layer_name, stats in self.layer_outputs.items():
            stats_data.append({
                'Layer': layer_name,
                'Shape': str(stats['shape']),
                'Mean': f"{stats['mean']:.4f}",
                'Std': f"{stats['std']:.4f}",
                'Min': f"{stats['min']:.4f}",
                'Max': f"{stats['max']:.4f}",
                'Zeros%': f"{stats['zeros_ratio']*100:.2f}%"
            })

        df = pd.DataFrame(stats_data)

        csv_path = self.save_dir / 'layer_statistics.csv'
        df.to_csv(csv_path, index=False)

        # One bar chart per statistic, plotted from the raw (unformatted) values.
        numeric_cols = ['Mean', 'Std', 'Min', 'Max']
        fig, axes = plt.subplots(2, 2, figsize=(15, 10))
        axes = axes.flatten()

        layer_keys = list(self.layer_outputs.keys())
        short_names = [name.split('.')[-1] for name in layer_keys]

        for i, col in enumerate(numeric_cols):
            values = [self.layer_outputs[name][col.lower()] for name in layer_keys]

            axes[i].bar(range(len(values)), values)
            axes[i].set_title(f'Layer {col} Values')
            axes[i].set_xlabel('Layer')
            axes[i].set_ylabel(col)
            axes[i].tick_params(axis='x', rotation=45)

            # Label ticks only when they stay legible.
            if len(short_names) <= 10:
                axes[i].set_xticks(range(len(short_names)))
                axes[i].set_xticklabels(short_names, rotation=45, ha='right')

        plt.tight_layout()

        save_path = self.save_dir / 'layer_statistics.png'
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()

        print(f"Layer analysis saved to {save_path} and {csv_path}")
        return df