import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import seaborn as sns
from collections import defaultdict
import time

# Configure matplotlib for CJK text: use the SimHei font for labels/titles
# and keep the minus sign renderable while a CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class SimpleAttentionNet(nn.Module):
    """Tiny sequence classifier: embedding -> single-head self-attention
    -> mean pooling -> linear head.

    Args:
        vocab_size: number of distinct token ids the embedding accepts.
        embed_dim: embedding size, also the attention feature size.
        hidden_dim: stored for interface compatibility; not used by any layer.
        num_classes: size of the classification output.
    """

    def __init__(self, vocab_size=10, embed_dim=3, hidden_dim=4, num_classes=2):
        super(SimpleAttentionNet, self).__init__()
        self.embed_dim = embed_dim
        self.hidden_dim = hidden_dim

        # Token embedding layer
        self.embedding = nn.Embedding(vocab_size, embed_dim)

        # Single-head self-attention; batch_first keeps (batch, seq, dim) layout
        self.attention = nn.MultiheadAttention(embed_dim, num_heads=1, batch_first=True)

        # Fully connected classification head
        self.classifier = nn.Linear(embed_dim, num_classes)

        # Initialize weights deterministically given the current RNG state
        self._init_weights()

    def _init_weights(self):
        """Xavier-init every Linear layer, small-normal-init every Embedding."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.xavier_uniform_(module.weight)
                nn.init.zeros_(module.bias)
            elif isinstance(module, nn.Embedding):
                nn.init.normal_(module.weight, mean=0, std=0.1)

    def forward(self, x):
        """Classify each sequence of token ids.

        Args:
            x: LongTensor of token ids, shape (batch_size, seq_len).

        Returns:
            (logits, attn_weights): logits of shape (batch_size, num_classes)
            and attention weights of shape (batch_size, seq_len, seq_len).
        """
        # Embed tokens
        embedded = self.embedding(x)  # (batch_size, seq_len, embed_dim)

        # Self-attention: query, key and value are all the embedded tokens
        attn_output, attn_weights = self.attention(embedded, embedded, embedded)

        # Global average pooling over the sequence dimension
        pooled = torch.mean(attn_output, dim=1)  # (batch_size, embed_dim)

        # Classification head
        output = self.classifier(pooled)  # (batch_size, num_classes)

        return output, attn_weights

class GradientTracker:
    """Records per-parameter gradient norms, losses and learning rates
    during training.

    Registers a backward hook on every parameter of ``model``; each backward
    pass appends that parameter's gradient L2 norm to ``self.gradients[name]``.
    """

    def __init__(self, model):
        self.model = model
        self.gradients = defaultdict(list)  # param name -> list of grad norms
        self.param_names = []
        self.losses = []
        self.learning_rates = []

        # Register hooks to track gradients. `name` is bound as a default
        # argument so each hook keeps its own name (avoids the late-binding
        # closure pitfall).
        for name, param in model.named_parameters():
            self.param_names.append(name)
            param.register_hook(lambda grad, name=name: self._gradient_hook(grad, name))

    def _gradient_hook(self, grad, name):
        """Backward hook: store the L2 norm of ``grad`` under ``name``."""
        if grad is not None:
            grad_norm = grad.norm().item()
            self.gradients[name].append(grad_norm)

    def record_loss(self, loss):
        """Append a loss value; accepts a scalar tensor or a plain number."""
        self.losses.append(loss.item() if hasattr(loss, "item") else float(loss))

    def record_lr(self, lr):
        """Append the current learning rate."""
        self.learning_rates.append(lr)

def create_simple_dataset():
    """Build a toy classification dataset of six 3-token sentences.

    Returns:
        (sentences, labels): LongTensors of shapes (6, 3) and (6,); the
        first three sentences carry label 0, the last three label 1.
    """
    token_rows = [
        [0, 1, 2],  # sentence 1
        [3, 4, 5],  # sentence 2
        [6, 7, 8],  # sentence 3
        [1, 2, 3],  # sentence 4
        [4, 5, 6],  # sentence 5
        [7, 8, 9],  # sentence 6
    ]

    # First half is class 0, second half is class 1.
    class_ids = [0] * 3 + [1] * 3

    sentences = torch.tensor(token_rows, dtype=torch.long)
    labels = torch.tensor(class_ids, dtype=torch.long)
    return sentences, labels

def visualize_gradients_and_loss(tracker, save_path=None):
    """Render a 2x2 dashboard of training diagnostics from a GradientTracker.

    Panels: loss curve, learning-rate curve, per-parameter gradient-norm
    heatmap, and a histogram of the last 10 gradient norms per parameter.

    Args:
        tracker: object exposing `losses`, `learning_rates`, `gradients`
            (name -> list of norms) and `param_names` attributes.
        save_path: optional file path; when given, the figure is saved there
            before being shown.
    """
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    fig.suptitle('注意力网络训练过程可视化', fontsize=16)

    # 1. Loss curve
    axes[0, 0].plot(tracker.losses, 'b-', linewidth=2)
    axes[0, 0].set_title('Loss变化')
    axes[0, 0].set_xlabel('训练步数')
    axes[0, 0].set_ylabel('Loss')
    axes[0, 0].grid(True, alpha=0.3)

    # 2. Learning-rate curve
    if tracker.learning_rates:
        axes[0, 1].plot(tracker.learning_rates, 'r-', linewidth=2)
        axes[0, 1].set_title('学习率变化')
        axes[0, 1].set_xlabel('训练步数')
        axes[0, 1].set_ylabel('学习率')
        axes[0, 1].grid(True, alpha=0.3)

    # 3. Gradient-norm heatmap (parameters x training steps)
    if tracker.gradients:
        # Pad shorter gradient histories with zeros so rows align.
        max_steps = max(len(grads) for grads in tracker.gradients.values())
        grad_matrix = np.zeros((len(tracker.param_names), max_steps))

        for i, name in enumerate(tracker.param_names):
            grads = tracker.gradients[name]
            grad_matrix[i, :len(grads)] = grads

        im = axes[1, 0].imshow(grad_matrix, aspect='auto', cmap='viridis')
        axes[1, 0].set_title('参数梯度变化热力图')
        axes[1, 0].set_xlabel('训练步数')
        axes[1, 0].set_ylabel('参数')
        axes[1, 0].set_yticks(range(len(tracker.param_names)))
        axes[1, 0].set_yticklabels([name.split('.')[-1] for name in tracker.param_names])
        plt.colorbar(im, ax=axes[1, 0])

    # 4. Distribution of recent gradient norms
    if tracker.gradients:
        # Pool the last 10 recorded norms of every parameter.
        recent_grads = []
        for name in tracker.param_names:
            if tracker.gradients[name]:
                recent_grads.extend(tracker.gradients[name][-10:])

        if recent_grads:
            axes[1, 1].hist(recent_grads, bins=20, alpha=0.7, edgecolor='black')
            axes[1, 1].set_title('最近梯度分布')
            axes[1, 1].set_xlabel('梯度范数')
            axes[1, 1].set_ylabel('频次')
            axes[1, 1].grid(True, alpha=0.3)

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')

    plt.show()

def train_with_gradient_tracking():
    """Train the toy attention classifier for 50 epochs while recording
    losses, learning rates and per-parameter gradient norms.

    Returns:
        (model, tracker): the trained SimpleAttentionNet and the
        GradientTracker holding the recorded training history.
    """
    # Model, loss, optimizer; StepLR decays the LR by 10% every 10 steps
    # (one scheduler step per epoch).
    model = SimpleAttentionNet()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.9)

    # Tracker registers its gradient hooks on the model's parameters.
    tracker = GradientTracker(model)

    # Toy dataset: the whole set is used as a single batch each epoch.
    sentences, labels = create_simple_dataset()

    print("开始训练...")
    print(f"模型参数数量: {sum(p.numel() for p in model.parameters())}")
    print(f"可训练参数数量: {sum(p.numel() for p in model.parameters() if p.requires_grad)}")

    # Training loop
    num_epochs = 50
    for epoch in range(num_epochs):
        optimizer.zero_grad()

        # Forward pass; attention weights are not needed during training.
        outputs, _ = model(sentences)
        loss = criterion(outputs, labels)

        # Backward pass fires the gradient hooks registered by the tracker.
        loss.backward()

        # Record before stepping so the logged LR matches this update.
        tracker.record_loss(loss)
        tracker.record_lr(optimizer.param_groups[0]['lr'])

        # Parameter and learning-rate updates
        optimizer.step()
        scheduler.step()

        if epoch % 10 == 0:
            print(f"Epoch {epoch}, Loss: {loss.item():.4f}, LR: {optimizer.param_groups[0]['lr']:.6f}")

    print("训练完成!")

    # Visualize and save the training diagnostics.
    visualize_gradients_and_loss(tracker, 'gradient_analysis.png')

    return model, tracker

def analyze_attention_weights(model, sentences):
    """Visualize each sentence's self-attention weight matrix and save the
    figure to 'attention_weights.png'.

    Args:
        model: module returning (logits, attn_weights) from forward, with
            attn_weights of shape (batch, seq_len, seq_len).
        sentences: LongTensor of token ids, shape (batch_size, seq_len).
    """
    model.eval()
    with torch.no_grad():
        _, attention_weights = model(sentences)

    fig, axes = plt.subplots(2, 3, figsize=(15, 10))
    fig.suptitle('注意力权重可视化', fontsize=16)

    # Plot at most six sentences: the subplot grid is fixed at 2x3.
    for i in range(min(6, attention_weights.shape[0])):
        row, col = i // 3, i % 3
        attn = attention_weights[i].numpy()

        im = axes[row, col].imshow(attn, cmap='Blues', aspect='auto')
        axes[row, col].set_title(f'句子 {i+1} 注意力权重')
        # Rows of the weight matrix are query positions and columns are key
        # positions, so the x-axis (columns) is labelled as keys and the
        # y-axis (rows) as queries.
        axes[row, col].set_xlabel('键位置')
        axes[row, col].set_ylabel('查询位置')

        # Annotate every cell with its numeric weight.
        for j in range(attn.shape[0]):
            for k in range(attn.shape[1]):
                axes[row, col].text(k, j, f'{attn[j, k]:.2f}',
                                  ha='center', va='center', fontsize=8)

        plt.colorbar(im, ax=axes[row, col])

    plt.tight_layout()
    plt.savefig('attention_weights.png', dpi=300, bbox_inches='tight')
    plt.show()

def print_model_info(model):
    """Print the model architecture, every parameter's shape and size, and
    the total parameter count."""
    print("\n=== 模型结构 ===")
    print(model)

    print("\n=== 参数详情 ===")
    sizes = []
    for name, param in model.named_parameters():
        print(f"{name}: {param.shape}, 参数数量: {param.numel()}")
        sizes.append(param.numel())

    print(f"\n总参数数量: {sum(sizes)}")

if __name__ == "__main__":
    # Seed both torch and numpy for reproducible runs.
    torch.manual_seed(42)
    np.random.seed(42)

    print("=== 简单注意力网络梯度分析 ===")

    # Train the model while tracking gradients and losses.
    trained_model, grad_tracker = train_with_gradient_tracking()

    # Summarize the trained model's structure and parameter counts.
    print_model_info(trained_model)

    # Inspect attention patterns on the same toy dataset.
    inputs, _ = create_simple_dataset()
    analyze_attention_weights(trained_model, inputs)

    print("\n分析完成！请查看生成的图片文件。")
