# #为什么需要获取中间层的值？
# #在深度学习中，神经网络就像一个"黑盒子"，
# # 我们只能看到输入和输出。但有时我们需要"打开黑盒"看看内部发生了什么

# #PyTorch的两种主要方法:
# #方法1：使用 Hook（钩子函数）
# #核心思想：在前向/反向传播时"插入"一个函数，自动捕获中间值
# """
# 前向钩子 (Forward Hook)
# """

# import torch 
# import torch.nn as nn

# #示例模型
# class MyModel(nn.Module):
#     def __init__(self):
#         super(MyModel,self).__init__()
#         self.layer1 = nn.Linear(10,20)
#         self.relu = nn.ReLU()
#         self.layer2 = nn.Linear(20,5)

#     def forward(self,x):
#         x = self.layer1(x)
#         x = self.relu(x)
#         x = self.layer2(x)
#         return x
# model = MyModel()

# # ===== 关键代码：注册Hook =====
# activations = {}  # 存储中间值的字典

# def get_activation(name):
#     """闭包：创建一个捕获特定层输出的函数"""
#     def hook(model, input, output):
#         activations[name] = output.detach()  # detach()防止影响梯度
#     return hook

# model.layer1.register_forward_hook(get_activation('layer1'))
# model.relu.register_forward_hook(get_activation('relu'))
# model.layer2.register_forward_hook(get_activation('layer2'))

# # 测试
# x = torch.randn(3, 10)  # batch_size=3
# print(x)
# output = model(x)

# # 查看中间值
# print("Layer1输出形状:", activations['layer1'].shape)  # torch.Size([3, 20])
# print("ReLU输出形状:", activations['relu'].shape)      # torch.Size([3, 20])
# print("Layer1前10个值:\n", activations['layer1'][0, :10])


#1.2 反向钩子 (Backward Hook)

# gradients = {}

# def get_gradient(name):
#     def hook(grad):
#         gradients[name] = grad
#     return hook

# # 注册反向hook（需要在tensor上注册）
# x = torch.randn(3, 10, requires_grad=True)
# output = model(x)

# # 为中间层注册
# # NOTE(review): activations['layer1'] was stored via .detach() (requires_grad=False),
# # so calling register_hook on it raises a RuntimeError; register the hook on the
# # un-detached `output` inside the forward hook instead.
# handle = activations['layer1'].register_hook(get_gradient('layer1_grad'))

# # 反向传播
# loss = output.sum()
# loss.backward()

# print("Layer1的梯度:", gradients['layer1_grad'].shape)





import torch 
import torch.nn as nn
import matplotlib.pyplot as plt


# ===== 1. 准备模型和数据 =====
torch.manual_seed(0)

# 100 evenly spaced points in [-5, 5], reshaped to (100, 1), then scaled to [-50, 50].
x = torch.linspace(-5, 5, 100).unsqueeze(1) * 10
# Noisy sine targets: sin(x) plus small Gaussian noise.
y = torch.sin(x) + 0.05 * torch.randn(x.size())

# 线性模型 (NOTE(review): defined here but not used later in this chunk)
linear_model = nn.Linear(1, 1)

# 非线性模型（会成功）
class NoLinearModel(nn.Module):
    """Small MLP (1 -> 32 -> 32 -> 1) with ReLU activations.

    The ReLU activations are kept as named submodules (instead of using
    functional calls) so that forward hooks can be registered on them to
    capture intermediate outputs.
    """

    def __init__(self):
        super(NoLinearModel, self).__init__()
        self.fc1 = nn.Linear(1, 32)
        self.relu1 = nn.ReLU()
        # Fixed: original had `nn,Linear(32,32)` (comma typo → tuple/NameError)
        # and `nn.ReLu()` (wrong capitalization → AttributeError).
        self.fc2 = nn.Linear(32, 32)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(32, 1)

    def forward(self, x):
        """Map an (N, 1) input batch through the MLP to an (N, 1) output."""
        x = self.fc1(x)
        x = self.relu1(x)
        x = self.fc2(x)
        x = self.relu2(x)
        x = self.fc3(x)
        return x

nonlinear_model = NoLinearModel()  # instantiate the MLP defined above

# ===== 2. 注册Hook来捕获中间值 =====
# Maps layer name -> that layer's output (detached) from the latest forward pass.
activations = {}


def save_activation(name):
    """Return a forward hook that records a layer's output under `name`.

    The hook detaches the output before storing it, so the saved tensor is
    kept out of the autograd graph.
    """
    def _capture(module, input, output):
        activations[name] = output.detach()
    return _capture

# 为非线性模型的每一层注册hook
# Attach a forward hook to each intermediate layer so its output is
# captured into `activations` whenever the model runs a forward pass.
for _layer_name in ('fc1', 'relu1', 'fc2', 'relu2'):
    getattr(nonlinear_model, _layer_name).register_forward_hook(
        save_activation(_layer_name)
    )

# ===== 3. 训练非线性模型 =====
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(nonlinear_model.parameters(), lr=0.01)

# Plain full-batch training: forward, compute loss, backprop, parameter step.
for epoch in range(500):
    output = nonlinear_model(x)
    loss = criterion(output, y)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# ===== 4. 可视化中间层的表示 =====
# Final predictions without building an autograd graph.
with torch.no_grad():
    final_output = nonlinear_model(x)

fig, axes = plt.subplots(2, 3, figsize=(15, 8))
x_np = x.numpy()

# 可视化每一层的激活
# Fill panels (0,0)..(1,0) with the four captured layers; panel (1,1) stays blank.
for idx, layer_name in enumerate(['fc1', 'relu1', 'fc2', 'relu2']):
    ax = axes[idx // 3, idx % 3]

    # Plot the response curves of the first 5 neurons of this layer.
    neuron_outputs = activations[layer_name][:, :5].numpy()
    for i in range(5):
        ax.plot(x_np, neuron_outputs[:, i], label=f'Neuron {i+1}', alpha=0.7)

    ax.set_title(f'{layer_name} 输出 (前5个神经元)')
    ax.legend(fontsize=8)
    ax.grid(True, alpha=0.3)

# 最终拟合效果 (bottom-right panel)
ax = axes[1, 2]
ax.scatter(x_np, y.numpy(), label='真实数据', alpha=0.5, s=10)
ax.plot(x_np, final_output.numpy(), 'r-', label='模型预测', linewidth=2)
ax.set_title('最终拟合效果')
ax.legend()
ax.grid(True, alpha=0.3)

plt.tight_layout()
plt.show()

# ===== 5. 分析中间层统计信息 =====
print("\n===== 中间层分析 =====")
for name, activation in activations.items():
    print(f"\n{name}:")
    print(f"  形状: {activation.shape}")

    # Summary statistics of this layer's captured output.
    for stat_label, stat_value in (
        ("均值", activation.mean().item()),
        ("标准差", activation.std().item()),
        ("最小值", activation.min().item()),
        ("最大值", activation.max().item()),
    ):
        print(f"  {stat_label}: {stat_value:.4f}")

    # 检查死神经元（ReLU后全为0）: ReLU outputs are non-negative, so a zero
    # column-sum means that neuron produced 0 for every input sample.
    if 'relu' in name:
        dead_neurons = (activation.sum(dim=0) == 0).sum().item()
        print(f"  💀 死神经元数量: {dead_neurons}/{activation.shape[1]}")