import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
import csv
# import matplotlib.pyplot as plt
import numpy as np

# ----------------------------
# Device configuration
# ----------------------------
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
print(f"Using device: {device}")

# ----------------------------
# Dataset: only the first 10 MNIST test samples
# ----------------------------
normalize = transforms.Normalize((0.1307,), (0.3081,))  # standard MNIST mean/std
transform = transforms.Compose([transforms.ToTensor(), normalize])

# Build a 10-sample subset of the MNIST test split, served as a single batch.
test_dataset = datasets.MNIST(root='../data', train=False, download=True, transform=transform)
subset_indices = list(range(10))
test_subset = torch.utils.data.Subset(test_dataset, subset_indices)
test_loader = DataLoader(test_subset, batch_size=10, shuffle=False)

# ----------------------------
# Model definition (must exactly match the architecture used at training time,
# otherwise the checkpoint's state_dict will not load)
# ----------------------------
class SimpleCNN(nn.Module):
    """Small CNN for 28x28 MNIST digits: two conv layers, one pool, two FC layers."""

    def __init__(self):
        super(SimpleCNN, self).__init__()
        # Feature extractor: 1 -> 32 -> 64 channels; padding=1 keeps 28x28.
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
        # A single 2x2 max-pool halves the spatial size to 14x14.
        self.pool = nn.MaxPool2d(2, 2)
        # Classifier head: flattened 64*14*14 features -> 128 -> 10 logits.
        self.fc1 = nn.Linear(64 * 14 * 14, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Return raw class logits for a batch of (N, 1, 28, 28) images."""
        # Same module call order as before (conv1, conv2, pool, fc1, fc2),
        # so forward hooks on the children observe identical inputs/outputs.
        h = self.pool(F.relu(self.conv2(F.relu(self.conv1(x)))))
        h = h.view(h.size(0), -1)
        return self.fc2(F.relu(self.fc1(h)))

# Instantiate the network and move it to the selected device.
model = SimpleCNN().to(device)

# ----------------------------
# Forward hooks that capture per-layer inputs and outputs
# ----------------------------
layer_outputs = {}  # child-module name -> output tensor (detached, on CPU)
layer_inputs = {}   # child-module name -> first positional input (detached, on CPU)

def get_layer_input_hook(name):
    """Build a forward hook that records the layer's first positional input under *name*."""
    def _record(mod, args, out):
        layer_inputs[name] = args[0].detach().cpu()
    return _record

def get_layer_output_hook(name):
    """Build a forward hook that records the layer's output tensor under *name*."""
    def _record(mod, args, out):
        layer_outputs[name] = out.detach().cpu()
    return _record

# Attach both recording hooks to every immediate child module; keep the
# handles so they can be removed once feature extraction is done.
hooks = []
for name, module in model.named_children():
    hooks.extend([
        module.register_forward_hook(get_layer_input_hook(name)),
        module.register_forward_hook(get_layer_output_hook(name)),
    ])

# ----------------------------
# Detailed per-layer parameter analysis
# ----------------------------
def analyze_layer_parameters():
    """Print shape/statistics for every child layer of the module-level `model`."""
    print("\n" + "=" * 60)
    print("模型层参数详细分析")
    print("=" * 60)

    total_params = 0
    for name, module in model.named_children():
        print(f"\n层名称: {name}")
        print(f"层类型: {module.__class__.__name__}")

        # Layers with learnable weights: report shapes, value ranges, counts.
        if hasattr(module, 'weight'):
            w = module.weight
            print(f"权重形状: {w.shape}")
            print(f"权重统计 - 最小值: {w.min().item():.6f}, 最大值: {w.max().item():.6f}, 均值: {w.mean().item():.6f}")

            n_params = w.numel()
            b = getattr(module, 'bias', None)
            if b is not None:
                print(f"偏置形状: {b.shape}")
                print(f"偏置统计 - 最小值: {b.min().item():.6f}, 最大值: {b.max().item():.6f}, 均值: {b.mean().item():.6f}")
                n_params += b.numel()

            print(f"该层参数数量: {n_params:,}")
            total_params += n_params

        # Type-specific hyperparameters.
        if isinstance(module, nn.Conv2d):
            print(f"卷积核大小: {module.kernel_size}")
            print(f"步长: {module.stride}")
            print(f"填充: {module.padding}")
            print(f"输入通道: {module.in_channels}")
            print(f"输出通道: {module.out_channels}")
        elif isinstance(module, nn.Linear):
            print(f"输入特征数: {module.in_features}")
            print(f"输出特征数: {module.out_features}")
        elif isinstance(module, nn.MaxPool2d):
            print(f"池化核大小: {module.kernel_size}")
            print(f"步长: {module.stride}")

    print(f"\n模型总参数数量: {total_params:,}")

# ----------------------------
# Save intermediate-layer features (one .npy file per image per layer)
# ----------------------------
def save_layer_features(fixed_data, fixed_target, epoch):
    """Dump the captured per-layer activations for each image in the batch.

    Layout on disk:
        ../out/layer_features_epoch_XX/fixed_data.pth            (raw batch + labels)
        ../out/layer_features_epoch_XX/image_NNNN/label.npy
        ../out/layer_features_epoch_XX/image_NNNN/<layer>_input.npy
        ../out/layer_features_epoch_XX/image_NNNN/<layer>_output.npy

    Relies on the module-level `layer_inputs` / `layer_outputs` dicts that
    the forward hooks populate during the most recent forward pass.

    Args:
        fixed_data:   batch tensor of input images, shape (N, ...).
        fixed_target: tensor of N labels for the batch.
        epoch:        epoch number used to name the output directory.
    """
    # Fixed output location under ../out/.  (Removed an unused `import tempfile`
    # and its misleading comment — no temporary directory was ever used.)
    feature_dir = os.path.join("../out/", f"layer_features_epoch_{epoch:02d}")
    os.makedirs(feature_dir, exist_ok=True)

    print(f"\n保存中间层特征到目录: {feature_dir}")

    batch_size = fixed_data.shape[0]

    # Keep a copy of the raw input batch and labels alongside the features.
    torch.save({
        'data': fixed_data.cpu(),
        'target': fixed_target.cpu()
    }, os.path.join(feature_dir, 'fixed_data.pth'))

    # One sub-directory per image, holding its label plus that image's
    # slice of every hooked layer's input and output.
    for img_idx in range(batch_size):
        img_dir = os.path.join(feature_dir, f"image_{img_idx:04d}")
        os.makedirs(img_dir, exist_ok=True)

        np.save(os.path.join(img_dir, 'label.npy'), fixed_target[img_idx].cpu().numpy())

        for layer_name in layer_inputs.keys():
            input_data = layer_inputs[layer_name][img_idx]
            output_data = layer_outputs[layer_name][img_idx]
            np.save(os.path.join(img_dir, f'{layer_name}_input.npy'), input_data.numpy())
            np.save(os.path.join(img_dir, f'{layer_name}_output.npy'), output_data.numpy())

        print(f"保存 图片{img_idx}: 标签={fixed_target[img_idx].item()}")

    print(f"总共保存了 {batch_size} 张图片的中间层特征")

    # Batch-level shape summary for quick sanity checks.
    print(f"\n批量级别的层形状信息:")
    for layer_name in layer_inputs.keys():
        input_data = layer_inputs[layer_name]
        output_data = layer_outputs[layer_name]
        print(f"{layer_name}: 输入形状 {input_data.shape}, 输出形状 {output_data.shape}")

# ----------------------------
# Evaluation (a forward pass here also fires the feature-capturing hooks)
# ----------------------------
def test_model_with_features():
    """Evaluate the model on the fixed 10-sample batch.

    Returns (test_loss, accuracy, fixed_data, fixed_target); the forward pass
    fills the module-level layer_inputs/layer_outputs dicts via the hooks.
    """
    model.eval()
    criterion = nn.CrossEntropyLoss()

    # The loader serves a single fixed batch containing all 10 samples.
    fixed_data, fixed_target = next(iter(test_loader))
    fixed_data, fixed_target = fixed_data.to(device), fixed_target.to(device)

    with torch.no_grad():
        output = model(fixed_data)  # triggers the registered forward hooks
        test_loss = criterion(output, fixed_target).item()
        predictions = output.argmax(dim=1)
        correct = (predictions == fixed_target).sum().item()

    accuracy = 100. * correct / len(fixed_target)
    print(f"Test Loss: {test_loss:.4f}, Accuracy: {accuracy:.2f}%")

    return test_loss, accuracy, fixed_data, fixed_target

# ----------------------------
# Load the checkpoint saved for a specific epoch
# ----------------------------
save_dir = "../checkpoints"
epoch_to_load = 1
model_path = os.path.join(save_dir, f"mnist_model_epoch_{epoch_to_load:02d}.pth")

# Fail fast with a clear message rather than an opaque torch.load error.
if not os.path.exists(model_path):
    raise FileNotFoundError(f"模型文件 {model_path} 不存在！")

# NOTE(review): torch.load unpickles arbitrary objects; for untrusted
# checkpoints consider weights_only=True (torch >= 1.13) — confirm version.
model.load_state_dict(torch.load(model_path, map_location=device))
model.to(device)
print(f"Loaded model from epoch {epoch_to_load}")

# ----------------------------
# Analyze model parameters
# ----------------------------
analyze_layer_parameters()

# ----------------------------
# Run the test pass; the forward hooks capture intermediate features
# ----------------------------
print("\n" + "="*60)
print("开始测试并提取中间层特征（10个样本）...")
print("="*60)

test_loss, accuracy, fixed_data, fixed_target = test_model_with_features()

# ----------------------------
# Persist the captured intermediate features to disk
# ----------------------------
save_layer_features(fixed_data, fixed_target, epoch_to_load)

# ----------------------------
# Print detailed input/output statistics for every hooked layer
# ----------------------------
print("\n" + "="*60)
print("中间层输入输出详细信息（10个样本）")
print("="*60)

for layer_name in layer_inputs:
    inp = layer_inputs[layer_name]
    out = layer_outputs[layer_name]

    print(f"\n{layer_name}:")
    print(f"  输入形状: {inp.shape}")
    print(f"  输入统计 - 最小值: {inp.min().item():.6f}, 最大值: {inp.max().item():.6f}, 均值: {inp.mean().item():.6f}")
    print(f"  输出形状: {out.shape}")
    print(f"  输出统计 - 最小值: {out.min().item():.6f}, 最大值: {out.max().item():.6f}, 均值: {out.mean().item():.6f}")

# ----------------------------
# Detach all forward hooks now that capture is complete
# ----------------------------
for h in hooks:
    h.remove()

# ----------------------------
# Training-log visualization (plotting is currently disabled — matplotlib
# import is commented out at the top of the file; the log is still parsed)
# ----------------------------
log_path = os.path.join(save_dir, "training_log.csv")
if os.path.exists(log_path):
    epochs, train_loss_list, test_loss_list, acc_list = [], [], [], []
    with open(log_path, 'r') as f:
        for row in csv.DictReader(f):
            epochs.append(int(row['epoch']))
            train_loss_list.append(float(row['train_loss']))
            test_loss_list.append(float(row['test_loss']))
            acc_list.append(float(row['accuracy']))

    # Disabled plotting code, kept for reference:
    # plt.figure(figsize=(10,4))
    # plt.subplot(1,2,1)
    # plt.plot(epochs, train_loss_list, label='Train Loss')
    # plt.plot(epochs, test_loss_list, label='Test Loss')
    # plt.xlabel('Epoch')
    # plt.ylabel('Loss')
    # plt.legend()
    # plt.title('Loss Curve')
    # plt.subplot(1,2,2)
    # plt.plot(epochs, acc_list, label='Accuracy', color='green')
    # plt.xlabel('Epoch')
    # plt.ylabel('Accuracy (%)')
    # plt.title('Accuracy Curve')
    # plt.show()
else:
    print(f"训练日志 {log_path} 不存在，无法绘图")