import os
import time
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import matplotlib.pyplot as plt
import matplotlib

# =================== Tunable training parameters ===================
# Random seed, for reproducible results
RANDOM_SEED = 42

# Training hyperparameters
BATCH_SIZE = 64         # mini-batch size
LEARNING_RATE = 0.001   # learning rate
EPOCHS = 200             # number of training epochs
OPTIMIZER = 'adam'      # optimizer choice: 'adam', 'sgd', 'rmsprop'
WEIGHT_DECAY = 0.0001   # weight decay (L2 regularization)
MOMENTUM = 0.9          # momentum (used by the SGD optimizer only)

# GPU training settings
USE_GPU = True          # whether to train on the GPU
CUDA_VISIBLE_DEVICES = "0"  # GPU id(s) to expose (relevant on multi-GPU hosts)

# Model saving settings
SAVE_BEST_ONLY = True   # save only the best-performing checkpoint

# Logging settings
LOG_INTERVAL = 100      # print a training log line every this many batches
# ======================================================

# Configure environment variables.
# NOTE(review): CUDA_VISIBLE_DEVICES is set after `import torch`; it is still
# honored as long as no CUDA context exists yet -- confirm on multi-GPU hosts.
if USE_GPU:
    os.environ["CUDA_VISIBLE_DEVICES"] = CUDA_VISIBLE_DEVICES

# Configure matplotlib so Chinese labels render correctly
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

# Import project-local modules (after env setup so the GPU mask applies first)
from dataloader import load_data, get_paths
from model import LeNet5  # moved here from lenet5_model.py

# Resolve the data and output directories
data_dir, output_dir = get_paths()

# Select the compute device: CUDA when requested and available, else CPU
device = torch.device("cuda" if torch.cuda.is_available() and USE_GPU else "cpu")
print(f"使用设备: {device}")

# When on GPU, print basic device information
if device.type == 'cuda':
    print(f"GPU名称: {torch.cuda.get_device_name(0)}")
    print(f"GPU显存: {torch.cuda.get_device_properties(0).total_memory / 1024 ** 3:.2f} GB")

def train_model(model, train_loader, optimizer, criterion, epoch, log_interval=None):
    """Train the model for a single epoch.

    Args:
        model: network to train. Input batches are moved to the device of the
            model's own parameters, so no module-global ``device`` is required.
        train_loader: DataLoader yielding ``(data, target)`` batches.
        optimizer: optimizer bound to ``model``'s parameters.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        epoch: current epoch number (used only in log output).
        log_interval: print progress every this many batches. ``None`` falls
            back to the module-level ``LOG_INTERVAL``, resolved at call time
            (the original captured it at definition time).

    Returns:
        Tuple ``(average_batch_loss, accuracy_percent)`` for the epoch.
    """
    if log_interval is None:
        log_interval = LOG_INTERVAL
    # Derive the target device from the model itself instead of a global.
    dev = next(model.parameters()).device

    model.train()  # training mode (affects dropout / batchnorm layers)
    train_loss = 0.0
    correct = 0
    total = 0

    start_time = time.time()

    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(dev), target.to(dev)

        optimizer.zero_grad()             # clear gradients from previous step
        output = model(data)              # forward pass
        loss = criterion(output, target)  # compute loss
        loss.backward()                   # backward pass
        optimizer.step()                  # parameter update

        train_loss += loss.item()

        # Accuracy bookkeeping: argmax over class dimension
        _, predicted = output.max(1)
        total += target.size(0)
        correct += predicted.eq(target).sum().item()

        # Periodic progress logging
        if batch_idx % log_interval == 0:
            elapsed = time.time() - start_time
            print(f'训练 Epoch: {epoch} [{batch_idx * len(data)}/{len(train_loader.dataset)} '
                  f'({100. * batch_idx / len(train_loader):.0f}%)]\t'
                  f'损失: {loss.item():.6f}\t'
                  f'准确率: {100. * correct / total:.2f}%\t'
                  f'用时: {elapsed:.2f}s')
            start_time = time.time()

    # Average loss per batch, accuracy over the whole epoch
    return train_loss / len(train_loader), 100. * correct / total

def test_model(model, test_loader, criterion):
    """评估模型性能"""
    model.eval()  # 设置为评估模式
    test_loss = 0
    correct = 0
    
    with torch.no_grad():  # 不计算梯度
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            
            # 前向传播
            output = model(data)
            
            # 计算损失
            test_loss += criterion(output, target).item()
            
            # 获取预测结果
            _, predicted = output.max(1)
            
            # 计算正确预测数量
            correct += predicted.eq(target).sum().item()
    
    # 计算平均损失和准确率
    test_loss /= len(test_loader)
    accuracy = 100. * correct / len(test_loader.dataset)
    
    print(f'\n测试集: 平均损失: {test_loss:.4f}, 准确率: {correct}/{len(test_loader.dataset)} ({accuracy:.2f}%)\n')
    
    return test_loss, accuracy

def plot_training_results(train_losses, train_accuracies, test_losses, test_accuracies):
    """Plot the per-epoch loss and accuracy curves side by side.

    Saves the figure as ``lenet5_training_results.png`` under the module-level
    ``output_dir`` and then displays it.
    """
    fig, (loss_ax, acc_ax) = plt.subplots(1, 2, figsize=(12, 5))

    # Left panel: training vs. test loss
    loss_ax.plot(train_losses, label='训练损失')
    loss_ax.plot(test_losses, label='测试损失')
    loss_ax.set_xlabel('Epoch')
    loss_ax.set_ylabel('损失')
    loss_ax.legend()
    loss_ax.set_title('训练和测试损失')

    # Right panel: training vs. test accuracy
    acc_ax.plot(train_accuracies, label='训练准确率')
    acc_ax.plot(test_accuracies, label='测试准确率')
    acc_ax.set_xlabel('Epoch')
    acc_ax.set_ylabel('准确率 (%)')
    acc_ax.legend()
    acc_ax.set_title('训练和测试准确率')

    fig.tight_layout()
    # Persist the figure to the output directory before showing it
    fig.savefig(os.path.join(output_dir, 'lenet5_training_results.png'))
    plt.show()

def visualize_filters(model):
    """Visualize the filters of the model's first convolutional layer.

    Reads ``model.conv1.weight``, plots channel 0 of every filter in a single
    row, saves the figure as ``lenet5_filters.png`` under the module-level
    ``output_dir``, and displays it.

    Generalized from the original: the filter count is read from the weight
    tensor instead of being hard-coded to LeNet-5's 6 filters.
    """
    # Copy the first conv layer's weights to the CPU as a numpy array
    weights = model.conv1.weight.data.cpu().numpy()
    num_filters = weights.shape[0]

    plt.figure(figsize=(10, 2))
    # Fix: the original called plt.title() before creating the subplots, so
    # the title landed on a throwaway axes and was clobbered; use a
    # figure-level suptitle instead.
    plt.suptitle("第一个卷积层的过滤器")

    for i in range(num_filters):
        plt.subplot(1, num_filters, i + 1)
        plt.imshow(weights[i, 0], cmap='gray')  # input channel 0 of filter i
        plt.axis('off')

    # Save to the output directory, then show
    save_path = os.path.join(output_dir, 'lenet5_filters.png')
    plt.savefig(save_path)
    plt.show()

def visualize_feature_maps(model, data_loader):
    """Visualize intermediate feature maps for one sample image.

    Pushes a single image from ``data_loader`` through the model's conv1,
    pool1, and conv2 layers, then plots the original image and both layers'
    activation grids, saving the figure as ``lenet5_feature_maps.png`` under
    the module-level ``output_dir``.

    NOTE(review): the 1x3 ``plt.subplot`` layout set up below is overridden
    by the nested grid-geometry subplot calls that follow, so titles and
    panels likely do not render as the comments suggest -- confirm the
    produced figure.
    """
    import torch.nn.functional as F

    # Grab one batch of data
    dataiter = iter(data_loader)
    images, labels = next(dataiter)

    # Pick a single image to visualize (0:1 slice keeps the batch dimension)
    img = images[0:1].to(device)

    # Compute the feature maps without tracking gradients
    model.eval()
    with torch.no_grad():
        # Ensure the input is 32x32 (resizes when the dataset yields 28x28)
        if img.size(2) != 32:
            img = F.interpolate(img, size=(32, 32), mode='bilinear', align_corners=True)

        # Output of the first convolutional layer (post-ReLU)
        conv1_output = F.relu(model.conv1(img))

        # Output of the first pooling layer
        pool1_output = model.pool1(conv1_output)

        # Output of the second convolutional layer (post-ReLU)
        conv2_output = F.relu(model.conv2(pool1_output))

    # Plot the original image and the feature maps
    plt.figure(figsize=(15, 10))

    # Show the original image with its label
    plt.subplot(1, 3, 1)
    plt.imshow(images[0].squeeze().cpu().numpy(), cmap='gray')
    plt.title(f"原始图像: {labels[0].item()}")
    plt.axis('off')

    # Show the first conv layer's feature maps
    plt.subplot(1, 3, 2)
    # Arrange the maps into a roughly square grid
    feature_maps = conv1_output.squeeze().cpu().numpy()
    grid_size = int(np.ceil(np.sqrt(feature_maps.shape[0])))
    for i in range(feature_maps.shape[0]):
        plt.subplot(grid_size, grid_size, i+1)
        plt.imshow(feature_maps[i], cmap='viridis')
        plt.axis('off')
    plt.tight_layout()
    plt.title("第一个卷积层特征图")

    # Show the second conv layer's feature maps
    plt.subplot(1, 3, 3)
    # Arrange the maps into a grid
    feature_maps = conv2_output.squeeze().cpu().numpy()
    grid_size = int(np.ceil(np.sqrt(feature_maps.shape[0])))
    for i in range(min(16, feature_maps.shape[0])):  # only show the first 16 maps
        plt.subplot(4, 4, i+1)
        plt.imshow(feature_maps[i], cmap='viridis')
        plt.axis('off')
    plt.tight_layout()
    plt.title("第二个卷积层特征图")

    # Save to the output directory, then show
    save_path = os.path.join(output_dir, 'lenet5_feature_maps.png')
    plt.savefig(save_path)
    plt.show()

def get_optimizer(optimizer_name, model_parameters, lr, weight_decay, momentum=None):
    """Build a torch optimizer by name.

    Args:
        optimizer_name: one of 'adam', 'sgd', 'rmsprop' (case-insensitive);
            any other value falls back to Adam with a warning print, matching
            the original best-effort behavior.
        model_parameters: iterable of parameters to optimize.
        lr: learning rate.
        weight_decay: L2 regularization factor.
        momentum: SGD momentum. ``None`` falls back to the module-level
            ``MOMENTUM`` at call time (the original captured it at definition
            time, so later config changes were ignored).

    Returns:
        A configured ``torch.optim`` optimizer instance.
    """
    name = optimizer_name.lower()  # normalize once instead of per branch
    if name == 'adam':
        return optim.Adam(model_parameters, lr=lr, weight_decay=weight_decay)
    if name == 'sgd':
        if momentum is None:
            momentum = MOMENTUM  # late-bound default; only SGD uses momentum
        return optim.SGD(model_parameters, lr=lr, momentum=momentum, weight_decay=weight_decay)
    if name == 'rmsprop':
        return optim.RMSprop(model_parameters, lr=lr, weight_decay=weight_decay)
    # Unknown name: warn and fall back to Adam rather than crashing
    print(f"未知的优化器名称: {optimizer_name}, 使用Adam")
    return optim.Adam(model_parameters, lr=lr, weight_decay=weight_decay)

def main():
    """Entry point: load data, build/restore LeNet-5, train, evaluate, visualize.

    Side effects: reads/writes ``lenet5_model.pth`` under the module-level
    ``output_dir``, prompts interactively when a saved model already exists,
    and opens matplotlib windows at the end.
    """
    # Seed RNGs for reproducible results
    torch.manual_seed(RANDOM_SEED)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(RANDOM_SEED)
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    
    # Load the data
    print("正在加载MNIST数据集...")
    train_loader, test_loader = load_data(BATCH_SIZE)
    print(f"数据加载完成! 训练集大小: {len(train_loader.dataset)}, 测试集大小: {len(test_loader.dataset)}")
    
    # Build the model on the selected device
    model = LeNet5().to(device)
    print(model)
    
    # Define the loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = get_optimizer(OPTIMIZER, model.parameters(), LEARNING_RATE, WEIGHT_DECAY)
    
    # Check whether a previously saved model exists
    model_path = os.path.join(output_dir, 'lenet5_model.pth')
    
    if os.path.exists(model_path):
        # Load the existing checkpoint
        print(f"找到已训练的模型 {model_path}，正在加载...")
        model.load_state_dict(torch.load(model_path, map_location=device))
        
        # Evaluate the loaded model
        print("评估已加载的模型...")
        test_loss, test_accuracy = test_model(model, test_loader, criterion)
        
        # Ask the user whether to retrain.
        # NOTE(review): answering 'y' fine-tunes from the loaded weights, and
        # best_accuracy below restarts at 0, so an early (worse) epoch can
        # overwrite the better saved checkpoint -- confirm this is intended.
        train_model_input = input("是否要重新训练模型? (y/n): ")
        if train_model_input.lower() != 'y':
            # Just visualize the filters and feature maps, then stop
            visualize_filters(model)
            visualize_feature_maps(model, test_loader)
            return model
    
    # Train the model
    print(f"开始训练模型... 总训练轮数: {EPOCHS}, 学习率: {LEARNING_RATE}, 优化器: {OPTIMIZER}")
    print(f"批次大小: {BATCH_SIZE}, 权重衰减: {WEIGHT_DECAY}")
    start_time = time.time()
    
    train_losses = []
    train_accuracies = []
    test_losses = []
    test_accuracies = []
    best_accuracy = 0
    
    for epoch in range(1, EPOCHS + 1):
        epoch_start_time = time.time()
        
        # Train for one epoch
        train_loss, train_accuracy = train_model(model, train_loader, optimizer, criterion, epoch)
        train_losses.append(train_loss)
        train_accuracies.append(train_accuracy)
        
        # Evaluate on the test set
        test_loss, test_accuracy = test_model(model, test_loader, criterion)
        test_losses.append(test_loss)
        test_accuracies.append(test_accuracy)
        
        epoch_time = time.time() - epoch_start_time
        print(f"Epoch {epoch} 完成，用时 {epoch_time:.2f} 秒")
        
        # Save a checkpoint whenever the test accuracy improves
        if test_accuracy > best_accuracy:
            best_accuracy = test_accuracy
            if SAVE_BEST_ONLY:
                torch.save(model.state_dict(), model_path)
                print(f"保存最佳模型 (准确率: {best_accuracy:.2f}%) 到 {model_path}")
    
    total_time = time.time() - start_time
    print(f"模型训练完成! 总用时: {total_time:.2f} 秒, 最佳准确率: {best_accuracy:.2f}%")
    
    # When not in best-only mode, save the final model state instead
    if not SAVE_BEST_ONLY:
        torch.save(model.state_dict(), model_path)
        print(f"模型已保存到 {model_path}")
    
    # Visualize the training curves
    plot_training_results(train_losses, train_accuracies, test_losses, test_accuracies)
    
    # Visualize filters and feature maps
    visualize_filters(model)
    visualize_feature_maps(model, test_loader)
    
    return model

# Run the full training pipeline only when executed as a script
if __name__ == "__main__":
    main()