import os
import datetime
import torch
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from mnistmodel import Net  # 导入模型

# Hyperparameters
batch_size = 64
learning_rate = 0.001
momentum = 0.5
EPOCH = 20
optimizer_type = 'adam'

# Dataset preparation: normalize with the standard MNIST mean/std (0.1307, 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),
])
train_dataset = datasets.MNIST(root='./data/mnist', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data/mnist', train=False, download=True, transform=transform)
# Shuffle only the training data; evaluation order does not matter but stays fixed.
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# 获取优化器函数
def get_optimizer(model, optimizer_type='sgd', lr=0.001, momentum=0.5):
    """Build an optimizer over ``model.parameters()``.

    Supported types: ``'sgd'`` (the only one that uses ``momentum``),
    ``'adadelta'`` and ``'adam'``. Raises ``ValueError`` for anything else.
    """
    if optimizer_type == 'sgd':
        return torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)

    # Remaining types share the same (params, lr) signature — dispatch by name.
    factories = {
        'adadelta': torch.optim.Adadelta,
        'adam': torch.optim.Adam,
    }
    try:
        factory = factories[optimizer_type]
    except KeyError:
        raise ValueError(f'Unknown optimizer type: {optimizer_type}') from None
    return factory(model.parameters(), lr=lr)

# 训练函数
def train(epoch, device, loss_history):
    """Run one training epoch over ``train_loader``.

    Logs the mean loss and running accuracy every 300 batches, appending
    each 300-batch mean loss to ``loss_history``. After the last epoch
    (``EPOCH - 1``) the model is saved under a timestamped name.

    Relies on module-level globals: ``model``, ``optimizer``,
    ``criterion``, ``train_loader`` and ``EPOCH``.
    """
    model.train()  # enable training-mode layers (dropout/batchnorm)
    loss_sum, seen, hits = 0.0, 0, 0

    for step, (inputs, target) in enumerate(train_loader, start=1):
        inputs, target = inputs.to(device), target.to(device)  # move batch to GPU/CPU

        optimizer.zero_grad()  # clear gradients from the previous step

        outputs = model(inputs)
        loss = criterion(outputs, target)
        loss.backward()
        optimizer.step()

        loss_sum += loss.item()
        predicted = outputs.data.argmax(dim=1)
        seen += inputs.shape[0]
        hits += (predicted == target).sum().item()

        # Report and reset the running statistics every 300 batches.
        if step % 300 == 0:
            print('[%d, %5d]: loss: %.3f , acc: %.2f %%'
                  % (epoch + 1, step, loss_sum / 300, 100 * hits / seen))
            loss_history.append(loss_sum / 300)
            loss_sum, seen, hits = 0.0, 0, 0

    if epoch == EPOCH - 1:
        # Timestamp keeps successive runs from overwriting each other.
        stamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        # NOTE(review): Net.save is defined in mnistmodel — assumed to take
        # (directory, name); confirm it creates the directory if missing.
        model.save('Models', f'num_recognition_model_epoch{epoch}_{stamp}')

# 测试函数
def test(epoch, device):
    """Evaluate the model on ``test_loader`` and return accuracy in [0, 1].

    Prints the accuracy tagged with the current epoch. Relies on
    module-level globals: ``model``, ``test_loader`` and ``EPOCH``.
    """
    model.eval()  # disable training-mode layers (dropout/batchnorm)
    hits, seen = 0, 0
    with torch.no_grad():  # no gradients needed for evaluation
        for images, labels in test_loader:
            images, labels = images.to(device), labels.to(device)
            preds = model(images).data.argmax(dim=1)
            seen += labels.size(0)
            hits += (preds == labels).sum().item()

    acc = hits / seen
    print('[%d / %d]: 测试集准确率: %.1f %% ' % (epoch + 1, EPOCH, 100 * acc))
    return acc

def plot_loss(loss_history):
    """Plot the logged training losses and save the figure.

    Writes ``Plots/training_loss_curve.png`` at 300 DPI. Each point in
    ``loss_history`` is a mean loss over 300 training batches (see
    ``train``), so the x-axis is labeled as logging steps, not epochs.

    Fix: an empty ``loss_history`` used to crash in ``np.min``/``np.max``
    when setting the y-limits; it is now a warned no-op.
    """
    if not loss_history:
        print('plot_loss: loss_history is empty, nothing to plot')
        return

    # Seaborn whitegrid style: white background with grid lines.
    sns.set(style='whitegrid')

    plt.figure(figsize=(10, 5))

    # Soft pastel color for the loss curve.
    plt.plot(loss_history, label='Training Loss',
             color=sns.color_palette("pastel")[0],
             linestyle='-', marker='o', markersize=5, linewidth=2)

    plt.title('Training Loss Curve', fontsize=16, fontweight='bold')
    # Points are per-300-batch means, not per-epoch values.
    plt.xlabel('Logging step (per 300 batches)', fontsize=14)
    plt.ylabel('Loss', fontsize=14)

    # Hide x ticks: raw step indices carry little information.
    plt.xticks([])

    plt.grid(True, linestyle='--', alpha=0.7)

    # Pad the y range slightly around the observed losses.
    plt.ylim(np.min(loss_history) - 0.05, np.max(loss_history) + 0.05)

    plt.legend(fontsize=12)

    # Save the figure, creating the output directory if needed.
    plot_dir = 'Plots'
    os.makedirs(plot_dir, exist_ok=True)
    plt.savefig(os.path.join(plot_dir, 'training_loss_curve.png'),
                bbox_inches='tight', dpi=300)
    plt.close()


# 主程序
# Entry point: build the model, then train and evaluate for EPOCH epochs.
if __name__ == '__main__':
    # Prefer the GPU when one is available.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = Net().to(device)
    criterion = torch.nn.CrossEntropyLoss()  # cross-entropy loss for 10-class digits
    # Optimizer is selected by the module-level optimizer_type setting.
    optimizer = get_optimizer(model, optimizer_type=optimizer_type,
                              lr=learning_rate, momentum=momentum)

    loss_history = []

    # Alternate one training epoch with one evaluation pass.
    for epoch in range(EPOCH):
        train(epoch, device, loss_history)
        test(epoch, device)

    # plot_loss(loss_history)
    # print(loss_history)