import json
import os
from datetime import datetime

import matplotlib.pyplot as plt
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.metrics import confusion_matrix
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader, random_split
from torchvision import datasets, transforms
from tqdm import tqdm

from model import CNN

# Hyperparameter configuration
BATCH_SIZE = 64
EPOCHS = 500
LEARNING_RATE = 0.001
PATIENCE = 40  # early-stopping patience: epochs without val-loss improvement

# Device configuration: prefer CUDA GPU when available, otherwise CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Create the CNN model instance and move it to the selected device
model = CNN().to(device)
# Cross-entropy loss for the multi-class classification task
criterion = nn.CrossEntropyLoss()
# Adam optimizer with the configured learning rate
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
# LR scheduler: when the validation loss stops improving for 2 consecutive
# epochs, halve the learning rate (floored at 1e-6)
scheduler = ReduceLROnPlateau(optimizer, 'min', patience=2, factor=0.5, min_lr=1e-6)
# Data preprocessing pipeline (note: includes random augmentation, so it is
# intended for training-time use)
transform = transforms.Compose([
    transforms.RandomAffine(degrees=10, translate=(0.1, 0.1)),  # slight random rotation/translation
    transforms.ToTensor(),  # convert PIL image to a tensor in [0, 1]
    transforms.Normalize((0.1307,), (0.3081,))  # normalize with MNIST mean 0.1307 / std 0.3081
])


def train_loop():
    """Train the model with early stopping, logging each epoch to training_log.json.

    Builds the MNIST train/validation split ONCE, then runs up to EPOCHS
    epochs of `train`, stopping early after PATIENCE epochs without
    validation-loss improvement.
    """
    # Truncate the log file (open in write mode, then close immediately)
    with open("training_log.json", "w") as f:
        pass

    # datasets.MNIST with download=True fetches the data into ./data on
    # first run; subsequent runs reuse the local copy.
    datasets.MNIST(root='./data', download=True)

    # The training set uses the augmented module-level `transform`; the
    # validation set uses a clean (non-augmented) pipeline so the val loss
    # is not inflated by random affine jitter.
    train_dataset = datasets.MNIST(root='./data', train=True, transform=transform)
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    val_dataset = datasets.MNIST(root='./data', train=True, transform=eval_transform)

    # Split 80/20 ONCE before the epoch loop. The original code re-created
    # the dataset and re-split it every epoch, which produced a different
    # partition each time and leaked validation samples into training.
    val_size = len(train_dataset) // 5  # 60000 / 5 = 12000
    train_size = len(train_dataset) - val_size
    indices = torch.randperm(len(train_dataset)).tolist()
    train_data = torch.utils.data.Subset(train_dataset, indices[:train_size])
    val_data = torch.utils.data.Subset(val_dataset, indices[train_size:])

    train_loader = DataLoader(
        dataset=train_data,
        batch_size=BATCH_SIZE,
        shuffle=True
    )

    val_loader = DataLoader(
        dataset=val_data,
        batch_size=BATCH_SIZE,
        shuffle=False
    )

    best_val_loss = float('inf')  # any first validation loss improves on this
    early_stop_counter = 0  # consecutive epochs without val-loss improvement

    for epoch in range(1, EPOCHS + 1):
        best_val_loss, early_stop_counter = train(
            model, device, train_loader, val_loader, optimizer, scheduler, epoch, best_val_loss, early_stop_counter
        )

        if early_stop_counter >= PATIENCE:
            print(f"Early stopping at epoch {epoch}")
            break


def train(model, device, train_loader, val_loader, optimizer, scheduler, epoch, best_val_loss, early_stop_counter):
    """Run one training epoch, then validate, update the scheduler and
    early-stopping state, save the best checkpoint, and append a JSON log line.

    Returns the updated ``(best_val_loss, early_stop_counter)`` pair so the
    caller can carry early-stopping state across epochs. Uses the
    module-level ``criterion`` for the loss.
    """
    model.train()
    train_loss = 0
    correct = 0
    used_num = 0  # number of samples processed so far this epoch

    progress_bar = tqdm(train_loader, desc=f"Epoch {epoch}")
    for data, target in progress_bar:
        # Move the batch to the target device
        data, target = data.to(device), target.to(device)
        # Forward pass: clear gradients, run the model, compute the loss
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        # Backward pass: compute gradients and update parameters
        loss.backward()
        optimizer.step()
        # Accumulate loss weighted by batch size (so the running mean is
        # exact even with a smaller final batch), count correct predictions,
        # and refresh the progress bar with the running loss/accuracy.
        train_loss += loss.item() * data.size(0)
        used_num += data.size(0)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        progress_bar.set_postfix({
            "Loss": f"{train_loss / used_num:.4f}",
            "Acc": f"{100. * correct / used_num:.2f}%"
        })

    # Validation step; the plateau scheduler reacts to the validation loss
    val_loss, val_acc = test(model, device, val_loader, validation=True)
    scheduler.step(val_loss)

    # Early stopping bookkeeping: save the best model, otherwise count the
    # epochs without improvement
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        early_stop_counter = 0
        torch.save(model.state_dict(), "best_model.pth")
    else:
        early_stop_counter += 1

    # Append one JSON line with this epoch's metrics
    log_entry = {
        "epoch": epoch,
        "train_loss": train_loss / len(train_loader.dataset),
        "train_acc": correct / len(train_loader.dataset),
        "val_loss": val_loss,
        "val_acc": val_acc,
        "lr": optimizer.param_groups[0]['lr'],
        "time": datetime.now().isoformat()
    }

    with open("training_log.json", "a") as f:
        f.write(json.dumps(log_entry) + "\n")

    return best_val_loss, early_stop_counter


def test_model():
    """Evaluate the best saved checkpoint on the MNIST test set and log the result."""
    # map_location ensures a checkpoint saved on GPU still loads on a
    # CPU-only host (the original call failed in that situation).
    model.load_state_dict(torch.load("best_model.pth", map_location=device))

    # Evaluate on clean images: the module-level `transform` applies random
    # affine augmentation, which must not be used at test time.
    eval_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    test_dataset = datasets.MNIST(
        root='./data',
        train=False,
        transform=eval_transform
    )

    test_loader = DataLoader(
        dataset=test_dataset,
        batch_size=BATCH_SIZE,
        shuffle=False
    )

    test_loss, test_acc = test(model, device, test_loader)
    log_entry = {
        "test_loss": test_loss,
        "test_acc": test_acc,
        "type": "test_result"
    }
    print(f"\nTest Results - Loss: {test_loss:.4f}, Accuracy: {test_acc:.2%}")

    # Append the final result to the same JSON-lines log as training
    with open("training_log.json", "a") as f:
        f.write(json.dumps(log_entry) + "\n")


def test(model, device, test_loader, validation=False):
    """Evaluate ``model`` on ``test_loader``.

    Returns ``(mean_loss, accuracy)`` over the whole dataset. When
    ``validation`` is False (i.e. a real test run) a confusion-matrix
    heat-map is also written to ``static/confusion_matrix.png``. Uses the
    module-level ``criterion`` for the loss.
    """
    model.eval()
    test_loss = 0
    correct = 0
    all_preds = []
    all_targets = []

    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            # Weight by batch size so the final mean is exact even when the
            # last batch is smaller than BATCH_SIZE
            test_loss += criterion(output, target).item() * data.size(0)
            pred = output.argmax(dim=1, keepdim=True)
            correct += pred.eq(target.view_as(pred)).sum().item()

            # Collect predictions and ground-truth labels for the confusion matrix
            all_preds.extend(pred.cpu().view(-1).tolist())
            all_targets.extend(target.cpu().tolist())

    test_loss /= len(test_loader.dataset)
    accuracy = correct / len(test_loader.dataset)

    if not validation:
        # Ensure the output directory exists; the original call raised
        # FileNotFoundError when ./static was missing.
        os.makedirs('static', exist_ok=True)
        # Render the confusion matrix as a heat-map
        cm = confusion_matrix(all_targets, all_preds)
        plt.figure(figsize=(10, 8))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        plt.title('Confusion Matrix')
        plt.xlabel('Predicted')
        plt.ylabel('True')
        plt.savefig('static/confusion_matrix.png')
        plt.close()

    return test_loss, accuracy


if __name__ == "__main__":
    # Train with early stopping, then evaluate the best checkpoint on the test set
    train_loop()
    test_model()
