import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sns
import os


def get_data_loaders():
    """Build CIFAR-10 train/test DataLoaders.

    The train split gets augmentation (random crop/flip/rotation/jitter);
    the test split is only converted to tensor and normalized.

    Returns:
        (train_loader, test_loader, class_names) where class_names is the
        list of label strings taken from the dataset object.
    """
    # Per-channel normalization statistics used for both splits.
    cifar_mean = (0.4914, 0.4822, 0.4465)
    cifar_std = (0.2023, 0.1994, 0.2010)

    augmentation = [
        transforms.RandomCrop(size=32, padding=4),
        transforms.RandomHorizontalFlip(p=0.5),
        transforms.RandomRotation(degrees=(-15, 15)),
        transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2),
    ]
    to_normalized_tensor = [
        transforms.ToTensor(),
        transforms.Normalize(cifar_mean, cifar_std),
    ]

    train_transform = transforms.Compose(augmentation + to_normalized_tensor)
    test_transform = transforms.Compose(to_normalized_tensor)

    train_dataset = datasets.CIFAR10(
        root='./data', train=True, download=True, transform=train_transform
    )
    test_dataset = datasets.CIFAR10(
        root='./data', train=False, download=True, transform=test_transform
    )

    # Both loaders share batch size and worker settings; only shuffling differs.
    loader_kwargs = dict(batch_size=64, num_workers=2, pin_memory=True)
    train_loader = DataLoader(train_dataset, shuffle=True, **loader_kwargs)
    test_loader = DataLoader(test_dataset, shuffle=False, **loader_kwargs)

    return train_loader, test_loader, train_dataset.classes


class CIFAR10_CNN(nn.Module):
    """CNN for CIFAR-10: four conv stages (64 -> 128 -> 256 -> 512 channels),
    each halving the spatial resolution of the 32x32 input, followed by a
    two-layer fully-connected classifier producing 10 logits."""

    @staticmethod
    def _conv_stage(in_ch, out_ch):
        """One stage: Conv3x3 -> BatchNorm -> ReLU -> 2x2 max-pool (halves H, W)."""
        return nn.Sequential(
            nn.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),  # accelerate convergence
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

    def __init__(self):
        super().__init__()

        # Attribute names (conv1..conv4, fc_layers) are kept so previously
        # saved state_dicts remain loadable.
        self.conv1 = self._conv_stage(3, 64)     # 32x32 -> 16x16
        self.conv2 = self._conv_stage(64, 128)   # 16x16 -> 8x8
        self.conv3 = self._conv_stage(128, 256)  # 8x8  -> 4x4
        self.conv4 = self._conv_stage(256, 512)  # 4x4  -> 2x2

        self.fc_layers = nn.Sequential(
            nn.Flatten(),
            nn.Linear(512 * 2 * 2, 1024),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),  # prevent overfitting
            nn.Linear(1024, 10),
        )

    def forward(self, x):
        """Map a (N, 3, 32, 32) batch to (N, 10) class logits."""
        for stage in (self.conv1, self.conv2, self.conv3, self.conv4):
            x = stage(x)
        return self.fc_layers(x)


def train_model(model, train_loader, test_loader, optimizer, scheduler, criterion, device, epochs, save_path):
    """Train `model` for `epochs` epochs, evaluating on the test set each epoch.

    Args:
        model: network to optimize (already moved to `device` by the caller).
        train_loader: DataLoader yielding (data, targets) training batches.
        test_loader: DataLoader yielding (data, targets) evaluation batches.
        optimizer: optimizer over `model`'s parameters.
        scheduler: LR scheduler, stepped once per epoch.
        criterion: loss function applied to (outputs, targets).
        device: torch.device every batch is moved to.
        epochs: number of full passes over the training data.
        save_path: file path where the final state_dict is saved.

    Returns:
        (train_losses, test_losses, test_accs): per-epoch lists; losses are
        averaged over batches, accuracy is a percentage.
    """
    train_losses = []
    test_losses = []
    test_accs = []

    # Bug fix: os.path.dirname() returns '' for a bare filename, and
    # os.makedirs('') raises FileNotFoundError — only create a directory
    # when the path actually contains one.
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)

    print(f"开始训练 {optimizer.__class__.__name__}...")
    for epoch in range(1, epochs + 1):
        # ---- training pass ----
        model.train()
        total_train_loss = 0.0
        for data, targets in train_loader:
            data, targets = data.to(device), targets.to(device)

            optimizer.zero_grad()  # clear stale gradients before the backward pass
            outputs = model(data)
            loss = criterion(outputs, targets)
            loss.backward()
            optimizer.step()

            total_train_loss += loss.item()

        avg_train_loss = total_train_loss / len(train_loader)
        train_losses.append(avg_train_loss)

        # ---- evaluation pass (no gradient tracking) ----
        model.eval()
        total_test_loss = 0.0
        correct = 0
        total = 0
        with torch.no_grad():
            for data, targets in test_loader:
                data, targets = data.to(device), targets.to(device)
                outputs = model(data)
                loss = criterion(outputs, targets)

                total_test_loss += loss.item()
                _, preds = torch.max(outputs, 1)
                correct += (preds == targets).sum().item()
                total += targets.size(0)

        avg_test_loss = total_test_loss / len(test_loader)
        test_acc = 100 * correct / total
        test_losses.append(avg_test_loss)
        test_accs.append(test_acc)

        scheduler.step()  # per-epoch learning-rate decay

        print(f"Epoch [{epoch}/{epochs}] | "
              f"Train Loss: {avg_train_loss:.4f} | "
              f"Test Loss: {avg_test_loss:.4f} | "
              f"Test Acc: {test_acc:.2f}%")

    torch.save(model.state_dict(), save_path)
    print(f"Model saved to {save_path}")

    return train_losses, test_losses, test_accs


def evaluate_model(model, test_loader, classes, device, save_dir):
    """Measure overall test accuracy and save a confusion-matrix heatmap.

    Args:
        model: trained network to evaluate.
        test_loader: DataLoader yielding (data, targets) batches.
        classes: label names used for the heatmap axes.
        device: torch.device batches are moved to.
        save_dir: directory where confusion_matrix.png is written.

    Returns:
        Overall accuracy on the test set, as a percentage.
    """
    os.makedirs(save_dir, exist_ok=True)

    # Collect all predictions and ground-truth labels in one pass.
    model.eval()
    all_preds = []
    all_targets = []
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            predictions = logits.argmax(dim=1)

            all_preds.extend(predictions.cpu().numpy())
            all_targets.extend(labels.cpu().numpy())

    total = len(all_targets)
    correct = sum(int(p == t) for p, t in zip(all_preds, all_targets))
    overall_acc = 100 * correct / total
    print(f"整体测试准确率: {overall_acc:.2f}%")

    # Render the class-vs-class confusion matrix as an annotated heatmap.
    cm = confusion_matrix(all_targets, all_preds)
    plt.figure(figsize=(10, 8))
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                xticklabels=classes, yticklabels=classes)
    plt.xlabel("Predicted Class")
    plt.ylabel("True Class")
    plt.title("Confusion Matrix (CIFAR-10 Test Set)")
    plt.savefig(os.path.join(save_dir, "confusion_matrix.png"), dpi=300, bbox_inches='tight')
    plt.close()

    return overall_acc


def plot_metrics(adam_losses, adam_accs, sgd_losses, sgd_accs, save_dir):
    """Plot loss and accuracy curves for the Adam and SGD runs side by side.

    Args:
        adam_losses / sgd_losses: dicts with "train" and "test" loss lists.
        adam_accs / sgd_accs: per-epoch test-accuracy lists.
        save_dir: directory where loss_acc_curves.png is written.
    """
    os.makedirs(save_dir, exist_ok=True)

    plt.figure(figsize=(12, 5))

    # Left panel: train/test loss for both optimizers.
    plt.subplot(1, 2, 1)
    for values, label, color in [
        (adam_losses["train"], "Adam - Train Loss", "#1f77b4"),
        (adam_losses["test"], "Adam - Test Loss", "#ff7f0e"),
        (sgd_losses["train"], "SGD - Train Loss", "#2ca02c"),
        (sgd_losses["test"], "SGD - Test Loss", "#d62728"),
    ]:
        plt.plot(values, label=label, color=color)
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title("Training & Test Loss Curves")
    plt.legend()
    plt.grid(alpha=0.3)

    # Right panel: test accuracy for both optimizers.
    plt.subplot(1, 2, 2)
    for values, label, color in [
        (adam_accs, "Adam - Test Accuracy", "#1f77b4"),
        (sgd_accs, "SGD - Test Accuracy", "#2ca02c"),
    ]:
        plt.plot(values, label=label, color=color)
    plt.xlabel("Epoch")
    plt.ylabel("Accuracy (%)")
    plt.title("Test Accuracy Curves")
    plt.legend()
    plt.grid(alpha=0.3)

    plt.tight_layout()
    plt.savefig(os.path.join(save_dir, "loss_acc_curves.png"), dpi=300, bbox_inches='tight')
    plt.close()


def main():
    """End-to-end pipeline: load CIFAR-10, train the same CNN with Adam and
    with SGD, evaluate the Adam model, and plot comparison curves."""
    epochs = 30
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model_dir = "../saved_models/"
    results_dir = "../results/"

    train_loader, test_loader, classes = get_data_loaders()
    print(f"数据加载成功,Device: {device}")
    print(f"Classes: {classes}")

    criterion = nn.CrossEntropyLoss()

    # One run per optimizer; each gets a fresh model and its own checkpoint.
    runs = [
        ("adam", lambda params: optim.Adam(params, lr=0.001, weight_decay=1e-4)),
        ("sgd", lambda params: optim.SGD(params, lr=0.01, momentum=0.9, weight_decay=1e-4)),
    ]
    models = {}
    histories = {}
    for tag, make_optimizer in runs:
        model = CIFAR10_CNN().to(device)
        optimizer = make_optimizer(model.parameters())
        # LR scheduling: decay by 10x every 10 epochs.
        scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
        save_path = os.path.join(model_dir, f"cifar10_cnn_{tag}.pth")

        train_loss, test_loss, test_acc = train_model(
            model=model,
            train_loader=train_loader,
            test_loader=test_loader,
            optimizer=optimizer,
            scheduler=scheduler,
            criterion=criterion,
            device=device,
            epochs=epochs,
            save_path=save_path,
        )
        models[tag] = model
        histories[tag] = {"train": train_loss, "test": test_loss, "acc": test_acc}

    print("\n正在评估Adam优化模型...")
    evaluate_model(models["adam"], test_loader, classes, device, results_dir)

    plot_metrics(
        adam_losses={"train": histories["adam"]["train"], "test": histories["adam"]["test"]},
        adam_accs=histories["adam"]["acc"],
        sgd_losses={"train": histories["sgd"]["train"], "test": histories["sgd"]["test"]},
        sgd_accs=histories["sgd"]["acc"],
        save_dir=results_dir,
    )
    print("训练结束. 保存结果至:../results/")


if __name__ == "__main__":
    main()
