import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader
import matplotlib
import matplotlib.pyplot as plt

matplotlib.use('TkAgg')  # interactive backend for plt.show(); must be set before any figure is created

# Run on GPU when available; non_blocking enables async host-to-device copies
# (only meaningful for CUDA transfers).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
non_blocking = torch.cuda.is_available()

# Convert PIL images to float tensors and standardize with MNIST's
# dataset-wide mean/std (0.1307, 0.3081).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

# Download (if needed) and load the MNIST train/test splits.
train_dataset = datasets.MNIST(root='../data/Mnist/', train=True, transform=transform, download=True)
test_dataset = datasets.MNIST(root='../data/Mnist/', train=False, transform=transform, download=True)

batch_size = 128
# Shuffle only the training data; evaluation order does not matter.
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)


class NetModel(nn.Module):
    """CNN for 28x28 single-channel MNIST images.

    Three conv blocks (Conv -> BatchNorm -> ReLU -> MaxPool) followed by a
    three-layer fully-connected head producing 10 raw class logits.
    """

    @staticmethod
    def _conv_block(in_ch, out_ch, kernel, pad):
        """Build one Conv2d -> BatchNorm2d -> ReLU -> 2x2 max-pool stage."""
        return nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel, 1, pad),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )

    def __init__(self):
        super().__init__()
        # Spatial sizes: 28 -> 14 -> 7 -> 3 (last stage uses kernel 4, pad 1).
        self.block1 = self._conv_block(1, 16, 5, 2)
        self.block2 = self._conv_block(16, 32, 5, 2)
        self.block3 = self._conv_block(32, 64, 4, 1)
        # LazyLinear infers its in_features on the first forward pass,
        # so the flattened conv output size never has to be hand-computed.
        self.linear1 = nn.LazyLinear(128)
        self.linear2 = nn.Linear(128, 64)
        self.linear3 = nn.Linear(64, 10)
        self.act = nn.ReLU()

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) logits."""
        for stage in (self.block1, self.block2, self.block3):
            x = stage(x)
        x = torch.flatten(x, 1)  # more robust than view for non-contiguous tensors
        x = self.act(self.linear1(x))
        x = self.act(self.linear2(x))
        return self.linear3(x)


model = NetModel().to(device)
# CrossEntropyLoss expects raw logits (it applies log-softmax internally).
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Halve the learning rate whenever the monitored loss (the test loss, see
# scheduler.step in __main__) fails to improve for `patience` epochs,
# never dropping below min_lr.
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer,
    mode='min',
    factor=0.5,
    patience=2,
    min_lr=1e-5
)


def train(model, train_loader, optimizer, criterion, device):
    """Run one training epoch over `train_loader`.

    Args:
        model: network to optimize (caller is expected to have called model.train()).
        train_loader: iterable yielding (inputs, labels) batches.
        optimizer: optimizer stepping `model`'s parameters.
        criterion: loss returning a per-batch mean (e.g. CrossEntropyLoss).
        device: device batches are moved to.

    Returns:
        (mean loss per sample, accuracy) over the whole epoch.
    """
    # Async host-to-device copies only apply to CUDA; deriving this from
    # `device` removes the previous dependency on a module-level global.
    non_blocking = device.type == "cuda"
    loss_sum = 0.0
    total_correct = 0
    total_samples = 0
    for inputs, labels in train_loader:
        inputs = inputs.to(device, non_blocking=non_blocking)
        labels = labels.to(device, non_blocking=non_blocking)
        logits = model(inputs)
        loss = criterion(logits, labels)
        optimizer.zero_grad(set_to_none=True)
        loss.backward()
        optimizer.step()
        batch = labels.size(0)
        # Weight each batch loss by its size so a smaller final batch is not
        # over-weighted (criterion returns a per-batch mean).
        loss_sum += loss.item() * batch
        total_correct += (logits.argmax(dim=1) == labels).sum().item()
        total_samples += batch
    return loss_sum / total_samples, total_correct / total_samples


def test(model, test_loader, criterion, device):
    test_loss_epoch = 0
    total_correct = 0
    total_samples = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs = inputs.to(device, non_blocking=non_blocking)
            labels = labels.to(device, non_blocking=non_blocking)
            logits = model(inputs)
            loss = criterion(logits, labels)
            test_loss_epoch += loss.item()
            pred = logits.argmax(dim=1)
            correct = (pred == labels).sum().item()
            total_correct += correct
            total_samples += labels.size(0)
    test_loss_epoch = test_loss_epoch / len(test_loader)
    test_acc_epoch = total_correct / total_samples
    return test_loss_epoch, test_acc_epoch


if __name__ == '__main__':
    epochs = 15
    # Per-epoch metric curves, keyed by name for plotting below.
    history = {"train_loss": [], "train_acc": [], "test_loss": [], "test_acc": []}
    for epoch in range(epochs):
        # One optimization pass over the training set, then a held-out evaluation.
        model.train()
        tr_loss, tr_acc = train(model, train_dataloader, optimizer, criterion, device)
        model.eval()
        te_loss, te_acc = test(model, test_dataloader, criterion, device)
        # The plateau scheduler keys off the held-out loss.
        scheduler.step(te_loss)
        history["train_loss"].append(tr_loss)
        history["train_acc"].append(tr_acc)
        history["test_loss"].append(te_loss)
        history["test_acc"].append(te_acc)
        cur_lr = optimizer.param_groups[0]['lr']
        print(f"[epoch {epoch + 1}] lr={cur_lr:.5f} | "
              f"train_loss={tr_loss:.3f} acc={tr_acc * 100:.1f}% | "
              f"test_loss={te_loss:.3f} acc={te_acc * 100:.1f}%")
    # Side-by-side loss and accuracy curves.
    plt.figure(figsize=(16, 6))
    plt.subplot(1, 2, 1)
    plt.plot(range(epochs), history["train_loss"], label="train_loss")
    plt.plot(range(epochs), history["test_loss"], label="test_loss")
    plt.legend()
    plt.subplot(1, 2, 2)
    plt.plot(range(epochs), history["train_acc"], label="train_acc")
    plt.plot(range(epochs), history["test_acc"], label="test_acc")
    plt.legend()
    plt.show()
