import torch
from torch.utils.data import DataLoader
from torchvision import datasets
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from VGG import VGG11

# Select GPU when available; all tensors/model are moved to this device below.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(f"[INFO]: Computation device: {device}")
# Hyperparameters
epochs = 10
batch_size = 4

# MNIST images are 28x28; resize to 224x224 because that is the input size
# the VGG architecture expects, then convert PIL images to float tensors.
train_transform = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor()])
valid_transform = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor()])
test_transform = transforms.Compose([transforms.Resize([224, 224]), transforms.ToTensor()])

# Hold out 20% of the official training set as a validation split.
dataset = datasets.MNIST(root="data", train=True, transform=train_transform, download=True)
train_size = int(0.8 * len(dataset))
valid_size = len(dataset) - train_size
train_dataset, valid_dataset = torch.utils.data.random_split(dataset, [train_size, valid_size])
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
valid_dataloader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)
# Official MNIST test split, evaluated with the same resize/ToTensor pipeline.
test_dataset = datasets.MNIST(root='data', train=False, transform=test_transform, download=True)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

# initialize the model
# in_channels=1 because MNIST is grayscale; out_classes=10 for digits 0-9.
model = VGG11(in_channels=1, out_classes=10).to(device)
# parameters
total_params = sum(p.numel() for p in model.parameters())
print(f"[INFO]: {total_params:,} total parameters.")
total_trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f"[INFO]: {total_trainable_params:,} trainable parameters.")
# criterion
criterion = nn.CrossEntropyLoss()
# optimizer
# SGD with momentum and weight decay, as in the original VGG paper's setup.
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

# training
def train(model, trainloader, optimizer, criterion):
    """Run one training epoch.

    Returns a tuple ``(avg_loss, accuracy)`` where ``avg_loss`` is the mean
    per-batch loss and ``accuracy`` is the epoch accuracy in percent.
    """
    model.train()
    print("Training...")

    running_loss = 0.0
    running_correct = 0
    num_batches = 0

    for _, (images, targets) in tqdm(enumerate(trainloader), total=len(trainloader)):
        num_batches += 1
        # move the batch to the computation device (module-level `device`)
        images = images.to(device)
        targets = targets.to(device)

        # forward pass and loss
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, targets)
        running_loss += loss.item()

        # batch accuracy bookkeeping
        predictions = outputs.argmax(dim=1)
        running_correct += (predictions == targets).sum().item()

        # backward pass and parameter update
        loss.backward()
        optimizer.step()

    avg_loss = running_loss / num_batches
    accuracy = 100. * (running_correct / len(trainloader.dataset))
    return avg_loss, accuracy
# validate
def validate(model, validloader, criterion):
    """Evaluate the model on the validation split.

    Prints per-digit accuracy and returns ``(avg_loss, accuracy)``, where
    ``avg_loss`` is the mean per-batch loss (a Python float) and ``accuracy``
    is the overall accuracy in percent.
    """
    model.eval()
    print('Validation')

    class_correct = [0] * 10
    class_total = [0] * 10
    valid_loss = 0.0
    valid_correct = 0
    counter = 0

    # no_grad: no autograd bookkeeping needed during evaluation
    with torch.no_grad():
        for data in tqdm(validloader, total=len(validloader)):
            counter += 1

            image, labels = data
            image = image.to(device)
            labels = labels.to(device)
            # forward
            outputs = model(image)
            # compute loss
            loss = criterion(outputs, labels)
            # BUG FIX: accumulate the scalar via .item() — the original added the
            # tensor itself, so the returned epoch loss was a tensor, not a float.
            valid_loss += loss.item()
            # BUG FIX: argmax returns a single tensor; the original
            # `_, preds = outputs.argmax(dim=1)` tuple-unpacked it and crashed
            # (that unpacking style belongs to torch.max, not argmax).
            preds = outputs.argmax(dim=1)
            valid_correct += (preds == labels).sum().item()
            # per-class bookkeeping; no .squeeze() here — preds is already 1-D,
            # and squeezing a size-1 final batch would make indexing fail.
            correct = (preds == labels)
            # BUG FIX: use `j`, not `i` — the original inner `i` shadowed the
            # enumerate index of the outer loop.
            for j in range(len(preds)):
                label = labels[j]
                class_correct[label] += correct[j].item()
                class_total[label] += 1

    epoch_loss = valid_loss / counter
    epoch_acc = 100. * (valid_correct / len(validloader.dataset))

    print('\n')
    for i in range(10):
        # guard against division by zero when a class never appears in the split
        if class_total[i] > 0:
            print(f"Accuracy of digit {i}: {100. * class_correct[i] / class_total[i]}")
        else:
            print(f"Accuracy of digit {i}: n/a (no samples)")

    return epoch_loss, epoch_acc

# start training
# Per-epoch history, kept for later inspection/plotting.
train_loss, valid_loss = [], []
train_acc, valid_acc = [], []

for epoch in range(epochs):
    print(f"[INFO]: Epoch {epoch + 1} of {epochs}")

    train_epoch_loss, train_epoch_acc = train(model, train_dataloader, optimizer, criterion)
    valid_epoch_loss, valid_epoch_acc = validate(model, valid_dataloader, criterion)

    train_loss.append(train_epoch_loss)
    train_acc.append(train_epoch_acc)
    valid_loss.append(valid_epoch_loss)
    valid_acc.append(valid_epoch_acc)

    print('\n')
    print(f"Training loss: {train_epoch_loss:.3f}, training acc: {train_epoch_acc:.3f}")
    # BUG FIX: the original printed valid_epoch_acc in the "Validation loss"
    # slot; valid_epoch_loss was never displayed.
    print(f"Validation loss: {valid_epoch_loss:.3f}, validation acc: {valid_epoch_acc:.3f}")

    print('-'*50)
