# coding: utf-8
"""
Implementing AlexNet using PyTorch

"""
import time

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from rich.progress import track

import wandb

wandb.init(project="mobile-net")


def train(model, train_loader, test_loader, loss_func, optimizer, device, epochs):
    """Train *model* for up to *epochs* epochs with accuracy-based early stopping.

    Each epoch runs one optimization pass over ``train_loader``, then
    evaluates accuracy on both loaders, logs metrics to wandb, and saves
    the best-so-far weights (by test accuracy) to ``mobile_net.pth``.
    Training stops early once test accuracy has not improved for
    ``patience`` consecutive epochs.

    Args:
        model: network to train; assumed already moved to *device*.
        train_loader: DataLoader yielding ``(images, labels)`` training batches.
        test_loader: DataLoader yielding ``(images, labels)`` evaluation batches.
        loss_func: criterion, e.g. ``nn.CrossEntropyLoss()``.
        optimizer: optimizer constructed over ``model.parameters()``.
        device: torch.device to run computation on.
        epochs: maximum number of epochs to run.
    """
    early_stop = {
        "best_acc": 0,
        "patience": 8,
        "ep_count": 0,
    }

    def _accuracy(loader):
        # Percentage of correctly classified samples over the whole loader.
        correct = 0
        total = 0
        for images, labels in loader:
            images, labels = images.to(device), labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
        return 100 * correct / total

    for epoch in range(epochs):
        start_time = time.time()
        # BUG FIX: explicitly enter training mode each epoch; the original
        # left the model in whatever mode the previous code set.
        model.train()
        train_loss = 0
        for images, labels in track(train_loader):
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = loss_func(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
        train_loss /= len(train_loader)

        # BUG FIX: evaluate in eval mode so the Dropout layers in the
        # classifier are disabled; the original measured accuracy with
        # dropout still active, understating both accuracies.
        model.eval()
        with torch.no_grad():
            train_acc = _accuracy(train_loader)
            test_acc = _accuracy(test_loader)

        wandb.log({"train_loss": train_loss, "train_acc": train_acc, "test_acc": test_acc})

        if test_acc > early_stop["best_acc"]:
            early_stop["best_acc"] = test_acc
            early_stop["ep_count"] = 0
            torch.save(model.state_dict(), "mobile_net.pth")
            print("Model saved!")
        else:
            early_stop["ep_count"] += 1
            if early_stop["ep_count"] >= early_stop["patience"]:
                print(f"Early stopping at epoch {epoch+1}")
                break

        cost = time.strftime("%H:%M:%S", time.gmtime(time.time() - start_time))
        print(f"- Epoch {epoch + 1:3d}/{epochs} [{cost}]", end=", ")
        # BUG FIX: report the epoch-average loss (train_loss), not the raw
        # loss tensor of the final batch as the original did.
        print(f"loss: {train_loss:.4f}, train_acc: {train_acc:.2f}, test_acc: {test_acc:.2f}")


if __name__ == "__main__":
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    transform = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize(256),
            torchvision.transforms.CenterCrop(227),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    data = r"./data"

    traind = torchvision.datasets.CIFAR10(data, train=True, download=False, transform=transform)
    testd = torchvision.datasets.CIFAR10(data, train=False, download=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(traind, batch_size=128, shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(testd, batch_size=100, shuffle=False, num_workers=2)

    model = torchvision.models.mobilenet_v2(weights=torchvision.models.MobileNet_V2_Weights.DEFAULT)
    model.classifier = nn.Sequential(
        nn.Dropout(0.2),
        nn.Linear(1280, 512),
        nn.ReLU(inplace=True),
        nn.Dropout(0.2),
        nn.Linear(512, 256),
        nn.ReLU(inplace=True),
        nn.Linear(256, 10),
    )
    model.to(device)

    loss_func = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

    train(model, train_loader, test_loader, loss_func, optimizer, device, 20)
