# coding: utf-8
"""
Implementing AlexNet using PyTorch

"""
import time

import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
from rich.progress import track

import wandb

# NOTE(review): this runs at import time, so merely importing this module
# starts a wandb run — consider moving it under the __main__ guard.
wandb.init(project="alexnet")


class AlexNet(nn.Module):
    """AlexNet (Krizhevsky et al., 2012) with a built-in training loop.

    Expects 227x227 RGB input. `num_classes` sets the width of the final
    linear layer. The loss function and SGD optimizer are created in
    `__init__` and used by `fit`.
    """

    def __init__(self, num_classes: int = 1000) -> None:
        super().__init__()
        # Feature extractor. Input size: (b, 3, 227, 227).
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),  # (b, 64, 55, 55)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),  # (b, 64, 27, 27)
            nn.Conv2d(64, 192, kernel_size=5, padding=2),  # (b, 192, 27, 27)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),  # (b, 192, 13, 13)
            nn.Conv2d(192, 384, kernel_size=3, padding=1),  # (b, 384, 13, 13)
            nn.ReLU(),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),  # (b, 256, 13, 13)
            nn.ReLU(),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),  # (b, 256, 13, 13)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),  # (b, 256, 6, 6)
        )
        # Classifier head over the flattened (b, 256*6*6) features.
        self.classifier = nn.Sequential(
            nn.Dropout(0.5),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(),
            nn.Linear(4096, num_classes),
        )
        self.loss_func = nn.CrossEntropyLoss()
        self.optimizer = optim.SGD(self.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return raw class logits of shape (b, num_classes)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten for the linear classifier
        return self.classifier(x)

    def fit(self, train_loader, test_loader, epochs: int = 50) -> None:
        """Train with SGD, logging to wandb, with early stopping on test accuracy.

        Saves a checkpoint to ./models/ whenever test accuracy improves and
        stops after `tolerance` epochs without improvement.
        """
        import os

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.to(device)
        # BUGFIX: save() failed with FileNotFoundError when ./models was missing.
        os.makedirs("./models", exist_ok=True)
        best = 0.0  # best test accuracy
        best_epoch = 0  # epoch of the best test accuracy
        counter = 0  # early stopping counter
        tolerance = 10  # early stopping tolerance
        for epoch in range(epochs):
            # BUGFIX: re-enable dropout every epoch; evaluate() puts the
            # model in eval mode at the end of the previous epoch.
            self.train()
            running_loss = 0.0
            batches = 0
            start = time.time()
            for inputs, labels in track(train_loader, description=f"- Epoch {epoch + 1:3d}/{epochs}"):
                inputs, labels = inputs.to(device), labels.to(device)
                self.optimizer.zero_grad()
                outputs = self(inputs)
                loss = self.loss_func(outputs, labels)
                loss.backward()
                self.optimizer.step()
                running_loss += loss.item()
                batches += 1
            # BUGFIX: report the epoch-average loss, not just the last batch's.
            avg_loss = running_loss / max(batches, 1)
            train_acc = self.evaluate(train_loader)
            test_acc = self.evaluate(test_loader)
            cost = time.strftime("%H:%M:%S", time.gmtime(time.time() - start))
            print(f"- Epoch {epoch + 1:3d}/{epochs} [{cost}]", end=", ")
            print(f"loss: {avg_loss:.4f}, train_acc: {train_acc:.2f}, test_acc: {test_acc:.2f}")
            wandb.log({"loss": avg_loss, "train_accuracy": train_acc, "test_accuracy": test_acc})

            # Early stopping: checkpoint on improvement, otherwise count down.
            if test_acc > best:
                best = test_acc
                best_epoch = epoch + 1
                counter = 0
                model_path = f"./models/alexnet_{epoch + 1}.pth"
                self.save(model_path)
            else:
                counter += 1
                if counter >= tolerance:
                    print(f"Early stopping at epoch {epoch + 1}")
                    break
        print(f"Best test accuracy: {best:.4f} at epoch {best_epoch}")

    def evaluate(self, dataloader) -> float:
        """Return accuracy (%) over `dataloader`, restoring the train/eval mode."""
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.to(device)
        was_training = self.training
        # BUGFIX: dropout must be disabled while measuring accuracy;
        # the original evaluated with the model still in training mode.
        self.eval()
        correct = 0
        total = 0
        with torch.no_grad():
            for inputs, labels in dataloader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = self.predict(inputs)
                # argmax over the class dimension (replaces deprecated .data use).
                predicted = torch.argmax(outputs, dim=1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
        if was_training:
            self.train()
        # Guard against an empty dataloader.
        return 100 * correct / total if total else 0.0

    def predict(self, x: torch.Tensor) -> torch.Tensor:
        """Return logits for `x`; alias for the forward pass."""
        return self.forward(x)

    def save(self, path: str) -> None:
        """Serialize the model's state dict to `path`."""
        torch.save(self.state_dict(), path)

    def load(self, path: str) -> None:
        """Load a state dict from `path`.

        BUGFIX: `map_location` lets CUDA-saved checkpoints load on a
        CPU-only machine; `load_state_dict` then copies tensors onto the
        model's current device.
        """
        self.load_state_dict(torch.load(path, map_location=torch.device("cpu")))


def visualize():
    """Export an untrained AlexNet to ONNX and open it in the netron viewer."""
    dummy_input = torch.randn(1, 3, 227, 227)
    net = AlexNet()
    torch.onnx.export(net, dummy_input, "alexnet.onnx", verbose=True)

    import netron

    netron.start("alexnet.onnx")


if __name__ == "__main__":
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    transform = torchvision.transforms.Compose(
        [
            torchvision.transforms.Resize(256),
            torchvision.transforms.CenterCrop(227),
            torchvision.transforms.ToTensor(),
            torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    data = r"./data"

    train = torchvision.datasets.CIFAR10(data, train=True, download=False, transform=transform)
    test = torchvision.datasets.CIFAR10(data, train=False, download=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(train, batch_size=128, shuffle=True, num_workers=2)
    test_loader = torch.utils.data.DataLoader(test, batch_size=100, shuffle=False, num_workers=2)

    model = AlexNet()
    model.to(device)

    model.loss_func = nn.CrossEntropyLoss()
    model.optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)

    model.fit(train_loader, test_loader, epochs=50)
