import torch
import torch.nn as nn
from torch.utils.data import random_split, DataLoader
import torch.optim as optimizer
import config
import os
import datetime
from utils import *
import dataset
from model import ResNet

# ---------------------------------------------------------------------------
# Data pipeline: load the GTZAN dataset and carve an 80/20 train/test split.
# NOTE(review): random_split is unseeded, so the split differs across runs —
# pass generator=torch.Generator().manual_seed(...) if reproducibility matters.
# NOTE: this rebinds the name `dataset` (previously the imported module); the
# module is no longer needed after this line.
# ---------------------------------------------------------------------------
dataset = dataset.GTZANDataset(config.DATA_DIR, config.LABEL, transform=config.transformer)
train_size = int(0.8 * len(dataset))
test_size = len(dataset) - train_size
train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])

train_loader = DataLoader(
    dataset=train_dataset,
    batch_size=config.BATCH_SIZE,
    shuffle=True,  # reshuffle every epoch so training batches are decorrelated
    num_workers=config.NUM_WORKERS,
    pin_memory=True,  # page-locked host memory speeds up host->GPU copies
)

test_loader = DataLoader(
    dataset=test_dataset,
    batch_size=config.BATCH_SIZE,
    shuffle=False,  # fix: evaluation is order-independent; shuffling adds nothing
    num_workers=config.NUM_WORKERS,
    pin_memory=True,
)

# Model, loss, and optimizer.
# NOTE(review): this rebinds `optimizer` (imported as `torch.optim as optimizer`)
# to the Adam instance; the name is kept because train()/main() reference this
# global. Cleaner would be `import torch.optim as optim`.
model = ResNet.ResNet18(num_classes=10).to(config.DEVICE)
loss_function = nn.CrossEntropyLoss()
optimizer = optimizer.Adam(model.parameters(), lr=config.LEARNING_RATE)

# Gradient scaler for fp16 mixed-precision (AMP) training.
Scaler = torch.cuda.amp.GradScaler()

# Per-epoch history, appended to by train() and plotted by draw() in main().
train_losses, train_acc, test_acc = [], [], []
def train(epoch):
    """Run one training epoch, then evaluate on the full test set.

    Appends the epoch's average loss and accuracies to the module-level
    history lists (train_losses, train_acc, test_acc) and prints a summary.
    Assumes main() has already put the model in train mode.

    Args:
        epoch: zero-based epoch index (printed as epoch + 1).
    """
    train_loss, train_correct = 0.0, 0.0
    for images, labels in train_loader:
        images = images.to(config.DEVICE)
        labels = labels.to(config.DEVICE)

        optimizer.zero_grad()
        with torch.cuda.amp.autocast():
            output = model(images)
            # CrossEntropyLoss accepts integer class indices directly; the
            # previous hand-built one-hot float targets were unnecessary and
            # give the same loss value for hard labels.
            loss = loss_function(output, labels)

        pred = output.argmax(dim=1)
        train_correct += torch.eq(pred, labels).float().sum().item()

        # Fix: route backward/step through the GradScaler so fp16 gradients
        # are scaled against underflow (Scaler was created but never used).
        Scaler.scale(loss).backward()
        Scaler.step(optimizer)
        Scaler.update()

        train_loss += loss.item()

    # Evaluation pass: no_grad avoids building autograd graphs; main()
    # restores train mode before the next epoch.
    model.eval()
    test_correct = 0.0
    with torch.no_grad():
        for images, labels in test_loader:
            images = images.to(config.DEVICE)
            labels = labels.to(config.DEVICE)
            with torch.cuda.amp.autocast():
                output = model(images)
            pred = output.argmax(dim=1)
            test_correct += torch.eq(pred, labels).float().sum().item()

    # Fix: train_loss is a sum of per-batch *mean* losses, so average over
    # the number of batches (dividing by train_size under-reported the loss
    # by roughly a factor of batch_size).
    avg_loss = train_loss / len(train_loader)
    train_losses.append(avg_loss)
    train_acc.append(train_correct / train_size)
    test_acc.append(test_correct / test_size)
    print("=> epoch {}: train loss: {}, train_acc: {}, test_acc: {}"
          .format(epoch + 1, avg_loss, train_correct / train_size, test_correct / test_size))
    print("=> train_correct: {}, train_size: {}, test_correct: {}, test_size: {}"
          .format(train_correct, train_size, test_correct, test_size))

def main():
    """Entry point: optionally resume from a checkpoint, train for
    config.NUM_EPOCHS epochs, optionally save, then plot the history."""
    if config.LOAD_MODEL:
        load_checkpoint(model, optimizer, "checkpoint/checkpoint.pth.tar", config.LEARNING_RATE)
    print("=> Starting training")

    for epoch in range(config.NUM_EPOCHS):
        # train() switches to eval mode for its test pass, so re-enable
        # training mode at the top of every epoch.
        model.train()
        train(epoch)

    if config.SAVE_MODEL:
        # Fix: ensure the target directory exists before writing the file.
        os.makedirs("checkpoint", exist_ok=True)
        save_checkpoint(model, optimizer, "checkpoint/checkpoint.pth.tar")

    # Fix: the second argument is the train-accuracy curve; the original
    # passed test_acc twice and never plotted train accuracy.
    draw(train_losses, train_acc, test_acc)

# Run training only when executed as a script (not when imported).
if __name__ == "__main__":
    main()






