

import os
import time
from typing import Optional

import torch
from torch.nn import Module
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LRScheduler

import parameters as p
from dataset import get_dataloader
from model import ConvNet
from utils import load, save, to_one_hot, get_scheduler


def train(model: Module, optimizer: Optimizer, device, train_dataloader, valid_dataloader, epochs: int = p.EPOCHS,
          save_on_step: int = p.SAVE_ON_STEP,
          scheduler: Optional[LRScheduler] = None,
          valid_on_step: int = p.VALID_ON_STEP,
          last_epoch: int = 0):
    """Run the training loop for `model` on `train_dataloader`.

    Args:
        model: network to train; moved to `device` before training.
        optimizer: optimizer already bound to `model.parameters()`.
        device: torch device string or object (e.g. 'cuda:0' or 'cpu').
        train_dataloader: yields (x, y) batches; y is converted to one-hot.
        valid_dataloader: reserved for validation (not yet implemented).
        epochs: total number of epochs to reach (exclusive upper bound).
        save_on_step: checkpoint every `save_on_step` epochs.
        scheduler: optional LR scheduler; stepped once per epoch when given.
        valid_on_step: intended validation interval (currently unused).
        last_epoch: epoch to resume from (0 for a fresh run).

    Raises:
        ValueError: if `save_on_step` exceeds the number of batches per epoch.
    """
    model = model.to(device=device)
    length = len(train_dataloader)
    print("################################## information ##################################")
    print(f"Batches : {length}")
    print(f"Batch size : {p.BATCH_SIZE}")
    print(f"current device : {device}")
    print()
    print('################################## ##################################\n')
    if length < save_on_step:
        # Fail fast instead of exit(): callers can catch this, and a library
        # function should not terminate the whole interpreter.
        raise ValueError(
            f"unreasonable save_on_step:\n total steps: {length}  \n save_on_step: {save_on_step}")

    # Targets are class indices; MSE loss requires one-hot target vectors.
    criterion = torch.nn.MSELoss()

    for epoch in range(last_epoch, epochs):
        print(f"epoch : {epoch}")
        model.train()  # ensure train mode (future validation code may set eval)
        running_loss = 0.0
        for x, y in train_dataloader:
            x = x.to(device)
            y = to_one_hot(y).to(device)
            y_pre = model(x)
            loss = criterion(y_pre, y)
            # Zero before backward so gradients left over from a restored
            # checkpoint (or a previous partial pass) cannot leak into step().
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            running_loss += loss.item()  # .item() — .data is deprecated
        if scheduler is not None:
            # Guard fixes a crash: scheduler defaults to None, but the old
            # code called scheduler.step() unconditionally.
            scheduler.step()
        print(f"loss : {running_loss / length}")

        # TODO: run validation over valid_dataloader every `valid_on_step`
        # epochs (previous draft was commented out here).

        if epoch != 0 and epoch % save_on_step == 0:
            # makedirs(exist_ok=True) handles missing parents and avoids the
            # exists()/mkdir() race of the original.
            os.makedirs(p.CHECKPOINT_PATH, exist_ok=True)
            save(model, epoch, optimizer)
            print('checkpoint saved')


if __name__ == '__main__':
    # Resume from the checkpoint when one exists; otherwise start fresh.
    # os.path.join fixes the original string concatenation, which produced a
    # wrong path whenever CHECKPOINT_PATH lacked a trailing separator.
    checkpoint_file = os.path.join(p.CHECKPOINT_PATH, "checkpoint.pt")
    if os.path.exists(checkpoint_file):
        last_epoch, model, optimizer = load()
    else:
        from torch.optim import Adam
        model = ConvNet(is_gray_image=True, num_class=10)
        last_epoch = 0
        optimizer = Adam(model.parameters(), lr=p.LEARNING_RATE)
    # Schedulers resumed with last_epoch > 0 require 'initial_lr' to be
    # present on each param group.
    optimizer.param_groups[0]['initial_lr'] = p.LEARNING_RATE
    scheduler = get_scheduler(optimizer=optimizer, last_epoch=last_epoch)
    t_dataloader, val_dataloader = get_dataloader()
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    train(model=model, optimizer=optimizer, device=device,
          scheduler=scheduler,
          last_epoch=last_epoch,
          train_dataloader=t_dataloader, valid_dataloader=val_dataloader)
