import os
import torch
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from datetime import datetime as dt

from model import get_crnn, get_res
from dataset import SunDataset
from utils import AverageMeter
# from linear import myModel

device = 'cuda:0'
model_root = 'saved_models/'


def main():
    """Train and validate the model, checkpointing after every epoch.

    Side effects: creates `saved_models/` and `logs/` directories, writes a
    checkpoint per epoch, and logs per-epoch losses to TensorBoard.
    """
    if not os.path.exists(model_root):
        os.makedirs(model_root)
    if not os.path.exists('logs/'):
        os.makedirs('logs/')

    lr = 1e-4
    batch_size = 1  # validation batch size (training loader uses 64 below)
    # checkpoint = torch.load(model_root+'checkpoint1.pth')
    checkpoint = None

    model = get_res()
    model.to(device)

    # Count only trainable parameters.
    params_size = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('size of {size:.2f}M'.format(size=params_size / 1e6))

    optim = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = torch.nn.SmoothL1Loss()

    # NOTE(review): shuffle=False on the *training* loader is unusual for SGD;
    # kept as-is in case the data is intentionally sequential (delay=3 hints
    # at a time-series setup) -- confirm with the dataset owner.
    train_set = SunDataset(root='D:/datasets/SunData', split='train', size=224, gray=False, delay=3)
    train_loader = DataLoader(
        train_set, batch_size=64, num_workers=0, shuffle=False)

    val_set = SunDataset(root='D:/datasets/SunData', split='val', size=224, gray=False, delay=3)
    val_loader = DataLoader(val_set, batch_size=batch_size,
                            shuffle=False, num_workers=0)

    start_epoch = 0
    if checkpoint is not None:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['model'])
        # optim.load_state_dict(checkpoint['optim'])

    num_epoches = 40
    TIMESTAMP = "{:%Y-%m-%dT%H-%M-%S/}".format(dt.now())
    # Fix: open the per-run writer once instead of re-opening the same
    # log_dir every epoch (the old loop leaked writers and fragmented the
    # event files). Close it even if training raises.
    writer = SummaryWriter(log_dir='logs/log_' + TIMESTAMP, comment='epoch')
    try:
        for epoch in range(start_epoch, num_epoches):
            train_loss = AverageMeter()
            test_loss = AverageMeter()

            train(epoch, num_epoches, model, train_loader,
                  criterion, optim, train_loss, writer)
            torch.save({'model': model.state_dict(), 'optim': optim.state_dict(), 'epoch': epoch},
                       model_root + 'checkpoint{}.pth'.format(epoch))

            val(epoch, model, val_loader, criterion, test_loss, writer)

            # Whole-run curves live in a separate, stable log directory.
            with SummaryWriter(log_dir='logs/whole_proc/', comment='whole') as log:
                log.add_scalar('train/loss', train_loss.avg, epoch)
                log.add_scalar('test/loss', test_loss.avg, epoch)
    finally:
        writer.close()


def train(epoch, n_epoch, model, dataloader, criterion, optim, loss_meter, logger):
    """Run one training epoch.

    Args:
        epoch: current epoch index (used for progress bar and tag names).
        n_epoch: total number of epochs (progress bar display only).
        model: network; moved inputs go through `device`, outputs back to CPU.
        dataloader: yields (x, y) batches.
        criterion: loss taking (prediction, target) tensors.
        optim: optimizer stepped once per batch.
        loss_meter: running-average accumulator, updated per batch.
        logger: TensorBoard SummaryWriter for scalar logging.

    Side effects: saves `model_last.pth` periodically under `model_root`.
    """
    n_batches = len(dataloader)
    # Aim for ~500 log points per epoch on large datasets; log every batch otherwise.
    log_freq = n_batches // 500 if n_batches > 1000 else 1
    save_freq = 20 * log_freq
    bar = tqdm(total=n_batches)

    model.train()

    for i, (x, y) in enumerate(dataloader):

        y_hat = model(x.to(device)).cpu()
        batch_size = x.shape[0]
        loss = criterion(y_hat.flatten(), y.float())
        # NOTE(review): SmoothL1Loss averages over the batch by default, so
        # dividing by batch_size again makes the logged value depend on the
        # loader's batch size -- confirm this is intended before changing.
        loss_meter.update(loss.item() / batch_size)

        optim.zero_grad()
        loss.backward()
        optim.step()

        bar.update()
        bar.set_description(
            '[{}/{}] loss:{:.5f}'.format(epoch, n_epoch, loss_meter.avg))

        if i % log_freq == 0:
            # Fix: global_step must be an int; `i / log_freq` produced a float
            # under Python 3 true division.
            logger.add_scalar(
                'train/loss_of_epoch{}'.format(epoch), loss_meter.avg, i // log_freq)
        if i % save_freq == save_freq - 1:
            logger.add_scalars('train/pred_of_epoch{}'.format(epoch),
                               {'gt': y.mean().item(),
                                'pred': y_hat.mean().item()}, i // log_freq)
            torch.save(model.state_dict(), model_root + 'model_last.pth')
    bar.close()


def val(epoch, model, dataloader, criterion, loss_meter, logger):
    """Evaluate the model for one epoch under `torch.no_grad()`.

    Args:
        epoch: current epoch index (used in TensorBoard tag names).
        model: network to evaluate; switched to eval mode here.
        dataloader: yields (x, y) batches.
        criterion: loss taking (prediction, target) tensors.
        loss_meter: running-average accumulator, updated per batch.
        logger: TensorBoard SummaryWriter for scalar logging.
    """
    model.eval()

    with torch.no_grad():
        for i, (x, y) in enumerate(dataloader):
            y_hat = model(x.to(device)).cpu()
            batch_size = x.shape[0]
            # Fix: cast targets to float to match train(); SmoothL1Loss
            # errors out when the target dtype differs from the float
            # prediction (e.g. float64 or integer labels).
            loss = criterion(y_hat.flatten(), y.float())
            loss_meter.update(loss.item() / batch_size)

            logger.add_scalar('test/loss_of_epoch{}'.format(epoch),
                              loss_meter.avg, i)
            logger.add_scalars('test/pred_of_epoch{}'.format(epoch),
                               {'gt': y.mean().item(),
                                'pred': y_hat.mean().item()}, i)


if __name__ == "__main__":
    main()
