import torch
import torch.nn as nn
import math
import os
import datetime
from torch.utils.data import DataLoader
from dataset import CustomDataset
from models import DFN
from utils import Lossor, Evaluator


def main():
    """Train a DFN segmentation network on Cityscapes-style data.

    All hyperparameters are hard-coded below. A checkpoint and the
    accumulated training history are written to ``<ckpt_path>/<model_id>/``
    at the end of every epoch. Set ``last_epoch`` > 0 (and point
    ``net_weight_path`` at an existing checkpoint) to resume training.
    """
    batch_size = 10
    start_cuda = 'cuda:2'
    gpu_ids = [2]

    # Backbone choice and the matching pretrained weight file.
    encoder = 'shufflenet'  # alternative: 'resnet'
    weight_pretrain = './pretrained/shufflenetv2_x1_69.402_88.374.pth.tar'
    # weight_pretrain = './pretrained/resnet101-imagenet.pth'  # for 'resnet'

    num_class = 19   # Cityscapes trainId classes
    num_show = 2     # log loss/mIoU every `num_show` iterations
    ckpt_path = './ckpt'

    # Resume support: checkpoint path is relative to ckpt_path.
    last_epoch = 0
    net_weight_path = 'shufflenet-dfn/model_epoch_79.pth'

    dataset = CustomDataset('leftImg8bit', 'gtFine',
                            '_leftImg8bit.png', '_gtFine_labelTrainIds.png')
    trainloader = DataLoader(dataset=dataset, batch_size=batch_size,
                             shuffle=True, num_workers=4)
    num_epoch = 150
    focal_weight = 0.1
    learning_rate = 4e-3
    weight_decay = 0.0001
    momentum = 0.9
    power = 0.9      # exponent of the "poly" learning-rate decay schedule
    model_id = encoder + '-dfn-v2'
    save_dir = os.path.join(ckpt_path, model_id)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    # When resuming, weights come from the checkpoint instead of the
    # ImageNet-pretrained file.
    if last_epoch != 0:
        weight_pretrain = None
    net = DFN(num_class, weights=weight_pretrain, encoder=encoder)

    device = torch.device(start_cuda if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        net = nn.DataParallel(net, device_ids=gpu_ids)
    if last_epoch != 0 and net_weight_path:
        print('loading the net weight : {}'.format(net_weight_path))
        net.load_state_dict(torch.load(os.path.join(ckpt_path, net_weight_path)))
    net.to(device)
    # BUGFIX: train mode was previously enabled only on the resume path;
    # set it unconditionally before the training loop.
    net.train()

    # BUGFIX: build the optimizer once. The original re-created it every
    # epoch, which silently reset SGD's momentum buffers at each epoch
    # boundary; now only the learning rate is updated per epoch.
    optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate,
                                momentum=momentum, weight_decay=weight_decay)

    # training
    print('start training on GPU : {}'.format(gpu_ids))
    print('checkpoint will be saved in {}'.format(save_dir))
    history = {'train': []}
    for epoch in range(last_epoch + 1, num_epoch):  # loop over the dataset multiple times
        # Fresh per-epoch loss/metric accumulators.
        crit = Lossor(focal_weight)
        evaluator = Evaluator(num_class)
        # "Poly" schedule: lr = base * (1 - epoch/num_epoch)^power
        lr = learning_rate * math.pow(1 - epoch / num_epoch, power)
        for group in optimizer.param_groups:
            group['lr'] = lr

        print('learning rate is {}'.format(lr))
        for i, data in enumerate(trainloader, 0):
            inputs, labels, _ = data
            inputs = inputs.float().to(device)
            labels = labels.to(device)

            optimizer.zero_grad()
            outputs = net(inputs)
            loss = crit.MixLoss(outputs, labels)
            loss.backward()
            optimizer.step()

            # outputs[-2] is presumably the main per-pixel logits head
            # (DFN returns multiple heads) -- TODO confirm against models.DFN.
            evaluator.add_batch(labels, torch.argmax(outputs[-2].data, 1))
            if i % num_show == num_show - 1:
                loss_tmp = crit.mean_loss()
                mIoU_tmp = evaluator.mIoU()
                print(datetime.datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
                      ' epoch: %d/iter: %-5d    loss: %-10.3f    mIoU: %-10.3f' % (
                      epoch, i + 1, loss_tmp, mIoU_tmp))
                history['train'].append({'epoch': epoch, 'iter': i + 1,
                                         'loss': loss_tmp, 'mIoU': mIoU_tmp})

        # Persist model weights and the cumulative history every epoch.
        torch.save(net.state_dict(), '{}/model_epoch_{}.pth'.format(save_dir, epoch))
        torch.save(history, '{}/history_epoch_{}.pth'.format(save_dir, epoch))
    print('training end.')


# Script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()