'''
Author: SlytherinGe
LastEditTime: 2022-02-14 15:46:29
'''
import os
import torch
import torch.optim as optim
import torch.utils.data as data
import torch.backends.cudnn as cudnn
import matplotlib.pyplot as plt
import torch.nn.functional as F

from models import *
from data import *

import os

os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

def get_scheduler(optimizer, cfg):
    """Build a learning-rate scheduler from the config.

    Args:
        optimizer: the optimizer whose learning rate will be scheduled.
        cfg (dict): full config; only ``cfg['hyperparam']`` is read here
            (keys: 'lr_schedule', and depending on the schedule:
            'epoch'/'lr', 'steps'/'gamma', or 'gamma').

    Returns:
        A torch.optim.lr_scheduler instance, or None when
        ``lr_schedule`` names no known schedule.
    """
    hyper = cfg['hyperparam']
    schedule = hyper['lr_schedule']
    if schedule == 'cosine':
        # anneal over (epoch - 1) steps down to lr * 0.001
        return optim.lr_scheduler.CosineAnnealingLR(
            optimizer, hyper['epoch'] - 1, hyper['lr'] * 0.001)
    if schedule == 'multisteps':
        return optim.lr_scheduler.MultiStepLR(
            optimizer, hyper['steps'], hyper['gamma'])
    if schedule == 'exponential':
        return optim.lr_scheduler.ExponentialLR(optimizer, hyper['gamma'])
    return None

def train_model(cfg):
    """Train the segmentation model described by *cfg*.

    Args:
        cfg (dict): nested config with three sections:
            'dataset'    - args for RamdonInstanceDataset
            'model'      - resnet type, embed dims, pretrained path, save root
            'hyperparam' - lr, momentum, batch size, epochs, schedule, ...

    Side effects:
        - saves a checkpoint under <local_root>/tmp/ after every epoch
        - plots per-epoch evaluation figures via plot_embed_results
    Requires a CUDA device (images are moved with .cuda()).
    """
    # --- datasets -----------------------------------------------------
    dataset_cfg = cfg['dataset']
    train_dataset = RamdonInstanceDataset(dataset_cfg['num_train'], dataset_cfg['img_size'],
                                          dataset_cfg['bg_color'], dataset_cfg['target_color'])
    val_dataset = RamdonInstanceDataset(dataset_cfg['num_test'], dataset_cfg['img_size'],
                                        dataset_cfg['bg_color'], dataset_cfg['target_color'])
    # --- model --------------------------------------------------------
    model_cfg = cfg['model']
    model_resnet = create_resnetX_from_pretrained(model_cfg['resnet_type'],
                                                  model_cfg['embed_dims'],
                                                  model_cfg['pretrained_path'])
    # NOTE(review): BCEWithLogitsLoss on the raw logits would be more
    # numerically stable than sigmoid() + BCELoss; kept as-is to preserve
    # exact training behavior.
    loss_fn_seg = torch.nn.BCELoss()
    net = torch.nn.DataParallel(model_resnet)
    cudnn.benchmark = True
    # --- optimizer / dataloader ---------------------------------------
    hyper = cfg['hyperparam']
    optimizer = optim.SGD(net.parameters(), hyper['lr'],
                          hyper['momentum'],
                          weight_decay=5e-4)
    train_loader = data.DataLoader(train_dataset, hyper['batch_size'],
                                   num_workers=hyper['num_workers'],
                                   collate_fn=detection_collate)
    # --- training loop ------------------------------------------------
    EPOCH = hyper['epoch']
    scheduler = get_scheduler(optimizer, cfg)
    iter_per_epoch = len(train_dataset) // hyper['batch_size']
    # make sure the checkpoint directory exists before the first save
    save_dir = os.path.join(model_cfg['local_root'], 'tmp/')
    os.makedirs(save_dir, exist_ok=True)
    net.train()
    print('training start!')
    for epoch in range(EPOCH):
        iteration = 0       # batches seen this epoch
        total_loss = 0.0    # running sum of scalar losses
        for images, targets in train_loader:
            images = images.cuda()
            out = net(images)
            # build per-pixel binary targets from the instance masks
            seg_targets = get_intance_seg_targets(out, targets)
            optimizer.zero_grad()
            loss = loss_fn_seg(out.sigmoid(), seg_targets)
            # BUG FIX: accumulate the detached Python scalar; summing the
            # loss tensor itself kept every iteration's autograd graph
            # alive and grew memory over the epoch.
            total_loss += loss.item()
            loss.backward()
            optimizer.step()
            iteration += 1
            if iteration % 100 == 0:
                print("iter:[{:d}/{:d}] epoch:[{:d}/{:d}] lr:{} loss:{:.4f}".format(
                        iteration, iter_per_epoch, epoch+1, EPOCH,
                        scheduler.get_last_lr(),
                        loss.item()
                ))
        # guard against an empty loader (would divide by zero)
        print('average loss is: {:.3f}'.format(total_loss / max(iteration, 1)))
        print('saving model...')
        torch.save(model_resnet.state_dict(),
                   os.path.join(save_dir, '{}_{}.pth'.format(model_cfg['resnet_type'], epoch+1)))
        print('starting to evaluate model...')
        plot_embed_results(net, val_dataset, epoch+1, EPOCH)
        scheduler.step()
    print('training done!')

def detection_collate(batch):
    """Custom collate fn for batches of images whose annotation lists
    have varying lengths (so they cannot be stacked into one tensor).

    Arguments:
        batch: (tuple) a tuple of (image tensor, annotations) samples

    Return:
        A tuple containing:
            1) (tensor) batch of images stacked on their 0 dim
            2) (list of tensors) annotations for a given image are stacked on
                                 0 dim
    """
    images = [sample[0] for sample in batch]
    annotations = [sample[1] for sample in batch]
    return torch.stack(images, 0), annotations

def plot_embed_results(net, test_dataset, curr_epoch, total_epoch):
    """Plot each test image next to the model's embedding map for it.

    Top row: raw images; bottom row: per-image network output rendered
    with a rainbow colormap. The net is switched to eval mode for the
    forward passes and restored to train mode afterwards.
    Requires a CUDA device (inputs are moved with .cuda()).
    """
    num_img = len(test_dataset)
    net.eval()
    plt.figure(curr_epoch, figsize=[20, 5], facecolor='gray', edgecolor='w')
    with torch.no_grad():
        plt.suptitle("test result of epoch: {}/{}".format(curr_epoch, total_epoch), fontsize=24)
        # NOTE: loop variable renamed from `data`, which shadowed the
        # module alias `torch.utils.data as data`.
        for idx, sample in enumerate(test_dataset):
            image, _ = sample
            plt.subplot(2, num_img, idx + 1)
            plt.imshow(image.type(torch.uint8).permute(1, 2, 0))
            plt.axis('off')
            plt.title('raw img {}'.format(idx + 1))
            image = image.cuda()
            # add a batch dim of 1 for the forward pass
            res = net(image[None])
            plt.subplot(2, num_img, idx + 1 + num_img)
            plt.imshow(res[0][0].cpu(), cmap=plt.get_cmap('rainbow'))
            plt.colorbar(fraction=0.05, pad=0.02)
            plt.axis('off')
            plt.title('embed {}'.format(idx + 1))
    net.train()

def get_instance_embed_groups(embed_map, multi_img_instance_masks):
    '''
    Collect the embedding vectors belonging to each instance of each image.
    Args:
        embed_map (Tensor: BxDxHxW): per-pixel embeddings for the batch
        multi_img_instance_masks (list[Tensor: IxHxW]): one mask stack per
            image; masks are resized to the embedding resolution first
    Return:
        list[list[Tensor: PxD]]: per image, per instance, the embeddings
        of that instance's nonzero pixels
    '''
    feat_h, feat_w = embed_map.shape[2:]
    all_groups = []
    for img_idx, masks in enumerate(multi_img_instance_masks):
        # bring masks onto the right device and down to feature resolution
        resized = F.interpolate(masks[None].to(embed_map.device),
                                (feat_h, feat_w))[0]
        groups = []
        for mask in resized:
            coords = mask.detach().nonzero()
            # gather D-dim embeddings at every mask pixel -> (P, D)
            pixel_embeds = embed_map[img_idx, :, coords[:, 0], coords[:, 1]].T
            groups.append(pixel_embeds)
        all_groups.append(groups)
    return all_groups

def get_intance_seg_targets(embed_map, multi_img_instance_masks):
    '''
    Build binary foreground targets at the embedding-map resolution.
    Args:
        embed_map (Tensor: BxDxHxW): used only for its device and H, W
        multi_img_instance_masks (list[Tensor: IxHxW]): instance masks
            per image; a pixel covered by any instance becomes 1.0
    Return:
        Tensor: Bx1xHxW float32 target, 1.0 where any instance is present
    '''
    feat_h, feat_w = embed_map.shape[2:]
    per_image = []
    for masks in multi_img_instance_masks:
        resized = F.interpolate(masks[None].to(embed_map.device),
                                (feat_h, feat_w))
        # union over instances: any nonzero instance mask -> foreground
        per_image.append(resized.sum(dim=1).ge(1).type(torch.float32))
    return torch.stack(per_image)

if __name__ == '__main__':

    # Model: which resnet backbone to use, its output embedding width,
    # where the pretrained weights live and where checkpoints are saved.
    model_cfg = {
        'pretrained_path': 'save/resnet50.pth',
        'resnet_type': 'resnet50',
        'embed_dims': 1,
        'local_root': 'save',
    }
    # Dataset: synthetic random-instance images and their colors.
    dataset_cfg = {
        'num_train': 256,
        'num_test': 4,
        'img_size': (512, 512),
        'bg_color': (0, 0, 0),
        'target_color': (255, 255, 255),
    }
    # Optimization hyperparameters ('steps'/'gamma' only matter for the
    # 'multisteps'/'exponential' schedules).
    hyperparam_cfg = {
        'batch_size': 2,
        'num_workers': 0,
        'lr': 1e-4,
        'momentum': 0.9,
        'epoch': 20,
        'steps': None,
        'lr_schedule': 'cosine',
        'gamma': 0,
    }
    train_model({
        'model': model_cfg,
        'dataset': dataset_cfg,
        'hyperparam': hyperparam_cfg,
    })

