import os
import copy
import torch
import random
import warnings
import argparse
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from modules import (
    BuildDataloader, touchdir, loadckpts, saveckpts, judgefileexist, postprocesspredgtpairs,
    BuildDataset,  BuildSegmentor, BuildOptimizer, BuildScheduler, Logger, ConfigParser
)
warnings.filterwarnings('ignore')


'''parse arguments in command line'''
def parsecmdargs(args=None):
    """Parse the command line arguments for the trainer.

    Args:
        args (list[str] | None): argument list to parse; ``None`` (the default)
            falls back to ``sys.argv[1:]``, preserving the original behavior.
    Returns:
        argparse.Namespace with attributes ``device`` (int), ``cfgfilepath``
        (str, required) and ``ckptspath`` (str, '' when not resuming).
    """
    parser = argparse.ArgumentParser(description='SSSegmentation is an open source supervised semantic segmentation toolbox based on PyTorch')
    # type=int so a CLI-supplied value matches the integer default and can be passed
    # to torch.cuda.set_device directly (without it, '--device 1' arrived as the
    # string '1'). NOTE(review): the help text mentions '0,1,2,3', but this script
    # only ever uses a single device id — confirm the multi-GPU hint is stale.
    parser.add_argument('--device', dest='device', help='cuda device, i.e. 0 or 0,1,2,3', default=0, type=int)
    parser.add_argument('--cfgfilepath', dest='cfgfilepath', help='config file path you want to use', type=str, required=True)
    parser.add_argument('--ckptspath', dest='ckptspath', help='checkpoints you want to resume from', default='', type=str)
    cmd_args = parser.parse_args(args)
    return cmd_args


'''Trainer'''
class Trainer():
    """Single-GPU training driver.

    Builds the dataset, segmentor, optimizer and scheduler from a parsed config,
    runs the epoch loop with optional PyTorch AMP (fp16), resumes from a
    checkpoint when requested, and periodically saves checkpoints and evaluates
    on the TEST split (tracking the best 'miou').
    """
    def __init__(self, cfg,logger_handle, cmd_args, cfg_file_path):
        # set attribute
        self.cfg = cfg  # parsed config object exposing SEGMENTOR_CFG
        self.logger_handle = logger_handle  # Logger used for all progress/metric output
        self.cmd_args = cmd_args  # parsed CLI namespace (device, cfgfilepath, ckptspath)
        self.cfg_file_path = cfg_file_path  # config file path, echoed in logs/assert messages
        assert torch.cuda.is_available(), 'cuda is not available'
    '''start trainer'''
    def start(self):
        """Run the full training loop: build all components, optionally resume
        from `cmd_args.ckptspath`, then iterate epochs with periodic checkpoint
        saving and evaluation."""
        best_result = None  # best 'miou' seen so far; None until resumed or first eval
        cfg, logger_handle, cmd_args, cfg_file_path = self.cfg, self.logger_handle, self.cmd_args, self.cfg_file_path
        # build dataset and dataloader
        dataset = BuildDataset(mode='TRAIN', logger_handle=logger_handle, dataset_cfg=cfg.SEGMENTOR_CFG['dataset'])
        assert dataset.num_classes == cfg.SEGMENTOR_CFG['num_classes'], 'parsed config file %s error' % cfg_file_path
        dataloader_cfg = copy.deepcopy(cfg.SEGMENTOR_CFG['dataloader'])
        # NOTE(review): the block below is the disabled per-GPU batch-size adaptation
        # from the distributed variant of this script; here dataloader_cfg['train'] is
        # consumed as-is, so the config must already carry the final keys.
        # auto_adapt_to_expected_train_bs = dataloader_cfg.pop('auto_adapt_to_expected_train_bs')
        # expected_total_train_bs_for_assert = dataloader_cfg.pop('expected_total_train_bs_for_assert')
        # if auto_adapt_to_expected_train_bs:
        #     dataloader_cfg['train']['batch_size_per_gpu'] = expected_total_train_bs_for_assert
        # dataloader_cfg['train']['batch_size'], dataloader_cfg['train']['num_workers'] = dataloader_cfg['train'].pop('batch_size_per_gpu'), dataloader_cfg['train'].pop('num_workers_per_gpu')
        # dataloader_cfg['test']['batch_size'], dataloader_cfg['test']['num_workers'] = dataloader_cfg['test'].pop('batch_size_per_gpu'), dataloader_cfg['test'].pop('num_workers_per_gpu')
        # assert expected_total_train_bs_for_assert == dataloader_cfg['train']['batch_size']
        dataloader = BuildDataloader(dataset=dataset, dataloader_cfg=dataloader_cfg['train'])
        # build segmentor and move it to the selected CUDA device
        segmentor = BuildSegmentor(segmentor_cfg=copy.deepcopy(cfg.SEGMENTOR_CFG), mode='TRAIN')
        torch.cuda.set_device(cmd_args.device)
        segmentor.cuda(cmd_args.device)
        torch.backends.cudnn.benchmark = cfg.SEGMENTOR_CFG['benchmark']
        # build optimizer
        optimizer = BuildOptimizer(segmentor, cfg.SEGMENTOR_CFG['scheduler']['optimizer'])
        # build fp16 (PyTorch AMP) support; grad_scaler stays None when fp16 is disabled
        fp16_cfg = self.cfg.SEGMENTOR_CFG.get('fp16_cfg', {'type': None})
        # NOTE(review): .pop() mutates the config dict in place when 'fp16_cfg' exists in
        # SEGMENTOR_CFG, so a second start() on the same cfg would KeyError here — confirm
        # whether start() is ever invoked more than once.
        fp16_type, grad_scaler = fp16_cfg.pop('type'), None
        assert fp16_type in [None, 'pytorch']
        if fp16_type in ['pytorch']:
            from torch.cuda.amp import autocast
            from torch.cuda.amp import GradScaler
            from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
            grad_scaler = GradScaler(**fp16_cfg['grad_scaler'])
        # build scheduler; lr/params_rules are copied up from the optimizer sub-config
        scheduler_cfg = copy.deepcopy(cfg.SEGMENTOR_CFG['scheduler'])
        scheduler_cfg.update({
            'lr': cfg.SEGMENTOR_CFG['scheduler']['optimizer']['lr'],
            'iters_per_epoch': len(dataloader),
            'params_rules': cfg.SEGMENTOR_CFG['scheduler']['optimizer']['params_rules'],
        })
        scheduler = BuildScheduler(optimizer=optimizer, scheduler_cfg=scheduler_cfg)
        start_epoch, end_epoch = 1, scheduler_cfg['max_epochs']
        # load ckpts and restore model/optimizer/scheduler/grad-scaler state if resuming
        if cmd_args.ckptspath and judgefileexist(cmd_args.ckptspath):
            ckpts = loadckpts(cmd_args.ckptspath)
            try:
                segmentor.load_state_dict(ckpts['model'])
            except Exception as e:
                # fall back to a partial load (e.g. changed heads) rather than aborting
                logger_handle.warning(str(e) + '\n' + 'Try to load ckpts by using strict=False')
                segmentor.load_state_dict(ckpts['model'], strict=False)
            if 'optimizer' in ckpts:
                optimizer.load_state_dict(ckpts['optimizer'])
            if 'cur_epoch' in ckpts:
                start_epoch = ckpts['cur_epoch'] + 1
                scheduler.setstate({'cur_epoch': ckpts['cur_epoch'], 'cur_iter': ckpts['cur_iter']})
                # checkpoint must have been saved on an epoch boundary with this dataloader length
                assert ckpts['cur_iter'] == len(dataloader) * ckpts['cur_epoch']
            if 'miou' in ckpts:
                best_result = ckpts['miou']
            if 'grad_scaler' in ckpts and fp16_type in ['pytorch']:
                grad_scaler.load_state_dict(ckpts['grad_scaler'])
        else:
            # normalize to '' so the resume path is logged consistently below
            cmd_args.ckptspath = ''
        # parallel segmentor — NOTE(review): the DDP wrap is commented out, so
        # register_comm_hook below would run on a plain nn.Module; confirm the fp16
        # path is exercised only with the distributed wrapper restored.
        build_dist_model_cfg = self.cfg.SEGMENTOR_CFG.get('build_dist_model_cfg', {})
        build_dist_model_cfg.update({'device_ids': [cmd_args.device]})
        #segmentor = BuildDistributedModel(segmentor, build_dist_model_cfg)
        if fp16_type in ['pytorch']:
            segmentor.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
        # print config
        logger_handle.info(f'Config file path: {cfg_file_path}')
        logger_handle.info(f'Config details: \n{cfg.SEGMENTOR_CFG}')
        logger_handle.info(f'Resume from: {cmd_args.ckptspath}')
        # start to train the segmentor; losses_log_dict_memory accumulates per-key
        # loss values between log prints and is flushed at each log interval
        FloatTensor, losses_log_dict_memory = torch.cuda.FloatTensor, {}
        for epoch in range(start_epoch, end_epoch+1):
            # --set train
            segmentor.train()
            #dataloader.sampler.set_epoch(epoch)
            # --train epoch
            for batch_idx, samples_meta in enumerate(dataloader):
                # scheduler updates the lr once per iteration, before the forward pass
                learning_rate = scheduler.updatelr()
                images = samples_meta['image'].type(FloatTensor)
                targets = {'seg_target': samples_meta['seg_target'].type(FloatTensor)}
                if 'edge_target' in samples_meta: targets['edge_target'] = samples_meta['edge_target'].type(FloatTensor)
                if 'bigmodeloutput' in samples_meta: targets['bigmodeloutput'] = samples_meta['bigmodeloutput'].type(FloatTensor)
                optimizer.zero_grad()
                # MCIBI-family segmentors additionally consume lr/epoch in forward
                forward_kwargs = {'learning_rate': learning_rate, 'epoch': epoch} if cfg.SEGMENTOR_CFG['type'] in ['MCIBI', 'MCIBIPlusPlus'] else {}
                if fp16_type in ['pytorch']:
                    with autocast(**fp16_cfg['autocast']):
                        loss, losses_log_dict = segmentor(images, targets, **forward_kwargs)
                else:
                    loss, losses_log_dict = segmentor(images, targets, **forward_kwargs)
                for key, value in losses_log_dict.items():
                    if key in losses_log_dict_memory:
                        losses_log_dict_memory[key].append(value)
                    else:
                        losses_log_dict_memory[key] = [value]
                # scaled backward under AMP so the scaler can unscale in scheduler.step
                if fp16_type in ['pytorch']:
                    grad_scaler.scale(loss).backward()
                else:
                    loss.backward()
                scheduler.step(grad_scaler=grad_scaler)
                if (scheduler.cur_iter % cfg.SEGMENTOR_CFG['log_interval_iterations'] == 0):
                    print_log = {
                        'cur_epoch': epoch, 'max_epochs': end_epoch, 'cur_iter': scheduler.cur_iter, 'max_iters': scheduler.max_iters,
                        'cur_iter_in_cur_epoch': batch_idx+1, 'max_iters_in_cur_epoch': len(dataloader), 'segmentor': cfg.SEGMENTOR_CFG['type'],
                        'backbone': cfg.SEGMENTOR_CFG['backbone']['structure_type'], 'dataset': cfg.SEGMENTOR_CFG['dataset']['type'],
                        'learning_rate': learning_rate,
                    }
                    # log the mean of each loss key accumulated since the last print
                    for key in list(losses_log_dict_memory.keys()):
                        print_log[key] = sum(losses_log_dict_memory[key]) / len(losses_log_dict_memory[key])
                    logger_handle.info(print_log)
                    losses_log_dict_memory = dict()
            # keep scheduler state in sync with the finished epoch before checkpointing
            scheduler.cur_epoch = epoch
            # --save ckpts
            if (epoch % cfg.SEGMENTOR_CFG['save_interval_epochs'] == 0) or (epoch == end_epoch):
                state_dict = scheduler.state()
                state_dict['model'] = segmentor.state_dict()
                if fp16_type in ['pytorch']:
                    state_dict['grad_scaler'] = grad_scaler.state_dict()
                savepath = os.path.join(cfg.SEGMENTOR_CFG['work_dir'], 'epoch_%s.pth' % epoch)
                saveckpts(state_dict, savepath)
            # --eval ckpts; track the best miou and save it as a separate checkpoint
            if (epoch % cfg.SEGMENTOR_CFG['eval_interval_epochs'] == 0) or (epoch == end_epoch):
                self.logger_handle.info(f'Evaluate {cfg.SEGMENTOR_CFG["type"]} at epoch {epoch}')
                result = self.evaluate(segmentor)
                if best_result is None:
                    best_result = 0
                if result["miou"] > best_result:
                    best_result = result["miou"]
                    state_dict = scheduler.state()
                    state_dict['model'] = segmentor.state_dict()
                    state_dict["miou"] = result["miou"]
                    if fp16_type in ['pytorch']:
                        state_dict['grad_scaler'] = grad_scaler.state_dict()
                    savepath = os.path.join(cfg.SEGMENTOR_CFG['work_dir'], 'best_epoch_%s.pth' % epoch)
                    saveckpts(state_dict, savepath)
    '''evaluate'''
    def evaluate(self, segmentor):
        """Evaluate `segmentor` on the TEST split and return the metric dict from
        dataset.evaluate (callers read at least the 'miou' entry).

        The segmentor is temporarily switched to TEST mode/eval and restored to
        TRAIN mode before returning.
        """
        cfg, logger_handle, cmd_args, cfg_file_path = self.cfg, self.logger_handle, self.cmd_args, self.cfg_file_path
        # build dataset and dataloader
        dataset = BuildDataset(mode='TEST', logger_handle=logger_handle, dataset_cfg=cfg.SEGMENTOR_CFG['dataset'])
        assert dataset.num_classes == cfg.SEGMENTOR_CFG['num_classes'], 'parsed config file %s error' % cfg_file_path
        dataloader_cfg = copy.deepcopy(cfg.SEGMENTOR_CFG['dataloader'])
        # dataloader_cfg['test']['batch_size'], dataloader_cfg['test']['num_workers'] = dataloader_cfg['test'].pop('batch_size_per_gpu'), dataloader_cfg['test'].pop('num_workers_per_gpu')
        dataloader = BuildDataloader(dataset=dataset, dataloader_cfg=dataloader_cfg['test'])
        # start to eval
        segmentor.eval()
        segmentor.mode = 'TEST'
        inference_cfg, all_preds, all_gts = cfg.SEGMENTOR_CFG['inference'], [], []
        with torch.no_grad():
            #dataloader.sampler.set_epoch(0)
            pbar = tqdm(enumerate(dataloader))
            for batch_idx, samples_meta in pbar:
                pbar.set_description('Processing %s/%s in device %s' % (batch_idx+1, len(dataloader), cmd_args.device))
                imageids, images, widths, heights, gts = samples_meta['id'], samples_meta['image'], samples_meta['width'], samples_meta['height'], samples_meta['seg_target']
                if 'bigmodeloutput' in samples_meta:
                    targets = {'bigmodeloutput': samples_meta['bigmodeloutput'].type(torch.cuda.FloatTensor)}
                    outputs = segmentor.inference(images,{'targets':targets})
                else:
                    outputs = segmentor.inference(images)
                for idx in range(len(outputs)):
                    # resize each prediction back to the sample's original (height, width)
                    output = F.interpolate(outputs[idx: idx+1], size=(heights[idx], widths[idx]), mode='bilinear', align_corners=segmentor.align_corners)
                    pred = (torch.argmax(output[0], dim=0)).cpu().numpy().astype(np.int32)
                    all_preds.append([imageids[idx], pred])
                    gt = gts[idx].cpu().numpy().astype(np.int32)
                    # map out-of-range labels to -1 so they match the ignore_index below
                    gt[gt >= dataset.num_classes] = -1
                    all_gts.append(gt)
        # post process
        all_preds, all_gts, all_ids = postprocesspredgtpairs(all_preds=all_preds, all_gts=all_gts, cmd_args=cmd_args, cfg=cfg, logger_handle=logger_handle)
        result = dataset.evaluate(
            seg_preds=all_preds, seg_targets=all_gts, metric_list=inference_cfg.get('metric_list', ['iou', 'miou']),
            num_classes=cfg.SEGMENTOR_CFG['num_classes'], ignore_index=-1,
        )
        logger_handle.info(result)
        # restore training mode for the caller's next epoch
        segmentor.train()
        segmentor.mode = 'TRAIN'
        return result
'''main'''
def main():
    """Script entry point: parse CLI arguments, prepare the work directory,
    set up logging, and launch training."""
    config_parser = ConfigParser()
    cmd_args = parsecmdargs()
    cfg, cfg_file_path = config_parser(cmd_args.cfgfilepath)
    # make sure the work dir exists and persist the parsed config into it
    touchdir(cfg.SEGMENTOR_CFG['work_dir'])
    config_parser.save(cfg.SEGMENTOR_CFG['work_dir'])
    # logger writing to the path configured in SEGMENTOR_CFG
    logger_handle = Logger(cfg.SEGMENTOR_CFG['logfilepath'])
    # build the trainer and run the full training loop
    trainer = Trainer(cfg=cfg, logger_handle=logger_handle, cmd_args=cmd_args, cfg_file_path=cfg_file_path)
    trainer.start()

'''debug'''
# script entry point: run training when executed directly (not on import)
if __name__ == '__main__':
    main()
