'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: slover.py
@time: 2020-05-29 16:11:11
@desc: 
'''
from ELib.model.model_zoo import get_segmentation_model
from ELib.loss.loss_zoo import get_segmentation_loss
from ELib.sched.scheduler import WarmupPolyLR
from jjzhk.device import device
from ELib.dataset.dataset import VOCSegSet, COCOSegSet, ADE20KSegSet, CityScapesSegSet
import torchvision as tv
import torch
from jjzhk.drawseg import BaseDrawSeg
from jjzhk.progressbar import ProgressBar
from tabulate import tabulate
from jjzhk.logger import Logger
import os


class BelodonSolver(object):
    """Driver that wires model, loss, optimizer, scheduler, datasets and
    logging together for semantic-segmentation training / evaluation / testing,
    all configured through a single ``cfg`` object.
    """

    # Download URLs for pretrained backbone weights, keyed by backbone name.
    model_urls = {
        'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
        'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
        'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
        'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
        'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',

        'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',

        'resnet50c': 'https://github.com/JJZHK/AIModels/releases/download/2.0/resnet50_Seg.pth',
        'resnet101c': 'https://github.com/JJZHK/AIModels/releases/download/2.0/resnet101_Seg.pth',
        'resnet152c': 'https://github.com/JJZHK/AIModels/releases/download/2.0/resnet152_Seg.pth',

        'xception65': 'https://github.com/JJZHK/AIModels/releases/download/2.0/xception65_Seg.pth',
        'hrnet_w18_small_v1': 'https://github.com/JJZHK/AIModels/releases/download/2.0/hrnetw18smallv1_Seg.pth',
        'mobilenetv2': 'https://github.com/JJZHK/AIModels/releases/download/2.0/mobilenetV2_Seg.pth',
    }

    def __init__(self, cfg):
        self.cfg = cfg
        # Data loaders are created lazily on first use (train / eval / test).
        self._train_loader_ = None
        self._eval_loader_ = None
        self._test_loader_ = None
        self.logger = Logger(output="logger")
        # Paths handed out by the logger, in order:
        #   logger/train_logs, logger/eval_logs, logger/test_logs,
        #   logger/train_logs/checkpoint.txt, logger/train_logs/loss.txt,
        #   logger/eval_logs/eval.txt
        self.train_path, self.eval_path, self.test_path, self.checkpoint_file, \
            self.loss_file, self.eval_file = self.logger.get_path_files()
        self.model = get_segmentation_model(self.cfg).to(device)
        self.optimizer = None

    def train(self):
        """Run the full training loop, resuming from the latest checkpoint if one exists."""
        if self._train_loader_ is None:
            train_dataset = self._init_dataset_('train')
            train_sampler = self.make_data_sampler(train_dataset, False)
            train_batch_sampler = self.make_batch_data_sampler(train_sampler,
                                                               images_per_batch=self.cfg.TRAIN.BATCH_SIZE,
                                                               drop_last=True)
            self._train_loader_ = torch.utils.data.DataLoader(dataset=train_dataset,
                                                              batch_sampler=train_batch_sampler,
                                                              num_workers=0)
            self.optimizer = self._get_optimizer_(self.model)
            self.iters_per_epoch = len(train_dataset) // (1 * self.cfg.TRAIN.BATCH_SIZE)
            self.max_iters = self.cfg.TRAIN.MAX_EPOCHS * self.iters_per_epoch

        # BUGFIX: the epoch loop previously ran to ``self.max_iters``
        # (epochs * iters_per_epoch) while the progress bar was sized for
        # MAX_EPOCHS; the loop must count epochs, the scheduler iterations.
        max_epochs = self.cfg.TRAIN.MAX_EPOCHS

        self.criterion = get_segmentation_loss(self.cfg).to(device)

        # WarmupPolyLR is stepped once per iteration, so it needs max_iters.
        self.lr_scheduler = self._get_scheduler_(self.optimizer, max_iters=self.max_iters,
                                                 iters_per_epoch=self.iters_per_epoch)

        previous = self._find_previous_()
        if previous:
            # Resume from the most recent recorded checkpoint.
            start_epoch = previous[0][-1]
            self._resume_checkpoint_(previous[1][-1])
        else:
            start_epoch = self._init_weights_()
        bar = ProgressBar(self.cfg.TRAIN.MAX_EPOCHS, len(self._train_loader_), "Loss:%.3f;Avg Loss:%.3f;LR:%.6f")

        print("start training...")
        for epoch in range(start_epoch + 1, max_epochs + 1):
            self.model.train()
            # Report the learning rate of the last parameter group
            # (the decoder group when encoder/decoder params are split).
            current_lr = self.optimizer.param_groups[-1]['lr']

            avg_loss_per_epoch, time = self._train_epoch_(epoch, current_lr, bar)

            # ``is not 0`` replaced by ``!= 0``: identity tests on int
            # literals are implementation-dependent and raise SyntaxWarning.
            eval_every = self.cfg.TRAIN.PHASE.EVALITER
            if eval_every is not None and eval_every != 0 and epoch % eval_every == 0:
                self._eval_epoch_(self.model, epoch)

            test_every = self.cfg.TRAIN.PHASE.TESTITER
            if test_every is not None and test_every != 0 and epoch % test_every == 0:
                self._test_epoch_(self.model, epoch)

            resume_checkpoints = {
                'state_dict': self.model.module.state_dict() if hasattr(self.model, 'module') else self.model.state_dict(),
                'lr_scheduler': self.lr_scheduler.state_dict(),
                'optimizer': self.optimizer.state_dict()
            }
            self.logger.save_checkpoints_file(epoch, resume_checkpoints)
            self.logger.logger("epoch %d loss: %.3f; time: %d" % (epoch, avg_loss_per_epoch, time), phase='l')

    def eval(self):
        """Evaluate the model once using the weights named in cfg.EVAL.WEIGHTS."""
        # BUGFIX: the message previously printed cfg.TEST.WEIGHTS while the
        # checkpoint actually loaded was cfg.EVAL.WEIGHTS.
        print("loading weights from %s" % self.cfg.EVAL.WEIGHTS)
        self._resume_checkpoint_(self.cfg.EVAL.WEIGHTS)
        self._eval_epoch_(self.model, 0)

    def test(self):
        """Run inference over the test set using the weights named in cfg.TEST.WEIGHTS."""
        print("loading weights from %s" % self.cfg.TEST.WEIGHTS)
        self._resume_checkpoint_(self.cfg.TEST.WEIGHTS)
        self._test_epoch_(self.model, 0)

    def _eval_epoch_(self, model, epoch):
        """Run validation, log pixAcc/mIoU, and dump a per-class IoU table to eval_path."""
        if self._eval_loader_ is None:
            self.metric = SegmentationMetric(self.cfg.BASE.NUM_CLASSES)
            eval_dataset = self._init_dataset_('eval')
            eval_sampler = self.make_data_sampler(eval_dataset, False)
            eval_batch_sampler = self.make_batch_data_sampler(eval_sampler,
                                                              images_per_batch=self.cfg.EVAL.BATCH_SIZE,
                                                              drop_last=False)
            self._eval_loader_ = torch.utils.data.DataLoader(dataset=eval_dataset,
                                                             batch_sampler=eval_batch_sampler,
                                                             num_workers=0)
            # Guarded with getattr for consistency with _get_paramters_;
            # plain attribute access raised for models without an encoder.
            if getattr(model, 'encoder', None) is not None:
                self._set_batch_norm_attr_(model.encoder.named_modules(), 'eps', self.cfg.BASE.BN_EPS_FOR_ENCODER)
        # BUGFIX: reset the metric on every call; previously the counters kept
        # accumulating across epochs when evaluating periodically during training.
        self.metric.reset()
        model.eval()

        bar = ProgressBar(1, len(self._eval_loader_), "validation pixAcc: %.3f, mIoU: %.3f")
        for index, (image, masks, infos) in enumerate(self._eval_loader_):
            image = image.to(device)
            masks = masks.to(device)

            with torch.no_grad():
                output = model.evaluate(image)

            self.metric.update(output, masks)
            pixAcc, mIoU = self.metric.get()
            bar.show(1, pixAcc * 100, mIoU * 100)

        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        self.logger.logger("epoch %d, validation pixAcc : %.3f, mIoU: %.3f" % (epoch, pixAcc * 100, mIoU * 100), phase='e')
        headers = ['class id', 'class name', 'iou']
        # showindex="always" supplies the 'class id' column, so each row
        # only carries [class name, iou].
        table = [[cls_name, category_iou[i]]
                 for i, cls_name in enumerate(self.cfg.BASE.CLASSINFO.keys())]

        with open(os.path.join(self.eval_path, "%d.txt" % epoch), 'w') as f:
            f.write(tabulate(table, headers, tablefmt='grid', showindex="always",
                             numalign='center', stralign='center'))

    def _test_epoch_(self, model, epoch):
        """Run inference over the test set and draw the predicted masks to test_path/<epoch>."""
        if self._test_loader_ is None:
            test_dataset = self._init_dataset_('test')
            test_sampler = self.make_data_sampler(test_dataset, False)
            test_batch_sampler = self.make_batch_data_sampler(test_sampler,
                                                              images_per_batch=self.cfg.TEST.BATCH_SIZE,
                                                              drop_last=False)

            self._test_loader_ = torch.utils.data.DataLoader(dataset=test_dataset,
                                                             batch_sampler=test_batch_sampler,
                                                             num_workers=0)
            if getattr(model, 'encoder', None) is not None:
                self._set_batch_norm_attr_(model.encoder.named_modules(), 'eps', self.cfg.BASE.BN_EPS_FOR_ENCODER)
        model.eval()

        draw = BaseDrawSeg(self.cfg, os.path.join(self.test_path, "%s" % epoch))
        bar = ProgressBar(1, len(self._test_loader_), "%d")
        for index, (image, info) in enumerate(self._test_loader_):
            images = image.to(device)
            with torch.no_grad():
                output = model(images)
            # output[0]: (1, nclass, H, W) logits -> (H, W) label map.
            pred = torch.argmax(output[0], 1).squeeze(0).cpu().data.numpy()
            draw.drawImage(param={
                "Image": info["path"][0],
                "Mask": pred,
                "ImageName": info["img_id"][0]
            }, drawType=1)
            bar.show(1, index + 1)

    def _train_epoch_(self, epoch, current_lr, bar):
        """Train for one epoch; return (average loss, elapsed time reported by the bar)."""
        avg_loss = 0
        time = 0
        i = 0  # keeps the final division valid even for an empty loader
        for i, (images, masks, _) in enumerate(self._train_loader_):
            images = images.to(device)
            targets = masks.to(device)

            outputs = self.model(images)

            # The loss zoo returns a dict of named loss terms; optimize the sum.
            loss_dict = self.criterion(outputs, targets)
            losses = sum(loss for loss in loss_dict.values())
            avg_loss += losses.item()

            self.optimizer.zero_grad()
            losses.backward()
            self.optimizer.step()
            # Warmup/poly schedules are stepped per iteration, not per epoch.
            self.lr_scheduler.step()

            time = bar.show(epoch, losses.item(), avg_loss / (i + 1), current_lr)

        return avg_loss / (i + 1), time

    def make_data_sampler(self, dataset, shuffle):
        """Return a RandomSampler when shuffle is truthy, else a SequentialSampler."""
        if shuffle:
            return torch.utils.data.sampler.RandomSampler(dataset)
        return torch.utils.data.sampler.SequentialSampler(dataset)

    def make_batch_data_sampler(self, sampler, images_per_batch, num_iters=None, start_iter=0, drop_last=True):
        """Wrap *sampler* in a BatchSampler; cap it at *num_iters* iterations when given."""
        batch_sampler = torch.utils.data.sampler.BatchSampler(sampler, images_per_batch, drop_last=drop_last)
        if num_iters is not None:
            batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iters, start_iter)
        return batch_sampler

    def _set_batch_norm_attr_(self, named_modules, attr, value):
        """Set *attr* = *value* on every (Sync)BatchNorm2d in *named_modules*."""
        for name, module in named_modules:
            if isinstance(module, (torch.nn.BatchNorm2d, torch.nn.SyncBatchNorm)):
                setattr(module, attr, value)

    def _init_dataset_(self, phase):
        """Build the dataset for *phase* ('train'/'eval'/'test') per cfg.BASE.DATA_TYPE.

        Raises
        ------
        ValueError
            For an unsupported DATA_TYPE (previously this silently returned
            None and failed later with an opaque error).
        """
        dataset_classes = {
            'voc': VOCSegSet,
            'coco': COCOSegSet,
            'ade20k': ADE20KSegSet,
            'cityscapes': CityScapesSegSet,
        }
        data_type = self.cfg.BASE.DATA_TYPE
        if data_type not in dataset_classes:
            raise ValueError("unsupported DATA_TYPE: %s" % data_type)
        transform = tv.transforms.Compose([
            tv.transforms.ToTensor(),
            tv.transforms.Normalize(self.cfg.BASE.MEAN, self.cfg.BASE.STD),
        ])
        return dataset_classes[data_type](self.cfg, phase=phase, transform=transform)

    def _find_previous_(self):
        """Parse the checkpoint index file.

        Returns
        -------
        False when no checkpoint has been recorded, otherwise a tuple
        ``(epochs, checkpoint_paths)`` listing every entry in file order
        (the last element of each list is the most recent).
        """
        if not os.path.exists(self.checkpoint_file):
            return False

        with open(self.checkpoint_file, 'r') as f:
            lines = f.readlines()

        if not lines:
            return False

        epoches = []
        resume_checkpoints = []
        for line in lines:
            # Each line looks like: "epoch <N>: <path>\n"
            epoch = int(line[line.find('epoch ') + len('epoch '): line.find(':')])
            checkpoint = line[line.find(':') + 2:-1]
            epoches.append(epoch)
            resume_checkpoints.append(checkpoint)
        return epoches, resume_checkpoints

    def _init_weights_(self):
        """Initialize the backbone weights for a fresh run; return start epoch 0."""
        # ``is not ''`` dropped: an empty string is already falsy.
        if self.cfg.TRAIN.WEIGHTS:
            self._resume_checkpoint_(os.path.join(self.cfg.BASE.WEIGHTS_ROOT, self.cfg.TRAIN.WEIGHTS),
                                     justInitBase=True)
        else:
            self._resume_checkpoint_('', justInitBase=True)

        return 0

    def _resume_checkpoint_(self, weights, justInitBase=False):
        """Load weights into the model (and optionally optimizer / scheduler).

        This method is called in four situations:
            1. loading a pretrained backbone: justInitBase=True, weights downloaded;
            2. resuming an interrupted run: justInitBase=False, checkpoint from the logger dir;
            3. from eval(): justInitBase=False, weights may come from a URL;
            4. from test(): justInitBase=False, weights may come from a URL.
        """
        print("loading backbone weights...")

        if justInitBase:  # case 1
            self._load_backbone_weights_()
        else:
            if 'https://' in weights:
                checkpoint = torch.utils.model_zoo.load_url(weights, map_location='cpu')
                # Full training checkpoints also carry optimizer / scheduler state.
                if 'optimizer' in checkpoint:
                    self.optimizer.load_state_dict(checkpoint['optimizer'])
                if 'lr_scheduler' in checkpoint:
                    self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])

                self.model.load_init_weights(checkpoint)
            else:
                self.model.load_init_weights(torch.load(weights, map_location='cpu'))

    def _get_optimizer_(self, model):
        """Build the optimizer named by cfg.TRAIN.SOLVER.OPTIMIZER over the model's param groups.

        Raises
        ------
        ValueError
            When the configured optimizer name is not supported.
        """
        parameters = self._get_paramters_(model)
        opt_lower = self.cfg.TRAIN.SOLVER.OPTIMIZER.lower()

        if opt_lower == 'sgd':
            optimizer = torch.optim.SGD(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, momentum=self.cfg.TRAIN.SOLVER.MOMENTUM,
                weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'adam':
            optimizer = torch.optim.Adam(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, eps=self.cfg.TRAIN.SOLVER.EPSILON,
                weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'adadelta':
            optimizer = torch.optim.Adadelta(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, eps=self.cfg.TRAIN.SOLVER.EPSILON,
                weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        elif opt_lower == 'rmsprop':
            optimizer = torch.optim.RMSprop(
                parameters, lr=self.cfg.TRAIN.SOLVER.LR, alpha=0.9, eps=self.cfg.TRAIN.SOLVER.EPSILON,
                momentum=self.cfg.TRAIN.SOLVER.MOMENTUM, weight_decay=self.cfg.TRAIN.SOLVER.WEIGHT_DECAY)
        else:
            raise ValueError("Expected optimizer method in [sgd, adam, adadelta, rmsprop], but received "
                             "{}".format(opt_lower))

        return optimizer

    def _get_paramters_(self, model):
        """Split parameters into encoder / decoder groups so the decoder can use a larger LR."""
        if hasattr(model, 'encoder') and model.encoder is not None and hasattr(model, 'decoder'):
            params_list = [{'params': model.encoder.parameters(), 'lr': self.cfg.TRAIN.SOLVER.LR}]
            self._set_batch_norm_attr_(model.encoder.named_modules(), 'eps', self.cfg.BASE.BN_EPS_FOR_ENCODER)

            # ``model.decoder`` lists the attribute names of decoder sub-modules.
            for module in model.decoder:
                params_list.append({'params': getattr(model, module).parameters(),
                                    'lr': self.cfg.TRAIN.SOLVER.LR * self.cfg.TRAIN.SOLVER.DECODER_LR_FACTOR})
            return params_list
        return model.parameters()

    def _get_scheduler_(self, optimizer, max_iters, iters_per_epoch):
        """Build the per-iteration LR scheduler (currently only 'poly' is supported).

        Raises
        ------
        ValueError
            When cfg.TRAIN.SOLVER.LR_SCHEDULER names an unsupported mode.
        """
        mode = self.cfg.TRAIN.SOLVER.LR_SCHEDULER.lower()
        warm_up_iters = iters_per_epoch * self.cfg.TRAIN.SOLVER.WARMUP.EPOCHS
        if mode == 'poly':
            return WarmupPolyLR(optimizer, max_iters=max_iters, power=0.9,
                                warmup_factor=self.cfg.TRAIN.SOLVER.WARMUP.FACTOR, warmup_iters=warm_up_iters,
                                warmup_method=self.cfg.TRAIN.SOLVER.WARMUP.METHOD)
        else:
            raise ValueError("not support lr scheduler method!")

    def _load_backbone_weights_(self):
        """Download and load the pretrained weights for the configured backbone, if known."""
        backbone_name = self.cfg.MODEL.BACKBONE
        if backbone_name in self.model_urls:
            state_dict = torch.utils.model_zoo.load_url(self.model_urls[backbone_name])
            # Models built around a BASEMODEL keep the encoder one level deeper.
            if hasattr(self.cfg.MODEL, "BASEMODEL"):
                self.model.backbone.encoder.load_weights(state_dict)
            else:
                self.model.encoder.load_weights(state_dict)


class SegmentationMetric(object):
    """Computes pixAcc and mIoU metric scores for semantic segmentation."""

    def __init__(self, nclass):
        super(SegmentationMetric, self).__init__()
        self.nclass = nclass
        self.reset()

    def update(self, preds, labels):
        """Updates the internal evaluation result.

        Parameters
        ----------
        preds : torch.Tensor or list/tuple of torch.Tensor
            Predicted class scores, shape (N, nclass, H, W).
        labels : torch.Tensor or list/tuple of torch.Tensor
            Ground-truth labels, shape (N, H, W).
        """

        def evaluate_worker(pred, label):
            correct, labeled = self.batch_pix_accuracy(pred, label)
            inter, union = self.batch_intersection_union(pred, label, self.nclass)

            self.total_correct += correct.item()
            self.total_label += labeled.item()
            # Keep the running totals on the same device as the new results
            # (totals start on CPU; predictions may arrive on GPU).
            if self.total_inter.device != inter.device:
                self.total_inter = self.total_inter.to(inter.device)
                self.total_union = self.total_union.to(union.device)
            self.total_inter += inter
            self.total_union += union

        if isinstance(preds, torch.Tensor):
            evaluate_worker(preds, labels)
        elif isinstance(preds, (list, tuple)):
            for (pred, label) in zip(preds, labels):
                evaluate_worker(pred, label)

    def get(self, return_category_iou=False):
        """Gets the current evaluation result.

        Returns
        -------
        metrics : tuple of float
            (pixAcc, mIoU), plus the per-class IoU array when
            ``return_category_iou`` is True.
        """
        # The tiny epsilon (machine eps, i.e. np.spacing(1)) avoids
        # division by zero before any pixels have been accumulated.
        pixAcc = 1.0 * self.total_correct / (2.220446049250313e-16 + self.total_label)
        IoU = 1.0 * self.total_inter / (2.220446049250313e-16 + self.total_union)
        mIoU = IoU.mean().item()
        if return_category_iou:
            return pixAcc, mIoU, IoU.cpu().numpy()
        return pixAcc, mIoU

    def reset(self):
        """Resets the internal evaluation result to initial state."""
        self.total_inter = torch.zeros(self.nclass)
        self.total_union = torch.zeros(self.nclass)
        self.total_correct = 0
        self.total_label = 0

    def batch_pix_accuracy(self, output, target):
        """PixAcc: return (correct pixel count, labeled pixel count) for one batch."""
        # output is 4D scores (N, nclass, H, W), target is 3D labels (N, H, W).
        # BUGFIX: argmax must run on the raw float scores. The previous
        # ``output.long()`` truncated the logits before argmax, which breaks
        # the ranking whenever scores differ only in their fractional part
        # (e.g. every logit in [0, 1) truncates to 0 and class 0 always wins).
        # batch_intersection_union below already argmaxes the raw scores.
        predict = torch.argmax(output, 1) + 1
        target = target.long() + 1

        # Shift labels by +1 so that 0 means "unlabeled" and can be masked out.
        pixel_labeled = torch.sum(target > 0)
        pixel_correct = torch.sum((predict == target) * (target > 0))
        assert pixel_correct <= pixel_labeled, "Correct area should be smaller than Labeled"
        return pixel_correct, pixel_labeled

    def batch_intersection_union(self, output, target, nclass):
        """mIoU: return per-class (intersection, union) histograms for one batch."""
        # output is 4D scores (N, nclass, H, W), target is 3D labels (N, H, W).
        mini = 1
        maxi = nclass
        nbins = nclass
        # Shift by +1 so class ids occupy [1, nclass] and 0 marks ignored pixels.
        predict = torch.argmax(output, 1) + 1
        target = target.float() + 1

        predict = predict.float() * (target > 0).float()
        intersection = predict * (predict == target).float()
        # Element 0 in intersection marks the main difference from np.bincount;
        # histc over [mini, maxi] excludes it, so no -1 boundary trick is needed.
        area_inter = torch.histc(intersection.cpu(), bins=nbins, min=mini, max=maxi)
        area_pred = torch.histc(predict.cpu(), bins=nbins, min=mini, max=maxi)
        area_lab = torch.histc(target.cpu(), bins=nbins, min=mini, max=maxi)
        area_union = area_pred + area_lab - area_inter
        assert torch.sum(area_inter > area_union).item() == 0, "Intersection area should be smaller than Union area"
        return area_inter.float(), area_union.float()


class IterationBasedBatchSampler(torch.utils.data.sampler.BatchSampler):
    """Wraps a BatchSampler and re-iterates it over and over until exactly
    ``num_iterations`` batches have been produced (counting from ``start_iter``).
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        count = self.start_iter
        while count <= self.num_iterations:
            # DistributedSampler-style samplers expose set_epoch so each
            # pass (and each process) sees a different shuffle of the data.
            inner_sampler = self.batch_sampler.sampler
            if hasattr(inner_sampler, "set_epoch"):
                inner_sampler.set_epoch(count)
            for batch in self.batch_sampler:
                count += 1
                if count > self.num_iterations:
                    return
                yield batch

    def __len__(self):
        return self.num_iterations