'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: segbasemodel.py
@time: 2020-06-16 09:59:17
@desc: 
'''
import torch
from jjzhk.config import ZKCFG
from jjzhk.device import device
from ELib.backbone.backbone_zoo import get_backbone
import numpy as np


class SegBaseModel(torch.nn.Module):
    r"""Base model for semantic-segmentation / detection networks.

    Wraps a configurable backbone and provides shared inference helpers:
    running an eval forward pass, decoding detector output into per-class
    boxes, and formatting human-readable prediction records.

    Args:
        cfg: project configuration (ZKCFG). Reads MODEL.BACKBONE,
            BASE.CLASSINFO, BASE.IOU_THRESHOLD and calls cfg.classname().
        need_backbone: when True, build the backbone immediately from cfg.
    """

    def __init__(self, cfg: "ZKCFG", need_backbone=True):
        super(SegBaseModel, self).__init__()
        self.cfg = cfg
        self.backbone_name = None  # lower-cased backbone id, set by get_backbone()
        self.backbone = None       # backbone module instance, set by get_backbone()
        if need_backbone:
            self.get_backbone()

    def get_backbone(self):
        """Instantiate the backbone named by cfg.MODEL.BACKBONE (case-insensitive)."""
        self.backbone_name = self.cfg.MODEL.BACKBONE.lower()
        # `get_backbone(...)` below resolves to the module-level factory from
        # backbone_zoo, not this method (the method only shadows it on `self`).
        self.backbone = get_backbone(self.cfg, self.backbone_name)()

    def forward(self, input, **kwargs):
        """Subclasses must override; base implementation is a no-op (returns None)."""
        pass

    def get_detections(self, image, **kwargs):
        """Run an eval-mode forward pass and decode it with the supplied detector.

        Args:
            image: array-like image batch convertible via torch.FloatTensor.
            **kwargs: must contain 'detector', an object exposing forward(pred).

        Returns:
            The detector's output (presumably a (batch, classes, top_k, 5)
            tensor of [score, x1, y1, x2, y2] rows -- confirm with the
            detector implementation).
        """
        # torch.autograd.Variable is deprecated (PyTorch >= 0.4); a plain
        # tensor is the equivalent. Inference needs no autograd graph, so the
        # forward pass also runs under no_grad().
        with torch.no_grad():
            image = torch.FloatTensor(image).to(device)
            pred = self.forward(image, phase='eval')
        detections = kwargs["detector"].forward(pred)
        return detections

    def get_eval_predictions(self, info, detections):
        """Convert raw detections into per-class pixel-space boxes for evaluation.

        Args:
            info: list of dicts; info[0] must carry 'width' and 'height'.
            detections: tensor indexed as [image, class, rank, field] where
                field 0 is the score and fields 1:5 are normalized box
                coordinates (assumed [x1, y1, x2, y2] -- TODO confirm).

        Returns:
            List of length len(CLASSINFO) + 1; entry j is a list of numpy
            arrays [x1, y1, x2, y2, score] in pixel coordinates. Index 0 is
            the background slot and stays empty.
        """
        w, h = info[0]['width'], info[0]['height']
        # Slot 0 is reserved for the background class.
        re_boxes = [[] for _ in range(len(self.cfg.BASE.CLASSINFO.keys()) + 1)]
        scale = [w, h, w, h]
        for j in range(1, detections.size(1)):
            cls_dets = []
            for det in detections[0][j]:
                # Rows with score <= 0 are padding and are skipped.
                if det[0] > 0:
                    d = det.cpu().numpy()
                    score, box = d[0], d[1:]
                    box *= scale  # normalized -> pixel coordinates
                    cls_dets.append(np.append(box, score))
            re_boxes[j] = cls_dets
        return re_boxes

    def get_predict(self, image, info, **kwargs):
        """Decode detections into prediction records for each image in the batch.

        Args:
            image: input batch, forwarded to get_detections().
            info: per-image metadata dicts with 'img_id', 'height', 'width'.
            **kwargs: forwarded to get_detections() (must include 'detector').

        Returns:
            One list per image; each record is
            [(x1, y1), (x2, y2), label_name, image_id, score]
            with coordinates scaled to pixel space.
        """
        detections = self.get_detections(image, **kwargs)

        result_box = []
        top_k = detections.size(2)
        for p in range(detections.size(0)):
            image_id = info[p]["img_id"]
            height, width = info[p]["height"], info[p]["width"]
            scale = torch.Tensor([width, height, width, height])
            result = []

            for i in range(detections.size(1)):
                j = 0
                # Rows are assumed sorted by score; stop at the first one
                # below threshold. `j < top_k` bounds the scan so a fully
                # confident column can no longer index past the tensor
                # (original code raised IndexError in that case).
                while j < top_k and detections[p, i, j, 0] >= self.cfg.BASE.IOU_THRESHOLD:
                    score = detections[p, i, j, 0]
                    # NOTE(review): i starts at 0, so the first query is
                    # classname(-1); preserved from the original -- verify
                    # against cfg.classname's indexing convention.
                    label_name = self.cfg.classname(i - 1)
                    pt = (detections[p, i, j, 1:] * scale).cpu().numpy()
                    result.append(
                        [
                            (pt[0], pt[1]),
                            (pt[2], pt[3]),
                            label_name,
                            image_id,
                            score
                        ]
                    )
                    j += 1
            result_box.append(result)
        return result_box

    def load_init_weights(self, weights):
        """Load a checkpoint dict, unwrapping a 'state_dict' key when present."""
        if 'state_dict' in weights:
            self.load_state_dict(weights['state_dict'])
        else:
            self.load_state_dict(weights)