#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
   File Name:     utils
   Description :
   Author :       lth
   Date:          2022/1/27
-------------------------------------------------
   Change Activity:
                   2022/1/27 13:35: create this script
-------------------------------------------------
"""
__author__ = 'lth'

from typing import List

import cv2
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn


class Line:
    """Axis-aligned segment with normalised endpoint order.

    ``v`` selects the orientation convention used to normalise endpoints:
    when True the segment is treated as vertical and the y endpoints are
    sorted so that ``y1 <= y2``; otherwise it is treated as horizontal
    and the x endpoints are sorted so that ``x1 <= x2``.
    """

    def __init__(self, x1, y1, x2, y2, v):
        if v:
            # Vertical convention: keep x order, sort y so y1 is the top end.
            self.x1 = x1
            self.y1 = min(y1, y2)
            self.x2 = x2
            self.y2 = max(y1, y2)
        else:
            # Horizontal convention: keep y order, sort x so x1 is the left end.
            self.x1 = min(x1, x2)
            self.y1 = y1
            self.x2 = max(x1, x2)
            self.y2 = y2

    @property
    def get_center_x(self):
        """Midpoint x coordinate of the segment."""
        return (self.x1 + self.x2) / 2

    @property
    def get_center_y(self):
        """Midpoint y coordinate of the segment."""
        return (self.y1 + self.y2) / 2

    def get_tail(self):
        """Second (sorted) endpoint as ``(x2, y2)``."""
        return self.x2, self.y2

    def get_tail_reverse(self):
        """Second (sorted) endpoint with swapped axes, as ``(y2, x2)``."""
        return self.y2, self.x2

    @staticmethod
    def check_angle(x1, y1, x2, y2):
        """Classify the segment orientation from its endpoint deltas.

        The angle is ``arctan(|dx| / |dy|)`` in degrees: 0 for a segment
        that only spans in y, 90 for one that only spans in x.

        :return: True if the angle is within 10 degrees of 90 (segment
            spans mostly in x), False if within 10 degrees of 0 (spans
            mostly in y), None for anything in between.
        """
        eps = 1e-5  # avoids division by zero when dy == 0
        dx = np.abs(x1 - x2)
        dy = np.abs(y1 - y2)
        theta = np.arctan(dx / (dy + eps))
        # Convert radians to degrees before thresholding.
        degrees = np.rad2deg(theta)
        if np.abs(degrees - 90) < 10:
            return True
        if np.abs(degrees - 0) < 10:
            return False
        # Explicit None for ambiguous angles (was an implicit fall-through).
        return None


def weights_init(net, init_type='normal', init_gain=0.001):
    """Initialise Conv and BatchNorm2d weights of ``net`` in place.

    :param net: module whose sub-modules are visited via ``net.apply``
    :param init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal'
    :param init_gain: std / gain handed to the chosen initializer
    :raises NotImplementedError: when a Conv layer is met and
        ``init_type`` is not a known scheme
    """
    conv_initializers = {
        'normal': lambda w: torch.nn.init.normal_(w, 0.0, init_gain),
        'xavier': lambda w: torch.nn.init.xavier_normal_(w, gain=init_gain),
        'kaiming': lambda w: torch.nn.init.kaiming_normal_(w, a=0, mode='fan_in'),
        'orthogonal': lambda w: torch.nn.init.orthogonal_(w, gain=init_gain),
    }

    def init_func(m):
        cls_name = m.__class__.__name__
        if hasattr(m, 'weight') and 'Conv' in cls_name:
            if init_type not in conv_initializers:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            conv_initializers[init_type](m.weight.data)
        elif 'BatchNorm2d' in cls_name:
            # GAN-style batch-norm init: weight ~ N(1, 0.02), bias = 0.
            torch.nn.init.normal_(m.weight.data, 1.0, 0.02)
            torch.nn.init.constant_(m.bias.data, 0.0)

    print('initialize network with %s type' % init_type)
    net.apply(init_func)


class SegmentationLosses(nn.Module):
    """Configurable multi-class segmentation loss.

    ``mode`` selects the criterion computed by :meth:`forward`:
    'CE' (cross-entropy), 'FL' (focal loss), 'Dice' (generalized soft
    dice averaged per sample), 'Dice2' (soft dice summed over the whole
    batch) or 'CE || Dice' (sum of cross-entropy and generalized dice).
    """

    def __init__(self, num_classes=8, mode='CE', weights=None,
                 ignore_index=255, gamma=2, alpha=0.5, reduction='mean'):
        super().__init__()
        self.gamma = gamma                # focal-loss focusing exponent
        self.alpha = alpha                # focal-loss scaling factor
        self.mode = mode                  # criterion dispatched by forward()
        self.weights = weights            # optional per-class weight tensor [C]
        self.ignore_index = ignore_index  # label value excluded from the loss
        self.reduction = reduction        # 'mean' reduces dice loss over batch
        self.num_classes = num_classes

    def forward(self, preds, target):
        """Dispatch to the criterion selected by ``self.mode``.

        :param preds: logits of shape [N, C, H, W]
        :param target: integer labels of shape [N, H, W]
        :return: scalar loss tensor
        :raises NotImplementedError: for an unknown ``mode``
        """
        H1, W1 = preds.size()[2:]
        H2, W2 = target.size()[1:]
        assert H1 == H2 and W1 == W2

        if self.mode == 'CE':
            return self.CrossEntropyLoss(preds, target)
        elif self.mode == 'FL':
            return self.FocalLoss(preds, target)
        elif self.mode == 'Dice':
            return self.GeneralizedSoftDiceLoss(preds, target)
        elif self.mode == 'Dice2':
            return self.BatchSoftDeviceLoss(preds, target)
        elif self.mode == 'CE || Dice':
            loss = self.CrossEntropyLoss(preds, target) + \
                   self.GeneralizedSoftDiceLoss(preds, target)
            return loss
        else:
            raise NotImplementedError

    def CrossEntropyLoss(self, preds, target):
        """Cross-entropy with optional class weights.

        :param preds: logits of shape [N, C, H, W]
        :param target: labels of shape [N, H, W]
        :return: scalar mean loss
        """
        return F.cross_entropy(preds, target, weight=self.weights,
                               ignore_index=self.ignore_index)

    def FocalLoss(self, preds, target):
        """Focal loss: FL = alpha * (1 - pt) ** gamma * CE.

        NOTE(review): pt is derived from the *mean* cross-entropy over
        all pixels rather than per-pixel probabilities — confirm this
        is the intended formulation.

        :param preds: logits of shape [N, C, H, W]
        :param target: labels of shape [N, H, W]
        :return: scalar loss
        """
        logits = -F.cross_entropy(preds, target.long(),
                                  ignore_index=self.ignore_index)
        pt = torch.exp(logits)  # pt = exp(-CE), computed before alpha scaling
        if self.alpha is not None:
            logits *= self.alpha
        loss = -((1 - pt) ** self.gamma) * logits

        return loss

    def GeneralizedSoftDiceLoss(self, preds, target):
        """Generalized soft dice loss.

        Paper:
            https://arxiv.org/pdf/1606.04797.pdf
        :param preds: logits of shape [N, C, H, W]
        :param target: labels of shape [N, H, W]
        :return: scalar loss (mean over batch when reduction == 'mean')
        """
        # Build a one-hot label map with ignored pixels zeroed in every
        # class channel so they contribute to neither numer nor denom.
        ignore = target.data.cpu() == self.ignore_index
        label = target.clone()
        label[ignore] = 0
        lb_one_hot = torch.zeros_like(preds).scatter_(1, label.unsqueeze(1), 1)
        ignore = ignore.nonzero(as_tuple=False)
        _, M = ignore.size()
        # Columns of `ignore` are the (n, h, w) indices of ignored pixels.
        a, *b = ignore.chunk(M, dim=1)
        lb_one_hot[[a, torch.arange(lb_one_hot.size(1)).long(), *b]] = 0
        lb_one_hot = lb_one_hot.detach()

        # compute loss
        probs = torch.sigmoid(preds)
        numer = torch.sum((probs * lb_one_hot), dim=(2, 3))
        denom = torch.sum(probs.pow(1) + lb_one_hot.pow(1), dim=(2, 3))
        if self.weights is not None:
            numer = numer * self.weights.view(1, -1)
            denom = denom * self.weights.view(1, -1)
        numer = torch.sum(numer, dim=1)
        denom = torch.sum(denom, dim=1)
        smooth = 1
        loss = 1 - (2 * numer + smooth) / (denom + smooth)

        if self.reduction == 'mean':
            loss = loss.mean()
        return loss

    def BatchSoftDeviceLoss(self, preds, target):
        """Soft dice loss with numerator/denominator summed over the
        whole batch (single dice score instead of one per sample).

        :param preds: logits of shape [N, C, H, W]
        :param target: labels of shape [N, H, W]
        :return: scalar loss
        """
        # overcome ignored label (same one-hot masking as the method above)
        ignore = target.data.cpu() == self.ignore_index
        target = target.clone()
        target[ignore] = 0
        lb_one_hot = torch.zeros_like(preds).scatter_(1, target.unsqueeze(1), 1)
        # as_tuple=False matches GeneralizedSoftDiceLoss and silences the
        # deprecation warning for bare nonzero().
        ignore = ignore.nonzero(as_tuple=False)
        _, M = ignore.size()
        a, *b = ignore.chunk(M, dim=1)
        lb_one_hot[[a, torch.arange(lb_one_hot.size(1)).long(), *b]] = 0
        lb_one_hot = lb_one_hot.detach()

        # compute loss
        probs = torch.sigmoid(preds)
        numer = torch.sum((probs * lb_one_hot), dim=(2, 3))
        denom = torch.sum(probs.pow(1) + lb_one_hot.pow(1), dim=(2, 3))
        if self.weights is not None:
            # BUG FIX: was `self.weight`, which raised AttributeError
            # whenever class weights were supplied in 'Dice2' mode.
            numer = numer * self.weights.view(1, -1)
            denom = denom * self.weights.view(1, -1)
        numer = torch.sum(numer)
        denom = torch.sum(denom)
        smooth = 1
        loss = 1 - (2 * numer + smooth) / (denom + smooth)

        return loss


def clip_by_tensor(t, t_min, t_max):
    """Clamp ``t`` element-wise into ``[t_min, t_max]`` via float masks.

    Bounds may be scalars or broadcastable tensors; the input is cast
    to float first.
    """
    t = t.float()
    above_min = (t >= t_min).float()
    below_min = (t < t_min).float()
    clipped = above_min * t + below_min * t_min
    within_max = (clipped <= t_max).float()
    above_max = (clipped > t_max).float()
    return within_max * clipped + above_max * t_max


def BCELoss(pred, target):
    """Element-wise binary cross-entropy with clamped predictions.

    Predictions are clamped into [epsilon, 1 - epsilon] (mask-based
    clipping, inlined) before the logs so the loss stays finite.

    :param pred: predicted probabilities
    :param target: binary targets, same shape as ``pred``
    :return: element-wise loss tensor (no reduction)
    """
    epsilon = 1e-7
    p = pred.float()
    p = (p >= epsilon).float() * p + (p < epsilon).float() * epsilon
    p = (p <= 1.0 - epsilon).float() * p + (p > 1.0 - epsilon).float() * (1.0 - epsilon)
    return -target * torch.log(p) - (1.0 - target) * torch.log(1.0 - p)


class PostProcess:
    """Turn a dense segmentation mask into filled axis-aligned boxes."""

    # BGR fill colours, selected by the ``index`` argument of process().
    colors = [(0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255)]

    @staticmethod
    def process(encode_image, threshold=600, v=True, index=0):
        """Extract large contours from ``encode_image``, merge them into
        Line boxes via :func:`prune`, and draw the result.

        :param encode_image: BGR image (numpy array) to post-process
        :param threshold: minimum contour area kept, in pixels
        :param v: orientation flag forwarded to prune() (True = vertical)
        :param index: which entry of ``colors`` to fill the boxes with
        :return: image of the same shape with the merged boxes drawn filled
        """
        canvas = np.zeros_like(encode_image)
        gray = cv2.cvtColor(encode_image, cv2.COLOR_BGR2GRAY)
        # Otsu picks the binarisation threshold automatically.
        ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

        # Outer contours only, with every boundary point kept.
        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        boxes = []
        for contour in contours:
            # NOTE: contourArea may differ slightly from the pixel count.
            if cv2.contourArea(contour) < threshold:
                continue
            # Simplify the contour to 0.1% of its perimeter, then take
            # its axis-aligned bounding rectangle as a 4-corner polygon.
            perimeter = cv2.arcLength(contour, True)
            poly = cv2.approxPolyDP(contour, 0.001 * perimeter, True)
            x, y, w, h = cv2.boundingRect(poly)
            corners = np.array([[x, y], [x + w, y], [x + w, y + h], [x, y + h]])
            boxes.append(corners[:, np.newaxis, :])

        # Merge overlapping / near-collinear boxes before drawing.
        for line in prune(boxes, v):
            quad = np.array([[line.x1, line.y1], [line.x2, line.y1],
                             [line.x2, line.y2], [line.x1, line.y2]])
            canvas = cv2.drawContours(canvas, [quad], 0, PostProcess.colors[index], -1)

        return canvas


def prune(ans: List, v=True):
    """Convert 4-corner box arrays into Line objects and merge neighbours.

    Boxes are sorted by their tail endpoint, then consecutive boxes
    whose centres along the relevant axis are within 5 pixels are merged
    (the merged result replaces the previous entry).

    :param ans: list of arrays shaped like cv2 contours; entry [0][0]
        is the top-left corner and [2][0] the bottom-right corner
    :param v: True = vertical lines (compare centre x, merge with
        mergex); False = horizontal (compare centre y, mergey)
    :return: list of merged Line objects
    """
    # Pick the sort key, centre accessor and merge function once.
    if v:
        sort_key = lambda ln: ln.get_tail()
        centre = lambda ln: ln.get_center_x
        merge = mergex
    else:
        sort_key = lambda ln: ln.get_tail_reverse()
        centre = lambda ln: ln.get_center_y
        merge = mergey

    lines = sorted(
        (Line(box[0][0][0], box[0][0][1], box[2][0][0], box[2][0][1], v)
         for box in ans),
        key=sort_key,
    )

    merged = []
    for i, line in enumerate(lines):
        # Compare against the *sorted* predecessor, not the merged one.
        if i > 0 and abs(centre(line) - centre(lines[i - 1])) <= 5:
            merged.append(merge(line, merged.pop()))
        else:
            merged.append(line)

    return merged


def mergex(res1, res2):
    """Merge two vertical lines into their bounding Line.

    When the y-endpoint differences have opposite signs (one span
    strictly contains the other in y), the line reaching further down
    is returned unchanged instead of building a union.
    """
    if (res1.y1 - res2.y1) * (res1.y2 - res2.y2) < 0:
        # Containment case: keep whichever line has the larger y2.
        return res2 if res2.y2 >= res1.y2 else res1

    # Union of both extents, normalised as a vertical Line.
    left = min(res1.x1, res2.x1)
    top = min(res1.y1, res2.y1)
    right = max(res1.x2, res2.x2)
    bottom = max(res1.y2, res2.y2)
    return Line(left, top, right, bottom, True)


def mergey(res1, res2):
    """Merge two horizontal lines into their bounding Line.

    When the x-endpoint differences have opposite signs (one span
    strictly contains the other in x), the line reaching further right
    is returned unchanged instead of building a union.
    """
    if (res1.x1 - res2.x1) * (res1.x2 - res2.x2) < 0:
        # Containment case: keep whichever line has the larger x2.
        return res2 if res2.x2 >= res1.x2 else res1

    # Union of both extents, normalised as a horizontal Line.
    left = min(res1.x1, res2.x1)
    top = min(res1.y1, res2.y1)
    right = max(res1.x2, res2.x2)
    bottom = max(res1.y2, res2.y2)
    return Line(left, top, right, bottom, False)


class FocalLoss(nn.Module):
    """Focal loss built on top of the mean cross-entropy.

    Computes FL = (1 - pt) ** gamma * CE where pt = exp(-CE) and CE is
    the mean cross-entropy over all non-ignored pixels.
    """

    def __init__(self, ignore_index=255, gamma=2):
        super(FocalLoss, self).__init__()
        self.ignore_index = ignore_index  # label value excluded from CE
        self.gamma = gamma                # focusing exponent

    def forward(self, pred, target):
        """Return the scalar focal loss for logits ``pred`` [N, C, H, W]
        and integer labels ``target`` [N, H, W]."""
        ce = F.cross_entropy(pred, target.long(), ignore_index=self.ignore_index)
        log_pt = -ce
        pt = torch.exp(log_pt)
        return -((1 - pt) ** self.gamma) * log_pt


class DiceLoss(nn.Module):
    """Smoothed dice loss over a foreground mask (``target > 0``)."""

    def __init__(self):
        super(DiceLoss, self).__init__()

    @staticmethod
    def calculate(pred, target):
        """Return 1 - dice coefficient between ``pred`` and the binary
        foreground mask of ``target``, smoothed by +1 on both sides
        (plus 1e-7 in the denominator for numerical safety)."""
        foreground = target > 0
        overlap = torch.sum(pred[foreground])
        total = torch.sum(pred) + torch.sum(foreground)
        return 1 - (2 * overlap + 1) / (total + 1e-7 + 1)
