# https://github.com/MengyangPu/RINDNet/tree/main
import torch
import numpy as np
from torch.utils.data import Dataset
from torchvision.ops import box_iou
#L1 
import torch.nn as nn
import torch.nn.functional as F
_HOUGHVOTING_NUM_INLIER = 500
_HOUGHVOTING_DIRECTION_INLIER = 0.9
_LABEL2MASK_THRESHOL = 500

def loss_cross_entropy(scores, labels):
    """Soft cross-entropy normalized by the total label mass.

    scores: tensor [batch_size, num_classes, height, width] of probabilities
    labels: tensor [batch_size, num_classes, height, width] (one-hot or soft)
    Returns a scalar: -sum(labels * log(scores)) / sum(labels), with small
    epsilons guarding log(0) and division by zero.
    """
    # Per-pixel cross entropy, summed over the class dimension.
    per_pixel = -(labels * torch.log(scores + 1e-10)).sum(dim=1)
    # Normalize by the number of labelled entries rather than pixel count.
    return per_pixel.sum() / (labels.sum() + 1e-10)

def loss_Rotation(pred_R, gt_R, label, model):
    """Mean point-matching distance between model points under two rotations.

    pred_R: tensor [N, 3, 3] predicted rotations
    gt_R:   tensor [N, 3, 3] ground-truth rotations
    label:  tensor [N] of 1-based class ids indexing into `model`
    model:  tensor [N_cls, 1024, 3] per-class model point clouds
    Returns the mean Euclidean distance between corresponding points of the
    point cloud rotated by pred_R vs. gt_R.
    """
    # Select each sample's point cloud (labels are 1-based).
    pcd = model[label - 1].to(pred_R.device)
    # Apply both rotations and measure per-point displacement.
    displacement = pcd @ pred_R - pcd @ gt_R
    return displacement.pow(2).sum(dim=2).sqrt().mean()

def loss_rotation(tgt_rot, src_rot):
    """Geodesic distance between two batches of 3x3 rotation matrices.

    L = arccos(0.5 * (Trace(R @ R_tilde^T) - 1))
    The cosine is clamped away from +-1 for numerical stability of acos.
    Returns the mean angle over the batch, in radians.
    """
    eps = 1e-6
    # Relative rotation between source and target.
    relative = torch.bmm(src_rot, tgt_rot.transpose(1, 2))
    # Trace via the diagonal mask of each 3x3 block.
    diag = torch.eye(3).bool()
    trace = relative[:, diag].sum(dim=1)
    cos_theta = torch.clamp(0.5 * (trace - 1), -1 + eps, 1 - eps)
    angle = torch.acos(cos_theta)
    # print(torch.rad2deg(angle))
    return angle.mean()


# HED-UNet
def focal_loss_with_logits(y_hat_log, y, gamma=2):
    """Binary focal loss computed directly on logits (HED-UNet style).

    y_hat_log: raw logits; y: binary targets; gamma: focusing exponent.
    Uses logsigmoid for numerical stability instead of sigmoid + log.
    """
    log_p0 = F.logsigmoid(-y_hat_log)  # log(1 - p)
    log_p1 = F.logsigmoid(y_hat_log)   # log(p)

    # Focal modulation: down-weight well-classified examples.
    weight0 = torch.abs((1 - y) - torch.exp(log_p0)) ** gamma
    weight1 = torch.abs(y - torch.exp(log_p1)) ** gamma

    per_element = -((1 - y) * weight0 * log_p0 + y * weight1 * log_p1)
    return per_element.mean()


class L1Loss(nn.Module):
    """Thin wrapper around nn.L1Loss with mean reduction.

    The original passed the long-deprecated `size_average=True`, which is
    equivalent to `reduction='mean'` in current PyTorch; use the modern
    keyword to avoid the deprecation warning.
    """
    def __init__(self):
        super(L1Loss, self).__init__()
        self.loss = nn.L1Loss(reduction='mean')

    def forward(self, input, target):
        """Return the mean absolute error between input and target."""
        return self.loss(input, target)
    
    
def attentional_focal_loss3(output, target, bs, alpha, gamma):
    """Class-balanced, attention-weighted BCE-with-logits loss.

    output: raw logits; target: binary labels in {0, 1}.
    alpha: balance factor applied to positives; (1 - alpha) to negatives.
    bs and gamma are accepted for interface compatibility but unused here:
    the focusing term is hard-coded as 4 ** sqrt(.) below.
    Returns the weighted mean BCE loss (scalar).
    """
    sigmoid_output = torch.sigmoid(output)  # F.sigmoid is deprecated

    # Per-element weight: positives get alpha * 4^sqrt(1 - p),
    # negatives get (1 - alpha) * 4^sqrt(p).
    weight = (target * alpha * (4.0 ** ((1.0 - sigmoid_output) ** 0.5))
              + (1.0 - target) * (1.0 - alpha) * (4.0 ** (sigmoid_output ** 0.5)))
    # Detach so no gradient flows through the attention weights.
    weight = weight.detach()

    # The original passed reduce="mean", hitting a deprecated legacy code path
    # that only produced a mean by accident; reduction= is the correct kwarg.
    loss = F.binary_cross_entropy_with_logits(output, target, weight, reduction="mean")
    return loss

class Focal_L1_Loss(nn.Module):
    """Edge-detection focal loss with automatic positive/negative balancing.

    The balance factor alpha is recomputed per batch from the label's
    positive/negative pixel ratio, so positives (usually rare edge pixels)
    are up-weighted.
    """
    def __init__(self, alpha=0.1, gamma=2, lamda=0.5):
        super(Focal_L1_Loss, self).__init__()
        self.alpha = alpha  # NOTE: unused in forward (alpha is recomputed per batch)
        self.gamma = gamma
        self.lamda = lamda  # NOTE: unused in forward

    def forward(self, output_b, label):
        """
        output_b: [N,1,H,W] raw logits
        label: [N,1,H,W] binary edge map
        Returns the scalar focal loss.
        """
        batch_size, _, height, width = label.size()
        # Flatten both maps. The original used a fragile backslash/trailing-comma
        # continuation (spanning a whitespace-only line) that only accidentally
        # parsed as this 2-tuple assignment.
        output_flat = output_b.contiguous().view(batch_size * height * width)
        label_flat = label.contiguous().view(batch_size * height * width)

        num_pos = torch.sum(label_flat == 1).float()
        num_neg = torch.sum(label_flat == 0).float()
        # Weight positives by the (typically large) negative fraction.
        alpha = num_neg / (num_pos + num_neg) * 1.0

        loss_focal = attentional_focal_loss3(output_flat, label_flat, batch_size, alpha, self.gamma)
        return loss_focal
    

def IOUselection(pred_bbxes, gt_bbxes, threshold):
    """
        pred_bbxes is N_pred_bbx * 6 (batch_ids, x1, y1, x2, y2, cls)
        gt_bbxes is N_gt_bbx * 6 (batch_ids, x1, y1, x2, y2, cls)
        threshold : IoU threshold for selecting a predicted bbx

    Returns the predicted rows whose IoU with a same-batch, same-class
    ground-truth box strictly exceeds `threshold`. As in the original
    pairwise loop, a prediction matching several ground-truth boxes is
    emitted once per match, in pred-major / gt-minor order.
    """
    device = pred_bbxes.device
    output_bbxes = torch.empty((0, 6)).to(device = device, dtype =torch.float)
    if pred_bbxes.shape[0] == 0 or gt_bbxes.shape[0] == 0:
        return output_bbxes
    # Vectorized replacement of the O(N*M) Python double loop: a single
    # box_iou call produces the full [N_pred, N_gt] IoU matrix.
    ious = box_iou(pred_bbxes[:, 1:5], gt_bbxes[:, 1:5])
    same_batch = pred_bbxes[:, 0].unsqueeze(1) == gt_bbxes[:, 0].unsqueeze(0)
    same_cls = pred_bbxes[:, 5].unsqueeze(1) == gt_bbxes[:, 5].unsqueeze(0)
    keep = same_batch & same_cls & (ious > threshold)
    # nonzero enumerates in row-major order, matching the nested-loop order.
    pred_idx = keep.nonzero(as_tuple=True)[0]
    return torch.cat((output_bbxes, pred_bbxes[pred_idx]), dim=0)


def HoughVoting(label, centermap, num_classes=10):
    """
    Per-class Hough voting for object center (x, y) and depth.

    label [bs, H, W]: integer segmentation map, 0 = background and
        1..num_classes = object class ids. (The original docstring claimed
        [bs, 3, H, W], but the unpacking below requires 3 dims.)
    centermap [bs, 3*num_classes, H, W]: for class c (1-based), channels
        3*(c-1)..3*(c-1)+2 hold the predicted per-pixel direction (dx, dy)
        toward the object center and the predicted depth.
    Returns:
        centers [bs, num_classes, 2]: voted (x, y) center per class
            (zeros when the class was absent or had too few inlier votes)
        depths [bs, num_classes]: mean predicted depth over direction inliers
    """
    batches, H, W = label.shape
    x = np.linspace(0, W - 1, W)
    y = np.linspace(0, H - 1, H)
    xv, yv = np.meshgrid(x, y)
    # xy[0] = x coordinate, xy[1] = y coordinate of every pixel.
    xy = torch.from_numpy(np.array((xv, yv))).to(device = label.device, dtype=torch.float32)
    x_index = torch.from_numpy(x).to(device = label.device, dtype=torch.int32)
    centers = torch.zeros(batches, num_classes, 2)
    depths = torch.zeros(batches, num_classes)
    for bs in range(batches):
        for cls in range(1, num_classes + 1):
            # Skip classes with too few labelled pixels to vote reliably.
            if (label[bs] == cls).sum() >= _LABEL2MASK_THRESHOL:
                pixel_location = xy[:2, label[bs] == cls]  # (2, P) coords of class pixels
                pixel_direction = centermap[bs, (cls-1)*3:cls*3][:2, label[bs] == cls]  # (2, P) predicted directions
                # (P, W): signed x offset from each class pixel to every image column.
                y_index = x_index.unsqueeze(dim=0) - pixel_location[0].unsqueeze(dim=1)
                # Ray-cast each pixel's predicted direction: y of the ray at each column.
                y_index = torch.round(pixel_location[1].unsqueeze(dim=1) + (pixel_direction[1]/pixel_direction[0]).unsqueeze(dim=1) * y_index).to(torch.int32)
                mask = (y_index >= 0) * (y_index < H)  # keep votes inside the image
                # Flatten (y, x) into a single vote-bin index; the mode is the center.
                count = y_index * W + x_index.unsqueeze(dim=0)
                center, inlier_num = torch.bincount(count[mask]).argmax(), torch.bincount(count[mask]).max()
                center_x, center_y = center % W, torch.div(center, W, rounding_mode='trunc')
                if inlier_num > _HOUGHVOTING_NUM_INLIER:
                    centers[bs, cls - 1, 0], centers[bs, cls - 1, 1] = center_x, center_y
                    # Unit direction from every pixel toward the voted center.
                    xyplane_dis = xy - torch.tensor([center_x, center_y])[:, None, None].to(device = label.device)
                    xyplane_direction = xyplane_dis/(xyplane_dis**2).sum(dim=0).sqrt()[None, :, :]
                    predict_direction = centermap[bs, (cls-1)*3:cls*3][:2]
                    # NOTE(review): `*` binds tighter than `==`, so this evaluates
                    # (direction_agreement_mask * label[bs]) == cls, which happens to
                    # equal direction_agreement_mask & (label[bs] == cls) for cls >= 1.
                    inlier_mask = ((xyplane_direction * predict_direction).sum(dim=0).abs() >= _HOUGHVOTING_DIRECTION_INLIER) * label[bs] == cls
                    # Depth estimate: mean of the predicted depth channel over inliers.
                    depths[bs, cls - 1] = centermap[bs, (cls-1)*3:cls*3][2, inlier_mask].mean()
    return centers, depths
