import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from utils.box import bbox_iou, non_max_suppression
import pdb
import cv2

# <class Criterion/>
class Criterion(nn.Module):
    """YOLO-v3 style multi-scale detection loss.

    Per-cell losses: BCE on the sigmoid y/x offsets, MSE on the log h/w
    sizes, focal-weighted BCE on objectness confidence, and (when
    num_classes > 1) multi-label BCE on the class scores.  Negative cells
    whose decoded prediction overlaps any ground-truth box with IoU above
    `ignore_threshold` are excluded from the negative confidence loss
    (see darknet's yolo_layer.c).
    """
    # <method __init__>
    def __init__(
        self,
        grids, 
        anchors,
        num_classes,
        ignore_threshold
        ):
        """
        Args:
            grids: per feature-map grid sizes, e.g. [(13, 13), (26, 26), ...].
            anchors: per feature-map anchor (h, w) pairs, one list per level.
            num_classes: number of classes; the class loss is added only
                when this is greater than 1.
            ignore_threshold: IoU above which a negative cell is ignored.
        """
        super(Criterion, self).__init__()
        self._MSE = nn.MSELoss(reduction='none')
        self._BCE = nn.BCELoss(reduction='none')
        self._SL1 = nn.SmoothL1Loss(reduction='none')
        self._CEL = nn.CrossEntropyLoss(reduction='none')
        # YOLO-v1 style weighting: boost coordinate terms, damp background
        self._lamda_coor = 5.0
        self._lamda_obj_pos = 1.0
        self._lamda_obj_neg = 0.5
        self._grids = grids
        self._anchors = anchors
        self._num_classes = num_classes
        self._ignore_threshold = ignore_threshold
        assert(len(self._anchors) == len(self._grids))
        self._num_fms = len(self._anchors)
    # <method: __init__>

    # <method forward>
    def forward(self, pred, target, nI_h = 416, nI_w = 416, images = None):
        """Compute the detection losses for one batch.

        Args:
            pred: (encoded, decoded) pair; pred[0] is the raw per-cell
                network output (nB, nCells, 5 + nC), pred[1] the decoded
                boxes in (y, x, h, w, conf, ...) image coordinates.
            target: encoder output tuple:
                target[0] ground-truth boxes per image (confidence in col 4),
                target[1] encoded target feature maps (nB, nCells, 5 + nC),
                target[2] positive-cell mask (nB, nCells),
                target[3] unused (the negative mask is derived here instead),
                target[4] per-cell coordinate-loss scale (nB, nCells).
            nI_h, nI_w: network input size; only used for debug drawing.
            images: optional image batch; when given, targets and
                predictions are rendered with OpenCV for inspection.

        Returns:
            dict with 'l_yx', 'l_hw', 'l_conf_p', 'l_conf_n',
            optionally 'l_cls', and their sum under 'total_loss'.
        """
        # step 1: build the conf-ignore mask -- negatives that overlap a
        # ground-truth box too much contribute no confidence loss
        # (see https://github.com/pjreddie/darknet/blob/master/src/yolo_layer.c)
        batch_conf_ignore_mask = target[2].clone() * 0
        nB = pred[0].size(0)
        for b in range(nB):
            boxes_pred = pred[1][b].clone()
            boxes_truth = target[0][b].clone()
            boxes_truth = boxes_truth[boxes_truth[..., 4] > 0]
            for box_truth in boxes_truth:
                ious = bbox_iou(box_truth.repeat(boxes_pred.size(0), 1), boxes_pred, x1y1x2y2=False)
                batch_conf_ignore_mask[b][ious > self._ignore_threshold] = 1
            # end-for
        # end-for

        # step 2: split cells into positives and (non-ignored) negatives
        target_fms = target[1]
        target_mask_pos = target[2]
        # element-wise: NOT (pos OR ignore)
        target_mask_neg = 1 - (target_mask_pos + batch_conf_ignore_mask - target_mask_pos * batch_conf_ignore_mask)
        target_yxhw_scale = target[4]
        # mask indexing needs bool tensors on torch >= 1.2; fall back to
        # uint8 on older builds.  (fixed: the previous exact '1.2.0' string
        # compare sent every newer torch down the deprecated byte() path)
        if hasattr(target_mask_pos, 'bool'):
            target_mask_pos = target_mask_pos.bool()
            target_mask_neg = target_mask_neg.bool()
        else:
            target_mask_pos = target_mask_pos.byte()
            target_mask_neg = target_mask_neg.byte()
        # end-if
        pred_pos = pred[0][target_mask_pos]
        pred_neg = pred[0][target_mask_neg]
        target_yxhw_scale_pos = target_yxhw_scale[target_mask_pos]
        target_pos = target_fms[target_mask_pos]
        target_neg = target_fms[target_mask_neg]
        # optional visual check of targets vs. predictions (debugging aid)
        if images is not None:
            self._draw_debug(pred, target_fms, target_mask_pos, images, nI_h, nI_w)
        # end-if

        # step 3: the losses (zeros when a mask selects nothing).
        # NOTE: the old unused per-element loss block and the discarded
        # torch.argsort call were removed -- they were dead code.
        zeros = lambda: torch.zeros(1).to(pred[0].device)
        has_pos = target_mask_pos.sum() > 0
        losses_dict = {}
        if has_pos:
            scale2 = target_yxhw_scale_pos.unsqueeze(1).repeat(1, 2)
            # coordinates: BCE on sigmoid'ed y/x offsets, MSE on log h/w
            losses_dict['l_yx'] = self._lamda_coor * torch.sum(self._BCE(pred_pos[..., 0:2], target_pos[..., 0:2]) * scale2) / nB
            losses_dict['l_hw'] = self._lamda_coor * torch.sum(self._MSE(pred_pos[..., 2:4], target_pos[..., 2:4]) * scale2) / nB
            # focal-weighted positive confidence loss
            losses_dict['l_conf_p'] = self._lamda_obj_pos * torch.sum(self._BCE(pred_pos[..., 4], target_pos[..., 4]) * torch.pow(1 - pred_pos[..., 4], 2)) / nB
        else:
            losses_dict['l_yx'] = zeros()
            losses_dict['l_hw'] = zeros()
            losses_dict['l_conf_p'] = zeros()
        # end-if
        # focal-weighted negative confidence loss
        if target_mask_neg.sum() > 0:
            losses_dict['l_conf_n'] = self._lamda_obj_neg * torch.sum(self._BCE(pred_neg[..., 4], target_neg[..., 4]) * torch.pow(0 - pred_neg[..., 4], 2)) / nB
        else:
            losses_dict['l_conf_n'] = zeros()
        # end-if
        # multi-label classification loss
        if self._num_classes > 1:
            losses_dict['l_cls'] = self._lamda_obj_pos * torch.sum(self._BCE(pred_pos[..., 5:], target_pos[..., 5:])) / nB if has_pos else zeros()
        # end-if
        # add losses together
        losses_dict['total_loss'] = losses_dict['l_yx'] + losses_dict['l_hw'] + losses_dict['l_conf_p'] + losses_dict['l_conf_n']
        if self._num_classes > 1:
            losses_dict['total_loss'] = losses_dict['total_loss'] + losses_dict['l_cls']
        # end-if
        return losses_dict
    # <method forward>

    # <method _draw_debug>
    def _draw_debug(self, pred, target_fms, target_mask_pos, images, nI_h, nI_w):
        """Render encoded targets (white, thick) and decoded predictions
        (one color per feature-map level) onto the input images.

        Blocks on cv2.waitKey after each image; debugging aid only.
        """
        for b in range(target_fms.size(0)):
            # undo the loader's normalisation back to uint8 HWC
            # NOTE(review): assumes CHW floats in [-0.5, 0.5] -- confirm against the dataset
            img = ((images[b].numpy() + 0.5) * 255).transpose(1, 2, 0).astype(np.uint8)
            img_show = img.copy()
            encoded_fm = target_fms[b]
            pred_decoded_boxes = pred[1][b].clone()
            cnt = 0
            for l in range(self._num_fms):  # fixed: was hard-coded range(3)
                nG_h = self._grids[l][0]
                nG_w = self._grids[l][1]  # fixed: previously indexed [0] (height) for width
                for gj in range(nG_h):
                    for gi in range(nG_w):
                        for n in range(len(self._anchors[l])):
                            if target_mask_pos[b][cnt] > 0:
                                # decode the encoded target back to image coords
                                encoded_box = encoded_fm[cnt]
                                y = (encoded_box[0] + gj) / (nG_h - 1) * (nI_h - 1)
                                x = (encoded_box[1] + gi) / (nG_w - 1) * (nI_w - 1)
                                h = torch.exp(encoded_box[2]) * self._anchors[l][n][0]
                                w = torch.exp(encoded_box[3]) * self._anchors[l][n][1]
                                img_show = cv2.rectangle(img_show, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), [255, 255, 255], 2)
                                # matching decoded prediction for this cell/anchor
                                pred_decoded_box = pred_decoded_boxes[cnt]
                                y = pred_decoded_box[0]
                                x = pred_decoded_box[1]
                                h = pred_decoded_box[2]
                                w = pred_decoded_box[3]
                                colors = ([255, 0, 0], [0, 255, 0], [0, 0, 255])
                                color = colors[l % 3]
                                conf_text = str(round(pred_decoded_box[4].item() * 100) / 100)
                                img_show = cv2.rectangle(img_show, (int(x - w / 2), int(y - h / 2)), (int(x + w / 2), int(y + h / 2)), color, 1)
                                # per-level text anchor keeps overlapping labels readable;
                                # fixed: origins are cast to int (cv2.putText rejects
                                # tensor/float points, so this previously crashed)
                                if l == 0:
                                    org = (int(x - w / 2), int(y - h / 2))
                                elif l == 1:
                                    org = (int(x), int(y))
                                else:
                                    org = (int(x + w / 2), int(y + h / 2))
                                cv2.putText(img_show, conf_text, org, cv2.FONT_HERSHEY_SIMPLEX, 0.25, color)
                            # end-if
                            cnt += 1
                        # end-for
                    # end-for
                # end-for
            # end-for
            cv2.imshow("img", img_show)
            cv2.waitKey(0)
        # end-for
    # <method _draw_debug>
# </class Criterion>


# <class CriterionWithFocalLossAndOHNM/>
class CriterionWithFocalLossAndOHNM(nn.Module):
    """YOLO-style detection loss with focal-weighted confidence terms and
    online hard-negative mining (OHNM).

    Unlike `Criterion`, every loss is normalised per image by its number of
    positive cells, and the batch result is averaged over the images that
    contain at least one object.  For each image only the hardest
    `nT * negative_mining_ratio` negatives (at least `min_hard_negatives`)
    contribute to the negative confidence loss.
    """
    # <method __init__>
    def __init__(
        self,
        grids, 
        anchors,
        num_classes,
        ignore_threshold
        ):
        """
        Args:
            grids: per feature-map grid sizes, e.g. [(13, 13), (26, 26), ...].
            anchors: per feature-map anchor (h, w) pairs, one list per level.
            num_classes: number of classes; the class loss is added only
                when this is greater than 1.
            ignore_threshold: IoU above which a negative cell is ignored.
        """
        super(CriterionWithFocalLossAndOHNM, self).__init__()
        self._MSE = nn.MSELoss(reduction='none')
        self._BCE = nn.BCELoss(reduction='none')
        self._SL1 = nn.SmoothL1Loss(reduction='none')
        self._CEL = nn.CrossEntropyLoss(reduction='none')
        self._lamda_coor = 1.0
        self._lamda_obj_pos = 1.0
        self._lamda_obj_neg = 1.0
        self._grids = grids
        self._anchors = anchors
        self._num_classes = num_classes
        self._ignore_threshold = ignore_threshold
        assert(len(self._anchors) == len(self._grids))
        self._num_fms = len(self._anchors)
        self._negative_mining_ratio = 3   # keep at most 3 negatives per positive
        self._min_hard_negatives = 0      # lower bound on mined negatives per image
    # <method: __init__>

    # <method forward>
    def forward(self, pred, target, nI_h = 416, nI_w = 416, images = None):
        """Compute the detection losses for one batch.

        Args:
            pred: (encoded, decoded) pair; pred[0] is the raw per-cell
                network output (nB, nCells, 5 + nC), pred[1] the decoded
                boxes in (y, x, h, w, conf, ...) image coordinates.
            target: encoder output tuple:
                target[0] ground-truth boxes per image (confidence in col 4),
                target[1] encoded target feature maps (nB, nCells, 5 + nC),
                target[2] positive-cell mask (nB, nCells),
                target[3] unused (the negative mask is derived here instead),
                target[4] per-cell coordinate-loss scale (nB, nCells).
            nI_h, nI_w, images: unused; kept for interface parity with
                `Criterion.forward`.

        Returns:
            dict with 'l_yx', 'l_hw', 'l_conf_p', 'l_conf_n',
            optionally 'l_cls', and their sum under 'total_loss'.
        """
        # step 1: build the conf-ignore mask -- negatives that overlap a
        # ground-truth box too much contribute no confidence loss
        # (see https://github.com/pjreddie/darknet/blob/master/src/yolo_layer.c)
        batch_conf_ignore_mask = target[2].clone() * 0
        nB = pred[0].size(0)
        for b in range(nB):
            boxes_pred = pred[1][b].clone()
            boxes_truth = target[0][b].clone()
            # fixed: filter with '> 0' (was '>= 0', which let zero-padded
            # rows into the IoU test; `Criterion` already used '> 0')
            boxes_truth = boxes_truth[boxes_truth[..., 4] > 0]
            for box_truth in boxes_truth:
                ious = bbox_iou(box_truth.repeat(boxes_pred.size(0), 1), boxes_pred, x1y1x2y2=False)
                batch_conf_ignore_mask[b][ious > self._ignore_threshold] = 1
            # end-for
        # end-for

        # step 2: split cells into positives and (non-ignored) negatives
        target_fms = target[1]
        target_mask_pos = target[2]
        # element-wise: NOT (pos OR ignore)
        target_mask_neg = 1 - (target_mask_pos + batch_conf_ignore_mask - target_mask_pos * batch_conf_ignore_mask)
        target_yxhw_scale = target[4]
        # mask indexing needs bool tensors on torch >= 1.2; fall back to
        # uint8 on older builds (fixed: was an exact '1.2.0' string compare)
        if hasattr(target_mask_pos, 'bool'):
            target_mask_pos = target_mask_pos.bool()
            target_mask_neg = target_mask_neg.bool()
        else:
            target_mask_pos = target_mask_pos.byte()
            target_mask_neg = target_mask_neg.byte()
        # end-if
        device = pred[0].device
        losses_dict = {}
        for key in ('total_loss', 'l_yx', 'l_hw', 'l_conf_p', 'l_conf_n'):
            losses_dict[key] = torch.zeros(1).to(device)
        if self._num_classes > 1:
            losses_dict['l_cls'] = torch.zeros(1).to(device)
        # end-if
        K = 0  # number of images containing at least one positive cell
        for b in range(nB):
            target_yxhw_scale_pos = target_yxhw_scale[b][target_mask_pos[b]]
            pred_pos = pred[0][b][target_mask_pos[b]]
            pred_neg = pred[0][b][target_mask_neg[b]]
            target_pos = target_fms[b][target_mask_pos[b]]
            target_neg = target_fms[b][target_mask_neg[b]]
            nT = int(target_mask_pos[b].sum())
            if nT > 0:
                K += 1
            # end-if
            denom = max(1, nT)
            # box location losses: BCE on y/x offsets, MSE on log h/w
            loss_y = self._lamda_coor * self._BCE(pred_pos[..., 0], target_pos[..., 0]) * target_yxhw_scale_pos
            loss_x = self._lamda_coor * self._BCE(pred_pos[..., 1], target_pos[..., 1]) * target_yxhw_scale_pos
            loss_h = self._lamda_coor * self._MSE(pred_pos[..., 2], target_pos[..., 2]) * target_yxhw_scale_pos
            loss_w = self._lamda_coor * self._MSE(pred_pos[..., 3], target_pos[..., 3]) * target_yxhw_scale_pos
            losses_dict['l_yx'] += (loss_x + loss_y).sum() / denom / 2
            losses_dict['l_hw'] += (loss_w + loss_h).sum() / denom / 2
            # binary-focal-loss trick for the confidence losses
            loss_conf_p = self._lamda_obj_pos * self._BCE(pred_pos[..., 4], target_pos[..., 4]) * torch.pow(1 - pred_pos[..., 4], 2)
            loss_conf_n = self._lamda_obj_neg * self._BCE(pred_neg[..., 4], target_neg[..., 4]) * torch.pow(0 - pred_neg[..., 4], 2)
            # hard-negative mining: rank negatives by loss, keep the hardest
            # (now honours self._min_hard_negatives, which was declared but unused)
            _, indices = loss_conf_n.sort(0, descending=True)
            _, rank = indices.sort(0)
            num_keep = max(self._min_hard_negatives, nT * self._negative_mining_ratio)
            loss_conf_n = loss_conf_n[rank < num_keep]
            losses_dict['l_conf_p'] += loss_conf_p.sum() / denom
            losses_dict['l_conf_n'] += loss_conf_n.sum() / denom
            # classification loss (multi-label BCE)
            if self._num_classes > 1:
                loss_cls = self._lamda_obj_pos * self._BCE(pred_pos[..., 5:], target_pos[..., 5:])
                losses_dict['l_cls'] += loss_cls.sum() / denom
            # end-if
        # end-for
        # average over images with objects.  fixed: guard K == 0 -- the
        # previous code raised ZeroDivisionError on a batch with no ground truth
        K = max(1, K)
        losses_dict['l_yx'] /= K
        losses_dict['l_hw'] /= K
        losses_dict['l_conf_p'] /= K
        losses_dict['l_conf_n'] /= K
        if self._num_classes > 1:
            losses_dict['l_cls'] /= K
        # end-if
        # add losses together
        losses_dict['total_loss'] = losses_dict['l_yx'] + losses_dict['l_hw'] + losses_dict['l_conf_p'] + losses_dict['l_conf_n']
        if self._num_classes > 1:
            losses_dict['total_loss'] = losses_dict['total_loss'] + losses_dict['l_cls']
        # end-if
        return losses_dict
    # <method forward>
# </class CriterionWithFocalLossAndOHNM>