import copy
import time

import cv2
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CTCLoss
import einops
from tqdm import tqdm

from FOTS.model.ghm import GHMC
from FOTS.utils.util import str_label_converter

import torch
import torch.nn as nn

from sys_mgr import MetricLogger, GlobMgr, BaseMetricLogger


def get_acc_from_logits_and_gt(gt, pred, verbose=True):
    """Compute exact-match accuracy between decoded ground truth and CTC predictions.

    Args:
        gt: pair (label_tensor, lengths) of encoded ground-truth transcripts.
        pred: pair (logits, lengths); logits are CTC outputs decoded greedily.
            # assumes logits are (T, N, C) — the (T, N) argmax is transposed
            # to (N, T) before decoding; TODO confirm against the model head.
        verbose: when True, print the decoded strings and the accuracy.

    Returns:
        float: fraction of samples whose decoded prediction matches the
        ground truth exactly (after '#' removal and CTC de-duplication).
    """
    decoded_gt = str_label_converter.decode(gt[0], torch.tensor(gt[1]))
    # decode() returns a bare string for a single sample; normalize to a list.
    if not isinstance(decoded_gt, list):
        decoded_gt = [decoded_gt]
    # Ground truth only needs the '#' marker characters removed.
    decoded_gt = [s.replace('#', '') for s in decoded_gt]

    # Greedy CTC decoding: softmax over classes, argmax, then transpose.
    pred_tensor = torch.softmax(pred[0], dim=-1).argmax(dim=-1).t().detach().cpu()
    decoded_pred = str_label_converter.decode(pred_tensor, torch.tensor(pred[1].int()))
    if not isinstance(decoded_pred, list):
        decoded_pred = [decoded_pred]
    # Collapse repeats/blanks as CTC decoding requires.
    decoded_pred = str_label_converter.remove_duplicate(decoded_pred)

    acc = (np.array(decoded_pred) == np.array(decoded_gt)).sum() / float(len(decoded_gt))
    if verbose:
        print("  \n {} gt:{} pred:{}  acc:{:.2f}".format(time.time(), decoded_gt, decoded_pred, acc))
    return acc


def eval_model(model, epoch, eval_loader):
    """Run a validation pass of `model` over `eval_loader` and log recognition accuracy.

    Args:
        model: network called as model(images=..., boxes=..., rois=...) and
            returning a dict with a 'transcripts' entry (logits, lengths).
        epoch: epoch index, used only for metric logging / printing.
        eval_loader: iterable of batch dicts; expected keys are shown in the
            layout comment below.

    Returns:
        float: accuracy of the LAST batch only.
        NOTE(review): the epoch-average accuracy is only printed via the
        metric logger — confirm callers really want the last-batch value.
    """
    acc = 0.0
    model.eval()
    # Expected batch layout (from the dataset collate function):
    # data = dict(image_names=imagePaths,
    #                 images=images.float(),
    #                 score_maps=score_maps.float(),
    #                 geo_maps=geo_maps.float(),
    #                 training_masks=training_masks.bool(),
    #                 transcripts=(texts, lengths),
    #                 bboxes=bboxs.float(),
    #                 mapping=mapping.int(),
    #                 rois=batched_rois.float())
    metric_logger_val = BaseMetricLogger()
    # NOTE(review): no torch.no_grad() here, so autograd state is tracked
    # during evaluation — confirm this is intended (it wastes memory otherwise).
    for input_data in eval_loader:
        bboxes = input_data['bboxes'].cuda()
        rois = input_data['rois'].cuda()
        images = input_data['images'].cuda()

        output = model(images=images,
                       boxes=bboxes,
                       rois=rois)
        # Ground-truth transcripts arrive as a (texts, lengths) pair.
        y_true_recog = (input_data['transcripts'][0],
                        input_data['transcripts'][1])
        y_pred_recog = output['transcripts']
        acc = get_acc_from_logits_and_gt(y_true_recog, y_pred_recog, verbose=False)

        metric_logger_val.update(acc, 0.0, epoch)

    print(" \n VAL EPOCH:{} ACC:{}".format(epoch, metric_logger_val.get_acc(epoch)))

    model.train()
    return acc


def get_dice_loss(gt_score, pred_score):
    """Soft dice loss between a ground-truth score map and a predicted one.

    The epsilon in the denominator keeps the ratio finite when both maps
    are all zeros.
    """
    overlap = torch.sum(gt_score * pred_score)
    total = torch.sum(gt_score) + torch.sum(pred_score) + 1e-5
    return 1. - (2 * overlap / total)


def get_geo_loss(gt_geo, pred_geo):
    """Per-pixel IoU and angle losses for 5-channel RBOX geometry maps.

    Channel layout (dim 1) is (d1, d2, d3, d4, angle): four edge distances
    plus the box rotation angle.

    Returns:
        (iou_loss_map, angle_loss_map), each shaped like a single channel.
    """
    gt_d1, gt_d2, gt_d3, gt_d4, gt_angle = torch.split(gt_geo, 1, 1)
    pr_d1, pr_d2, pr_d3, pr_d4, pr_angle = torch.split(pred_geo, 1, 1)

    gt_area = (gt_d1 + gt_d2) * (gt_d3 + gt_d4)
    pr_area = (pr_d1 + pr_d2) * (pr_d3 + pr_d4)
    inter_w = torch.min(gt_d3, pr_d3) + torch.min(gt_d4, pr_d4)
    inter_h = torch.min(gt_d1, pr_d1) + torch.min(gt_d2, pr_d2)
    inter_area = inter_w * inter_h
    union_area = gt_area + pr_area - inter_area

    # +1 smoothing keeps the log finite for empty intersections.
    iou_loss_map = -torch.log((inter_area + 1.0) / (union_area + 1.0))
    angle_loss_map = 1 - torch.cos(pr_angle - gt_angle)
    return iou_loss_map, angle_loss_map


class DetectionLoss(nn.Module):
    """EAST-style detection loss: dice on the score map plus IoU/angle on geometry.

    Args:
        weight_angle: weight of the angle term inside the geometry loss.
        debug: when True, show the gt/pred score maps via cv2.imshow for
            visual inspection.  Off by default so the loss runs headless
            (the original code displayed windows unconditionally, which
            blocks training on machines without a display server).
    """

    def __init__(self, weight_angle=10, debug=False):
        super(DetectionLoss, self).__init__()
        self.weight_angle = weight_angle
        self.debug = debug
        # Kept for the (currently disabled) GHM-C classification loss variant.
        self.ghmc = GHMC()

    def forward(self, gt_score, pred_score, gt_geo, pred_geo, ignored_map):
        """Return (geo_loss, classify_loss) for a batch of score/geometry maps."""
        # No positive pixels in the batch: return zero losses derived from the
        # predictions so they still carry a grad_fn and backward() stays valid.
        if torch.sum(gt_score) < 1:
            zero = torch.sum(pred_score + pred_geo) * 0
            return zero, zero

        if self.debug:
            # Debug visualization of the first sample; requires a display.
            cv2.imshow("loss gt score0",
                       ((gt_score.detach().cpu().permute(0, 2, 3, 1)[0]) * 255).numpy().astype(np.uint8))
            cv2.imshow("loss pred score0",
                       ((pred_score.detach().cpu()).permute(0, 2, 3, 1)[0] * 255).numpy().astype(np.uint8))

        # Pixels flagged in ignored_map are masked out of the dice loss.
        classify_loss = get_dice_loss(gt_score, pred_score * (1 - ignored_map.byte()))
        iou_loss_map, angle_loss_map = get_geo_loss(gt_geo, pred_geo)

        # Average the per-pixel geometry losses over positive (text) pixels only.
        angle_loss = torch.sum(angle_loss_map * gt_score) / torch.sum(gt_score)
        iou_loss = torch.sum(iou_loss_map * gt_score) / torch.sum(gt_score)
        geo_loss = self.weight_angle * angle_loss + iou_loss
        return geo_loss, classify_loss


# class DetectionLoss(nn.Module):
#     def __init__(self):
#         super(DetectionLoss, self).__init__()
#         self.ghmc = GHMC()
#
#
#     def forward(self, y_true_cls, y_pred_cls,
#                 y_true_geo, y_pred_geo,
#                 training_mask):
#
#         classification_loss = self.__dice_coefficient(y_true_cls, y_pred_cls, training_mask)
#
#         # classification_loss = self.ghmc(einops.rearrange(y_pred_cls, 'b c h w -> (b h w) c'),
#         #                                 einops.rearrange(y_true_cls, 'b c h w -> (b h w) c'),
#         #                                 einops.rearrange(training_mask, 'b c h w -> (b h w) c'))
#
#
#         # d1 -> top, d2->right, d3->bottom, d4->left
#         #     d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = tf.split(value=y_true_geo, num_or_size_splits=5, axis=3)
#         d1_gt, d2_gt, d3_gt, d4_gt, theta_gt = torch.split(y_true_geo, 1, 1)
#         #     d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = tf.split(value=y_pred_geo, num_or_size_splits=5, axis=3)
#         d1_pred, d2_pred, d3_pred, d4_pred, theta_pred = torch.split(y_pred_geo, 1, 1)
#         area_gt = (d1_gt + d3_gt) * (d2_gt + d4_gt)
#         area_pred = (d1_pred + d3_pred) * (d2_pred + d4_pred)
#         w_union = torch.min(d2_gt, d2_pred) + torch.min(d4_gt, d4_pred)
#         h_union = torch.min(d1_gt, d1_pred) + torch.min(d3_gt, d3_pred)
#         area_intersect = w_union * h_union
#         area_union = area_gt + area_pred - area_intersect
#         L_AABB = -torch.log((area_intersect + 1.0) / (area_union + 1.0))
#         L_theta = 1 - torch.cos(theta_pred - theta_gt)
#         L_g = L_AABB + 10 * L_theta
#
#         return torch.mean(L_g * y_true_cls * training_mask), classification_loss
#
#     def __dice_coefficient(self, y_true_cls, y_pred_cls, training_mask):
#         '''
#         dice loss
#         :param y_true_cls:
#         :param y_pred_cls:
#         :param training_mask:
#         :return:
#         '''
#         eps = 1e-5
#         intersection = torch.sum(y_true_cls * y_pred_cls * training_mask)
#         union = torch.sum(y_true_cls * training_mask) + torch.sum(y_pred_cls * training_mask) + eps
#         loss = 1. - (2 * intersection / union)
#
#         return loss
#
#     def __cross_entroy(self, y_true_cls, y_pred_cls, training_mask):
#         #import ipdb; ipdb.set_trace()
#         return torch.nn.functional.binary_cross_entropy(y_pred_cls*training_mask, (y_true_cls*training_mask))


class RecognitionLoss(nn.Module):
    """CTC loss over recognition logits, with accuracy side-logging."""

    def __init__(self):
        super(RecognitionLoss, self).__init__()
        # CTCLoss is called as (log_probs, targets, input_lengths, target_lengths).
        self.ctc_loss = CTCLoss()

    def zip_target(self, gt):
        """Concatenate the per-sample targets of a padded batch into one 1-D tensor.

        Args:
            gt: pair (padded_labels, lengths) where padded_labels[i][:lengths[i]]
                holds sample i's label sequence.

        Returns:
            1-D float tensor of length sum(lengths) holding all labels back to back.
        """
        zipped_target = torch.zeros(int(sum(gt[1])))
        cur_idx = 0
        for i, length in enumerate(gt[1]):
            zipped_target[cur_idx:cur_idx + length] = gt[0][i][0:length]
            cur_idx += length
        return zipped_target

    def forward(self, *input):
        """Compute the CTC loss for (gt, pred), each a (labels, lengths) pair.

        pred[0] is treated as (T, N, C) raw logits and every sample is given
        the full T input steps ("pad mode").
            # TODO(review): confirm whether per-sample pred[1] lengths should
            # be used instead of the uniform full-T lengths.

        Raises:
            RuntimeError: if the loss is NaN (usually a length mismatch).
        """
        gt, pred = input[0], input[1]

        with torch.no_grad():
            acc = get_acc_from_logits_and_gt(gt, pred)

        # torch.full_like replaces the removed torch.fill_(torch.ones_like(...))
        # idiom: every sample uses the full sequence length T = pred[0].shape[0].
        input_lengths = torch.full_like(pred[1], pred[0].shape[0]).int()
        loss = self.ctc_loss(torch.log_softmax(pred[0], dim=-1), gt[0],
                             input_lengths,
                             gt[1].int())
        GlobMgr.metric_loger.update(acc, float(loss))
        if torch.isnan(loss):
            raise RuntimeError("CTC loss is NaN (check transcript/input lengths)")
        return loss


class FOTSLoss(nn.Module):
    """Combined FOTS loss: detection (geometry + classification) and recognition.

    Which terms are active depends on config['model']['mode']:
      'recognition' - CTC recognition loss only,
      'detection'   - detection losses only,
      'united'      - both (recognition only when transcripts are present).
    """

    def __init__(self, config):
        super(FOTSLoss, self).__init__()
        self.mode = config['model']['mode']
        self.detectionLoss = DetectionLoss()
        # (sic) attribute name kept misspelled for checkpoint/caller compatibility.
        self.recogitionLoss = RecognitionLoss()
        # Weight of the recognition term relative to detection.
        self.lamb = 10

    def forward(self, y_true_cls, y_pred_cls,
                y_true_geo, y_pred_geo,
                y_true_recog, y_pred_recog,
                training_mask):
        """Return dict(reg_loss=..., cls_loss=..., recog_loss=...) per self.mode.

        Raises:
            ValueError: if self.mode is not a supported mode (previously this
                fell through and crashed with NameError on unbound locals).
            RuntimeError: if the recognition loss goes negative (previously an
                ipdb breakpoint; indicates broken transcript lengths).
        """
        if self.mode == 'recognition':
            recognition_loss = self.lamb * self.recogitionLoss(y_true_recog, y_pred_recog)
            reg_loss = torch.tensor([0.], device=recognition_loss.device)
            cls_loss = torch.tensor([0.], device=recognition_loss.device)
        elif self.mode == 'detection':
            reg_loss, cls_loss = self.detectionLoss(y_true_cls, y_pred_cls,
                                                    y_true_geo, y_pred_geo, training_mask)
            recognition_loss = torch.tensor([0.], device=reg_loss.device)
        elif self.mode == 'united':
            reg_loss, cls_loss = self.detectionLoss(y_true_cls, y_pred_cls,
                                                    y_true_geo, y_pred_geo, training_mask)
            # Recognition is optional in united mode: batches without valid
            # transcripts contribute a zero recognition loss (fixes the
            # previously-unbound recognition_loss in that branch).
            recognition_loss = torch.tensor([0.], device=reg_loss.device)
            if y_true_recog:
                recognition_loss = self.lamb * self.recogitionLoss(y_true_recog, y_pred_recog)
                if recognition_loss < 0:
                    raise RuntimeError("negative recognition loss: check transcript lengths")
        else:
            raise ValueError("unknown loss mode: {}".format(self.mode))

        return dict(reg_loss=reg_loss, cls_loss=cls_loss, recog_loss=recognition_loss)


if __name__ == '__main__':
    # Smoke test: run the recognition loss on a saved (gt, pred) pair
    # dumped to disk during a previous training run.
    gt = torch.load("gt.pt")
    pred = torch.load("pred.pt")

    recognition_loss = RecognitionLoss()
    print(recognition_loss(gt, pred))
