import time

import torch
from tqdm import tqdm

import numpy as np

from yolox.utils import postprocess, time_synchronized, synchronize, xyxy2xywh
from loguru import logger


def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'):
    """Compute the pairwise IoU matrix between two sets of boxes.

    Args:
        bboxes1: (N, 4) array of boxes.
        bboxes2: (M, 4) array of boxes.
        bbox_mode: 'xywh' converts both sets to corner format first;
            any other value treats the inputs as corner (x1, y1, x2, y2)
            boxes as-is.

    Returns:
        (N, M) array whose [i, j] entry is IoU(bboxes1[i], bboxes2[j]).
    """
    assert bboxes1.ndim == 2 and bboxes1.shape[1] == 4
    assert bboxes2.ndim == 2 and bboxes2.shape[1] == 4

    # work on copies so callers' arrays are never mutated
    boxes_a = bboxes1.copy()
    boxes_b = bboxes2.copy()

    if bbox_mode == 'xywh':
        # (x, y, w, h) -> (x1, y1, x2, y2)
        boxes_a[:, 2:] += boxes_a[:, :2]
        boxes_b[:, 2:] += boxes_b[:, :2]

    ax1, ay1, ax2, ay2 = np.split(boxes_a, 4, axis=1)
    bx1, by1, bx2, by2 = np.split(boxes_b, 4, axis=1)

    # broadcast (N, 1) against (1, M) to get every pairwise intersection
    ix1 = np.maximum(ax1, bx1.T)
    iy1 = np.maximum(ay1, by1.T)
    ix2 = np.minimum(ax2, bx2.T)
    iy2 = np.minimum(ay2, by2.T)

    # the +1 treats coordinates as inclusive pixel indices
    inter = np.maximum(ix2 - ix1 + 1, 0) * np.maximum(iy2 - iy1 + 1, 0)
    area_a = (ax2 - ax1 + 1) * (ay2 - ay1 + 1)
    area_b = (bx2 - bx1 + 1) * (by2 - by1 + 1)
    return inter / (area_a + area_b.T - inter)


def f_beta(tp, fp, fn, beta=2):
    """Return the F-beta score computed from raw counts.

    Args:
        tp: true-positive count.
        fp: false-positive count.
        fn: false-negative count.
        beta: recall weight; the default beta=2 (F2) favors recall.

    Returns:
        F-beta in [0, 1]. Returns 0.0 when all counts are zero, which
        previously raised ZeroDivisionError (reachable when every image
        has neither ground truths nor predictions).
    """
    denom = (1 + beta ** 2) * tp + beta ** 2 * fn + fp
    if denom == 0:
        # nothing predicted and nothing expected: define the score as 0.0
        return 0.0
    return (1 + beta ** 2) * tp / denom


def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False):
    """Greedily match predictions against ground truths at one IoU threshold.

    Predictions are expected pre-sorted by descending confidence. Each
    prediction claims (and removes) the ground-truth box it overlaps most;
    a claim counts as TP only when that overlap strictly exceeds ``iou_th``.

    NOTE(review): boxes are compared with bbox_mode='xyxy' here while the
    upstream docstrings describe xywh inputs — confirm the caller really
    supplies corner-format boxes. ``verbose`` is accepted but unused.

    Args:
        gt_bboxes: (N, 4) array of ground-truth boxes.
        pred_bboxes: (M, 5) array of conf+box rows, conf-descending.
        iou_th: IoU threshold a match must exceed.
        verbose: unused; kept for signature compatibility.

    Returns:
        (tp, fp, fn) counts for this threshold.
    """
    remaining_gt = gt_bboxes.copy()
    preds = pred_bboxes.copy()

    tp = 0
    fp = 0
    for idx, pred in enumerate(preds):  # fixed in ver.7
        ious = calc_iou(remaining_gt, pred[None, 1:], 'xyxy')
        if ious.max() > iou_th:
            tp += 1
            remaining_gt = np.delete(remaining_gt, ious.argmax(), axis=0)
        else:
            fp += 1
        if not len(remaining_gt):
            # every ground truth is consumed: remaining predictions are FPs
            fp += len(preds) - (idx + 1)  # fix in ver.7
            break

    return tp, fp, len(remaining_gt)


def calc_is_correct(gt_bboxes, pred_bboxes):
    """Accumulate TP/FP/FN counts for one image across IoU thresholds.

    Args:
        gt_bboxes: (N, 4) np.array in xywh format
        pred_bboxes: (N, 5) np.array in conf+xywh format

    Returns:
        (tps, fps, fns) summed over the thresholds np.arange(0.3, 0.85, 0.05).
    """
    n_gt = len(gt_bboxes)
    n_pred = len(pred_bboxes)

    # degenerate images are resolved without any matching; when both sides
    # are empty this yields (0, 0, 0) since n_pred is also zero
    if n_gt == 0:
        return 0, n_pred, 0
    if n_pred == 0:
        return 0, 0, n_gt

    # highest-confidence predictions get first claim on ground truths
    order = np.argsort(pred_bboxes[:, 0])[::-1]
    ranked_preds = pred_bboxes[order]

    tps = fps = fns = 0
    for iou_th in np.arange(0.3, 0.85, 0.05):
        tp, fp, fn = calc_is_correct_at_iou_th(gt_bboxes, ranked_preds, iou_th)
        tps += tp
        fps += fp
        fns += fn
    return tps, fps, fns


def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False):
    """Compute the dataset-level F2 score over paired box lists.

    Args:
        gt_bboxes_list: list of (N, 4) np.array in xywh format
        pred_bboxes_list: list of (N, 5) np.array in conf+xywh format
        verbose: print per-image TP/FP/FN counts when True.

    Returns:
        The F2 score aggregated over every image.
    """
    total_tp = total_fp = total_fn = 0
    for gt_bboxes, pred_bboxes in zip(gt_bboxes_list, pred_bboxes_list):
        tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes)
        total_tp += tp
        total_fp += fp
        total_fn += fn
        if verbose:
            num_gt = len(gt_bboxes)
            num_pred = len(pred_bboxes)
            print(
                f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}')
    return f_beta(total_tp, total_fp, total_fn, beta=2)


class CustomEvaluator:
    """Evaluator that scores a detector with the F2 metric.

    Runs the model over ``data_loader``, collects per-image ground-truth
    and predicted boxes, and hands them to ``calc_f2_score``.
    """

    def __init__(
            self,
            data_loader,
            img_size: tuple,
            conf_thre: float,
            nms_thre: float,
            num_classes: int,
    ):
        """
        Args:
            data_loader: evaluation dataloader; its dataset must provide
                ``load_anno_from_ids`` (used inside ``evaluate``).
            img_size: image size after preprocess. images are resized
                to squares whose shape is (img_size, img_size).
            conf_thre: confidence threshold ranging from 0 to 1, passed
                to ``postprocess``.
            nms_thre: IoU threshold of non-max suppression ranging from 0
                to 1, passed to ``postprocess``.
            num_classes: number of detection classes, passed to
                ``postprocess``.
        """
        self.data_loader = data_loader
        self.img_size = img_size
        self.conf_thre = conf_thre
        self.nms_thre = nms_thre
        self.num_classes = num_classes

    def evaluate(
            self,
            model,
            distributed=False,
            half=False,
            trt_file=None,
            decoder=None,
            test_size=None,
    ):
        """Run inference over the whole loader and return the F2 score.

        Args:
            model: detector to evaluate; switched to eval mode (and fp16
                when ``half`` is True).
            distributed: NOTE(review) accepted for API compatibility but
                never read in this implementation.
            half: run the model and inputs in fp16 when True.
            trt_file: NOTE(review) accepted for API compatibility but
                never read in this implementation.
            decoder: optional callable applied to raw model outputs
                before postprocessing.
            test_size: NOTE(review) accepted for API compatibility but
                never read in this implementation.

        Returns:
            Tuple of (f2_score, timing-info string).
        """
        tensor_type = torch.cuda.HalfTensor if half else torch.cuda.FloatTensor
        model = model.eval()
        if half:
            model = model.half()
        gt_bboxes_list = []
        pred_bboxes_list = []
        progress_bar = tqdm

        inference_time = 0
        nms_time = 0
        # the last (possibly short) batch is excluded from timing below
        n_samples = max(len(self.data_loader) - 1, 1)

        for cur_iter, (imgs, _, info_imgs, ids) in enumerate(
                progress_bar(self.data_loader)
        ):
            with torch.no_grad():
                imgs = imgs.type(tensor_type)

                # skip the the last iters since batchsize might be not enough for batch inference
                is_time_record = cur_iter < len(self.data_loader) - 1
                if is_time_record:
                    start = time.time()

                outputs = model(imgs)
                if decoder is not None:
                    outputs = decoder(outputs, dtype=outputs.type())

                if is_time_record:
                    infer_end = time_synchronized()
                    inference_time += infer_end - start

                # confidence filtering + NMS; per-image entries may be None
                outputs = postprocess(
                    outputs, self.num_classes, self.conf_thre, self.nms_thre
                )

                if is_time_record:
                    nms_end = time_synchronized()
                    nms_time += nms_end - infer_end

                for i in range(len(outputs)):
                    # assumes info_imgs is (heights, widths) — TODO confirm
                    img_h = info_imgs[0][i]
                    img_w = info_imgs[1][i]
                    res, img_info, resized_info, file_name \
                        = self.data_loader \
                        .dataset.load_anno_from_ids(ids[i][0].item())
                    # NOTE(review): predictions below are converted to xywh
                    # while gt comes straight from load_anno_from_ids —
                    # confirm both formats agree for calc_f2_score
                    gt_bboxes_list.append(res[:, :4])
                    output = outputs[i]
                    if output is None:
                        # no detections survived postprocessing for this image
                        pred_bboxes_list.append([])
                    else:
                        output = output.cpu()

                        bboxes = output[:, 0:4]

                        # undo the resize back to the original image scale
                        scale = min(
                            self.img_size[0] / float(img_h),
                            self.img_size[1] / float(img_w)
                        )
                        bboxes /= scale
                        bboxes = xyxy2xywh(bboxes)

                        # final score = objectness * class confidence
                        scores = output[:, 4] * output[:, 5]

                        # rows are (score, x, y, w, h)
                        item = torch.cat((scores.unsqueeze(-1), bboxes),
                                         1).numpy()

                        pred_bboxes_list.append(item)

        statistics = torch.cuda.FloatTensor(
            [inference_time, nms_time, n_samples])

        logger.info("Evaluate in main process...")

        inference_time = statistics[0].item()
        nms_time = statistics[1].item()
        n_samples = statistics[2].item()

        # per-image averages in milliseconds
        a_infer_time = 1000 * inference_time / (
                n_samples * self.data_loader.batch_size)
        a_nms_time = 1000 * nms_time / (n_samples * self.data_loader.batch_size)

        time_info = ", ".join(
            [
                "Average {} time: {:.2f} ms".format(k, v)
                for k, v in zip(
                ["forward", "NMS", "inference"],
                [a_infer_time, a_nms_time, (a_infer_time + a_nms_time)],
            )]
        )

        info = time_info + "\n"

        synchronize()
        return calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=True), \
               info
