from copy import deepcopy

import cv2
from mmengine.structures import InstanceData
import numpy as np
import torch
import torch.nn.functional as F

from ..utils import create_data_sample


class PostProcessor:
    """Convert raw detector outputs into per-image predictions.

    Takes the flat (possibly 1-D) tensors produced by an exported/deployed
    detector — detections, labels and, optionally, fixed-size instance
    masks — undoes the test-time padding and resizing recorded in each
    image's meta info, and emits per-image prediction dicts or
    ``DataSample`` objects.

    Args:
        cfg: mmengine-style config with ``model`` (including
            ``model.test_cfg`` and, for mask models, ``model.train_cfg.rcnn``)
            and ``test_dataloader`` sections.
    """

    def __init__(self, cfg):
        # A mask head can live under ``roi_head`` (two-stage detectors) or
        # directly on the model (single-stage detectors).
        # NOTE(review): if ``roi_head`` exists but carries no mask head, the
        # top-level ``mask_head`` is never checked — this mirrors the
        # original control flow; confirm it is intended.
        self.with_mask = False
        if hasattr(cfg.model, 'roi_head'):
            if (hasattr(cfg.model.roi_head, 'mask_head')
                    and cfg.model.roi_head.mask_head):
                self.with_mask = True
        elif hasattr(cfg.model, 'mask_head') and cfg.model.mask_head:
            self.with_mask = True

        test_cfg = cfg.model.test_cfg
        self.batch_size = cfg.test_dataloader.batch_size

        if self.with_mask:
            # Cascade heads keep one cfg per stage; the last stage defines
            # the mask resolution the model actually emits at test time.
            train_rcnn_cfg = cfg.model.train_cfg.rcnn
            if isinstance(train_rcnn_cfg, (list, tuple)):
                mask_size = train_rcnn_cfg[-1].mask_size
            else:
                mask_size = train_rcnn_cfg.mask_size
            if isinstance(mask_size, int):
                self.mask_size = (mask_size, mask_size)
            elif isinstance(mask_size, (list, tuple)):
                assert len(mask_size) == 2, 'Invalid format of `mask_size`, ' \
                    f'expected: (H, W) or [H, W], but got: {mask_size}'
                self.mask_size = tuple(mask_size)
            else:
                raise ValueError(f'`mask_size` is invalid: {mask_size}')

            self.mask_thr_binary = test_cfg.rcnn.mask_thr_binary

    def reshape_outputs(self, outputs):
        """Reshape flat 1-D outputs into batched tensors.

        Args:
            outputs (list[Tensor]): ``[dets, det_labels]`` or
                ``[dets, det_labels, masks]`` when masks are predicted.

        Returns:
            list[Tensor]: A deep copy with ``dets`` shaped (B, N, 5),
            ``det_labels`` shaped (B, N) and, if present, ``masks`` shaped
            (B, N, *mask_size). Already-batched tensors pass through
            unchanged.
        """
        outputs_ = deepcopy(outputs)
        if self.with_mask:
            assert len(outputs_) == 3, \
                'Outputs must contain three items: dets, det_labels, ' \
                'and masks.'
        else:
            # Fixed: this branch previously claimed "three item" for the
            # two-output case.
            assert len(outputs_) == 2, \
                'Outputs must contain two items: dets and det_labels.'

        # Some backends flatten the outputs; restore batch shape only then.
        if outputs_[0].ndim == 1:
            outputs_[0] = outputs_[0].reshape(self.batch_size, -1, 5)
        if outputs_[1].ndim == 1:
            outputs_[1] = outputs_[1].reshape(self.batch_size, -1)

        if self.with_mask and outputs_[2].ndim == 1:
            outputs_[2] = outputs_[2].reshape(
                self.batch_size, -1, *self.mask_size)
        return outputs_

    @staticmethod
    def process_bboxes(bboxes, metas):
        """Map predicted boxes back onto the original images (in place).

        Args:
            bboxes (Tensor): (B, N, 4) boxes in ``(x1, y1, x2, y2)`` order,
                in network-input coordinates.
            metas (list[dict]): Per-image meta info. May contain ``border``
                (padding, indexed as [0]=top, [2]=left) and
                ``scale_factor``; must contain ``ori_shape`` as (H, W).

        Returns:
            Tensor: The same tensor, shifted, rescaled and clipped to the
            original image bounds per image.
        """
        for i, info in enumerate(metas):
            if 'border' in info:
                # Remove the padding offset added around the original image.
                margin_top = info['border'][0]
                margin_left = info['border'][2]
                bboxes[i] -= torch.tensor([[
                    margin_left, margin_top, margin_left, margin_top]])
            if 'scale_factor' in info:
                # Undo test-time resizing so boxes fit the original image.
                # NOTE(review): assumes scale_factor is (h_scale, w_scale);
                # confirm the ordering against the dataset pipeline.
                h_scale, w_scale = info['scale_factor']
                bboxes[i] /= torch.tensor([[
                    w_scale, h_scale, w_scale, h_scale]])

            # Clip to the original image bounds: x-coords (columns 0, 2) to
            # width, y-coords (columns 1, 3) to height.
            orig_height, orig_width = info['ori_shape']
            bboxes[i, :, 0::2] = bboxes[i, :, 0::2].clip(0, orig_width)
            bboxes[i, :, 1::2] = bboxes[i, :, 1::2].clip(0, orig_height)

        return bboxes

    def process_masks(self, bboxes, masks, metas):
        """Paste fixed-size mask crops into full-resolution images.

        Args:
            bboxes (Tensor): (B, N, 4) boxes already mapped to original
                image coordinates.
            masks (Tensor): (B, N, H, W) raw per-box mask predictions.
            metas (list[dict]): Per-image meta info with ``ori_shape``
                as (H, W).

        Returns:
            list[Tensor]: One (N, ori_H, ori_W) mask tensor per image.
        """
        pasted_masks = []
        for i, info in enumerate(metas):
            img_masks = self._do_paste_mask(
                masks[i].unsqueeze(1),
                bboxes[i],
                img_h=info['ori_shape'][0],
                img_w=info['ori_shape'][1],
            )
            pasted_masks.append(img_masks)
        return pasted_masks

    def _do_paste_mask(self, masks, bboxes, img_h, img_w, skip_empty=False):
        """Paste instance masks according to bboxes.

        This implementation is modified from
        https://github.com/open-mmlab/mmdeploy/blob/v1.0.0/mmdeploy/codebase/

        All masks are pasted together in one ``grid_sample`` call over the
        whole image; compared to pasting one by one this does more work but
        is faster on COCO-scale data.

        Args:
            masks (Tensor): (N, 1, H, W) per-box mask predictions.
            bboxes (Tensor): (N, 4) boxes in image coordinates.
            img_h (int): Height of the image to be pasted.
            img_w (int): Width of the image to be pasted.
            skip_empty (bool): Only paste masks within the region that
                tightly bounds all bboxes and return that region only.
                An important optimization for CPU.

        Returns:
            Tensor: Pasted masks. Boolean of shape (N, h, w) when
            ``self.mask_thr_binary >= 0`` (thresholded), otherwise uint8 in
            [0, 255]. ``(h, w)`` is ``(img_h, img_w)`` unless
            ``skip_empty`` restricted pasting to the bounding region.
        """
        if skip_empty:
            # Tight integer region around all boxes, padded by one pixel.
            x0_int, y0_int = torch.clamp(
                bboxes.min(dim=0).values.floor()[:2] - 1,
                min=0).to(dtype=torch.int32)
            x1_int = torch.clamp(
                bboxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
            y1_int = torch.clamp(
                bboxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
        else:
            x0_int, y0_int = 0, 0
            x1_int, y1_int = img_w, img_h
        x0, y0, x1, y1 = torch.split(bboxes, 1, dim=1)  # each is Nx1

        N = masks.shape[0]

        # Pixel-center sample coordinates, normalized to [-1, 1] per box.
        img_y = torch.arange(y0_int, y1_int).to(torch.float32) + 0.5
        img_x = torch.arange(x0_int, x1_int).to(torch.float32) + 0.5
        img_y = (img_y - y0) / (y1 - y0) * 2 - 1
        img_x = (img_x - x0) / (x1 - x0) * 2 - 1

        # Degenerate (zero-area) boxes produce inf coordinates; zero them so
        # grid_sample stays well-defined.
        if torch.isinf(img_x).any():
            inds = torch.where(torch.isinf(img_x))
            img_x[inds] = 0
        if torch.isinf(img_y).any():
            inds = torch.where(torch.isinf(img_y))
            img_y[inds] = 0

        gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
        gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
        grid = torch.stack([gx, gy], dim=3)

        img_masks = F.grid_sample(
            masks.to(dtype=torch.float32), grid, align_corners=False)
        masks = img_masks[:, 0]

        # Binarize when a valid threshold is configured; otherwise keep
        # soft masks scaled to uint8.
        if self.mask_thr_binary >= 0:
            masks = (masks >= self.mask_thr_binary).to(dtype=torch.bool)
        else:
            masks = (masks * 255).to(dtype=torch.uint8)

        return masks

    def parse_outputs(self, outputs, metas, return_datasample=False):
        """Parse raw outputs into per-image predictions.

        Args:
            outputs (list): ``[dets, det_labels]`` or
                ``[dets, det_labels, masks]``; each item is a Tensor or a
                numpy array.
            metas (dict | list[dict]): Per-image meta info.
            return_datasample (bool): If True, wrap each prediction in a
                ``DataSample`` with ``pred_instances`` set.

        Returns:
            list[dict] | list: Per-image prediction dicts with keys
            ``bboxes``, ``scores``, ``labels`` (and ``masks`` for mask
            models), or DataSamples when ``return_datasample`` is True.
        """
        # Convert to tensors without mutating the caller's list
        # (the original modified ``outputs`` in place).
        outputs = [
            output if isinstance(output, torch.Tensor)
            else torch.from_numpy(output)
            for output in outputs
        ]
        outputs = self.reshape_outputs(outputs)
        if not isinstance(metas, list):
            metas = [metas]
        num_image = len(metas)

        # Columns 0-3 are box coordinates, the last column is the score.
        bboxes = self.process_bboxes(outputs[0][:num_image, :, :4], metas)
        scores = outputs[0][:num_image, :, -1]
        labels = outputs[1][:num_image]
        if self.with_mask:
            masks = self.process_masks(bboxes, outputs[2], metas)
        else:
            masks = []

        predictions = []
        for i in range(num_image):
            prediction = dict(bboxes=bboxes[i], scores=scores[i],
                              labels=labels[i])
            if masks:
                prediction['masks'] = masks[i]
            predictions.append(prediction)

        if not return_datasample:
            return predictions

        data_samples = []
        for prediction, info in zip(predictions, metas):
            data_sample = create_data_sample(info)
            data_sample.pred_instances = InstanceData(**prediction)
            data_samples.append(data_sample)

        return data_samples
