"""
Reference1: Automatic detection of sub-km craters in high resolution planetary images 
Reference2: Detection of sub-kilometer craters in high resolution planetary images using shape and texture features  
"""

from typing import Iterator, Optional

import cv2
import numpy as np
import torch

from models.detect.base import BaseDetector


class BandeiraDetector(torch.nn.Module):
    """Classical crater detector after Bandeira et al.

    Craters are found by extracting small bright (highlight) and dark
    (shadow) shapes from a grayscale image, filtering them by area,
    contrast ("power") and compactness (first Hu moment), then pairing
    each highlight shape with a nearby, similarly-sized shadow shape.
    The union of a matched pair is reported as one crater bounding box.

    The module holds no learnable parameters; it only wraps the classical
    pipeline behind an ``nn.Module`` interface.
    """

    def __init__(self, *args, **kwargs) -> None:
        # *args/**kwargs are accepted (and ignored) so the constructor stays
        # signature-compatible with learned detectors in this project.
        super().__init__()

    def forward(self, raw: torch.Tensor) -> Iterator[torch.Tensor]:
        """Detect craters for a batch of images.

        Arguments:
            raw: shape=(batch_size, 3, height, width); values assumed to be
                normalized to [-1, 1] -- TODO confirm against the data loader
                (the code maps them back via (x + 1) * 127.5).
        Yields:
            One float32 tensor of shape (n, 4) per image, with boxes in
            (x1, y1, x2, y2) pixel coordinates.
        """
        for img in map(lambda item: item.permute(1, 2, 0).cpu().numpy(), raw):
            # Undo the [-1, 1] normalization back to uint8 [0, 255].
            img = ((img + 1) * 127.5).astype("uint8")
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            bboxs = list(self.detect(img))
            if bboxs:
                out = torch.tensor(bboxs, dtype=torch.float32, device=raw.device)
            else:
                # torch.tensor([]) would have shape (0,); keep an explicit
                # (0, 4) so downstream code can rely on the box dimension.
                out = torch.zeros((0, 4), dtype=torch.float32, device=raw.device)
            yield out.to(memory_format=torch.contiguous_format)

    def detect(self, image: np.ndarray, ind: Optional[int] = None) -> Iterator[list]:
        """Detect craters in a single grayscale uint8 image.

        Arguments:
            image: 2-D array of shape (rows, cols), dtype uint8.
            ind: optional image index; when given it is prepended to each box.
        Yields:
            [x1, y1, x2, y2] per crater, or [ind, x1, y1, x2, y2] when
            ``ind`` is not None.
        """
        # Extract bright (sun-facing) and dark (shadowed) rim shapes; the
        # shadow pass simply runs the same extractor on the negated image.
        highlight = self.extract(image)
        shadow = self.extract(255 - image)
        height, width = image.shape[:2]
        for x, y, w, h in self._match(image, highlight, shadow):
            # Keep only boxes strictly inside the frame. x is a column index
            # (bounded by width) and y a row index (bounded by height); the
            # original check had the two bounds swapped, which was wrong for
            # non-square images.
            if x > 0 and y > 0 and x + w < width and y + h < height:
                if ind is not None:
                    yield [ind, x, y, x + w, y + h]
                else:
                    # Bug fix: the un-indexed box used to be yielded even when
                    # ``ind`` was given, duplicating every detection.
                    yield [x, y, x + w, y + h]

    def extract(self, image):
        """Extract candidate highlight shapes from ``image`` as a binary mask.

        Calling this on ``255 - image`` extracts shadow shapes instead.
        Shapes failing the area filter (< 30 px) or the power/contrast
        filter (area * contrast^2 < 1000) are erased from the mask.
        """
        # Remove the large-scale background with a wide median filter so only
        # small, local brightness structures survive the threshold.
        bg = cv2.medianBlur(image, 51)
        bg_free = cv2.subtract(image, bg)
        _, highlight = cv2.threshold(bg_free, 10, 255, cv2.THRESH_BINARY)
        cc_num, labels, stats, _ = cv2.connectedComponentsWithStats(highlight)
        # A single 3x3 dilation produces the 8-connected neighborhood ring.
        kernel = np.ones((3, 3), np.uint8)

        for i in range(1, cc_num):  # label 0 is the background
            ha = int(image[labels == i].max())  # peak intensity inside the shape
            area = int(stats[i, cv2.CC_STAT_AREA])
            component = highlight.copy()
            component[labels != i] = 0
            ring = cv2.dilate(component, kernel) - component
            if not np.any(ring):
                # Degenerate case (shape spans the whole frame): no neighbor
                # pixels exist, so the contrast filter cannot pass; previously
                # this crashed on max() of an empty selection.
                highlight[labels == i] = 0
                continue
            hb = int(image[ring > 0].max())  # peak intensity just outside
            power = area * (ha - hb) ** 2
            # Power filter: drop shapes lacking sufficient contrast.
            # Area filter: drop undersized shapes.
            if power < 1000 or area < 30:
                highlight[labels == i] = 0

        return highlight

    def _match(self, image, highlight, shadow):
        """Pair highlight shapes with nearby shadow shapes and yield the
        xywh bounding box of each matched pair.
        """
        h_num, h_labels, h_stats, h_cents = cv2.connectedComponentsWithStats(highlight)
        s_num, s_labels, s_stats, s_cents = cv2.connectedComponentsWithStats(shadow)
        matched = np.zeros(s_num, dtype=bool)
        pairs = []
        for i in range(1, h_num):  # label 0 is the background
            # Intensity-weighted Hu moments of the highlight shape; the first
            # moment serves as a compactness filter. Checked before the
            # distance computation so non-compact shapes are skipped early.
            temp_h = np.zeros_like(highlight)
            temp_h[h_labels == i] = image[h_labels == i]
            hu_h = cv2.HuMoments(cv2.moments(temp_h))
            if hu_h[0] > 3:
                continue
            # Distance(H, S) < 1.65 * sqrt(Area(H)); compared in squared form,
            # hence the 2.73 ~= 1.65^2 factor.
            distance = np.sum(np.power(s_cents - h_cents[i], 2), axis=1)
            candidates, *_ = np.where(distance < 2.73 * h_stats[i, cv2.CC_STAT_AREA])

            for j in candidates:
                if j == 0 or matched[j]:
                    continue
                rate = h_stats[i, cv2.CC_STAT_AREA] / s_stats[j, cv2.CC_STAT_AREA]
                # The regions H and S should have similar sizes.
                if rate > 4 or rate < 0.25:
                    continue
                temp_s = np.zeros_like(highlight)
                temp_s[s_labels == j] = image[s_labels == j]
                hu_s = cv2.HuMoments(cv2.moments(temp_s))
                if hu_s[0] > 3:
                    continue
                # The merged shape must be at least as compact as each part.
                temp = temp_h + temp_s
                hu = cv2.HuMoments(cv2.moments(temp))
                if hu[0] <= 3 and hu[0] <= hu_h[0] and hu[0] <= hu_s[0]:
                    pairs.append((i, j))
                    matched[j] = True
                    break

        for i, j in pairs:
            yield self.union(h_stats[i, :4], s_stats[j, :4])

    def union(self, h_xywh, s_xywh):
        """Return the xywh bounding box enclosing both xywh input boxes."""
        left_h, top_h, width_h, height_h = h_xywh
        left_s, top_s, width_s, height_s = s_xywh
        left = min(left_h, left_s)
        top = min(top_h, top_s)
        right = max(left_h + width_h, left_s + width_s)
        bottom = max(top_h + height_h, top_s + height_s)
        return left, top, right - left, bottom - top
