from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from sgreg.ops.transformation import apply_transform
from sgreg.registration import WeightedProcrustes


class LocalGlobalRegistration(nn.Module):
    def __init__(
        self,
        k: int,
        acceptance_radius: float,
        max_instance_selection: bool,
        mutual: bool = True,
        confidence_threshold: float = 0.05,
        use_dustbin: bool = False,
        use_global_score: bool = False,
        correspondence_threshold: int = 3,
        correspondence_limit: Optional[int] = None,
        num_refinement_steps: int = 5,
    ):
        r"""Point Matching with Local-to-Global Registration.

        Args:
            k (int): top-k selection for point matching.
            acceptance_radius (float): acceptance radius (inlier threshold) for LGR.
            max_instance_selection (bool): if True, estimate one transform per patch
                correspondence and keep the hypothesis with the most inliers before
                global refinement; if False, refine directly from the raw scores.
            mutual (bool=True): mutual or non-mutual matching.
            confidence_threshold (float=0.05): ignore matches whose scores are below this threshold.
            use_dustbin (bool=False): whether dustbin row/column is used in the score matrix.
            use_global_score (bool=False): whether use patch correspondence scores.
            correspondence_threshold (int=3): minimal number of correspondences for each patch correspondence.
            correspondence_limit (Optional[int]=None): maximal number of verification correspondences.
            num_refinement_steps (int=5): number of refinement steps.
        """
        super().__init__()
        self.k = k
        self.acceptance_radius = acceptance_radius
        self.max_instance_selection = max_instance_selection
        self.mutual = mutual
        self.confidence_threshold = confidence_threshold
        self.use_dustbin = use_dustbin
        self.use_global_score = use_global_score
        self.correspondence_threshold = correspondence_threshold
        self.correspondence_limit = correspondence_limit
        self.num_refinement_steps = num_refinement_steps
        self.procrustes = WeightedProcrustes(return_transform=True)

    def compute_correspondence_matrix(self, score_mat, ref_knn_masks=None, src_knn_masks=None):
        r"""Compute the boolean correspondence matrix for each patch correspondence.

        A pair (i, j) is selected when its score is within the top-k of its row
        (reference side) and/or column (source side) and exceeds
        ``confidence_threshold``. With ``mutual=True`` both sides must agree.

        Args:
            score_mat (Tensor): (B, K, K), or (B, K+1, K+1) if it uses dustbin.
            ref_knn_masks (BoolTensor=None): (B, K) validity mask of reference knn points.
            src_knn_masks (BoolTensor=None): (B, K) validity mask of source knn points.

        Returns:
            corr_mat (BoolTensor): (B, K, K) selected correspondences.
        """
        device = score_mat.device  # follow the input's device instead of assuming CUDA
        batch_size, ref_length, src_length = score_mat.shape
        batch_indices = torch.arange(batch_size, device=device)

        # correspondences from the reference side: top-k along each row
        ref_topk_scores, ref_topk_indices = score_mat.topk(k=self.k, dim=2)  # (B, N, K)
        ref_batch_indices = batch_indices.view(batch_size, 1, 1).expand(-1, ref_length, self.k)  # (B, N, K)
        ref_indices = (
            torch.arange(ref_length, device=device).view(1, ref_length, 1).expand(batch_size, -1, self.k)
        )  # (B, N, K)
        ref_score_mat = torch.zeros_like(score_mat)
        ref_score_mat[ref_batch_indices, ref_indices, ref_topk_indices] = ref_topk_scores
        ref_corr_mat = torch.gt(ref_score_mat, self.confidence_threshold)

        # correspondences from the source side: top-k along each column
        src_topk_scores, src_topk_indices = score_mat.topk(k=self.k, dim=1)  # (B, K, N)
        src_batch_indices = batch_indices.view(batch_size, 1, 1).expand(-1, self.k, src_length)  # (B, K, N)
        src_indices = (
            torch.arange(src_length, device=device).view(1, 1, src_length).expand(batch_size, self.k, -1)
        )  # (B, K, N)
        src_score_mat = torch.zeros_like(score_mat)
        src_score_mat[src_batch_indices, src_topk_indices, src_indices] = src_topk_scores
        src_corr_mat = torch.gt(src_score_mat, self.confidence_threshold)

        # merge results from two sides
        if self.mutual:
            corr_mat = torch.logical_and(ref_corr_mat, src_corr_mat)
        else:
            corr_mat = torch.logical_or(ref_corr_mat, src_corr_mat)

        if self.use_dustbin:
            # drop the dustbin row/column so the matrix matches the point counts
            corr_mat = corr_mat[:, :-1, :-1]

        if ref_knn_masks is not None and src_knn_masks is not None:
            # invalidate correspondences that involve padded (invalid) knn points
            mask_mat = torch.logical_and(ref_knn_masks.unsqueeze(2), src_knn_masks.unsqueeze(1))
            corr_mat = torch.logical_and(corr_mat, mask_mat)

        return corr_mat

    @staticmethod
    def convert_to_batch(ref_corr_points, src_corr_points, corr_scores, chunks):
        r"""Convert stacked correspondences to batched points.

        The extracted dense correspondences from all patch correspondences are stacked. However, to compute the
        transformations from all patch correspondences in parallel, the dense correspondences need to be reorganized
        into a batch.

        Args:
            ref_corr_points (Tensor): (C, 3)
            src_corr_points (Tensor): (C, 3)
            corr_scores (Tensor): (C,)
            chunks (List[Tuple[int, int]]): the starting index and ending index of each patch correspondences.

        Returns:
            batch_ref_corr_points (Tensor): (B, K, 3), padded with zeros.
            batch_src_corr_points (Tensor): (B, K, 3), padded with zeros.
            batch_corr_scores (Tensor): (B, K), padded with zeros.
        """
        device = ref_corr_points.device  # follow the input's device instead of assuming CUDA
        batch_size = len(chunks)
        indices = torch.cat([torch.arange(x, y) for x, y in chunks], dim=0).to(device)
        ref_corr_points = ref_corr_points[indices]  # (total, 3)
        src_corr_points = src_corr_points[indices]  # (total, 3)
        corr_scores = corr_scores[indices]  # (total,)

        # pad every chunk to the size of the largest one, then scatter each chunk
        # into its row of a (B * max_corr) flat buffer
        max_corr = np.max([y - x for x, y in chunks])
        target_chunks = [(i * max_corr, i * max_corr + y - x) for i, (x, y) in enumerate(chunks)]
        indices = torch.cat([torch.arange(x, y) for x, y in target_chunks], dim=0).to(device)
        indices0 = indices.unsqueeze(1).expand(indices.shape[0], 3)  # (total,) -> (total, 3)
        indices1 = torch.arange(3, device=device).unsqueeze(0).expand(indices.shape[0], 3)  # (3,) -> (total, 3)

        batch_ref_corr_points = torch.zeros(batch_size * max_corr, 3, device=device)
        batch_ref_corr_points.index_put_([indices0, indices1], ref_corr_points)
        batch_ref_corr_points = batch_ref_corr_points.view(batch_size, max_corr, 3)

        batch_src_corr_points = torch.zeros(batch_size * max_corr, 3, device=device)
        batch_src_corr_points.index_put_([indices0, indices1], src_corr_points)
        batch_src_corr_points = batch_src_corr_points.view(batch_size, max_corr, 3)

        batch_corr_scores = torch.zeros(batch_size * max_corr, device=device)
        batch_corr_scores.index_put_([indices], corr_scores)
        batch_corr_scores = batch_corr_scores.view(batch_size, max_corr)

        return batch_ref_corr_points, batch_src_corr_points, batch_corr_scores

    def recompute_correspondence_scores(self, ref_corr_points, src_corr_points, corr_scores, estimated_transform):
        r"""Zero out the scores of outlier correspondences under the estimated transform.

        A correspondence is an inlier when the residual between the reference point
        and the transformed source point is below ``acceptance_radius``.
        """
        aligned_src_corr_points = apply_transform(src_corr_points, estimated_transform)
        corr_residuals = torch.linalg.norm(ref_corr_points - aligned_src_corr_points, dim=1)
        inlier_masks = torch.lt(corr_residuals, self.acceptance_radius)
        new_corr_scores = corr_scores * inlier_masks.float()
        return new_corr_scores

    def extract_correspondences(self, ref_knn_points, src_knn_points, score_mat, corr_mat):
        r"""Gather the dense point correspondences selected by ``corr_mat``.

        Returns:
            global_ref_corr_points (Tensor): (P, 3)
            global_src_corr_points (Tensor): (P, 3)
            global_corr_scores (Tensor): (P,)
            batch_indices (LongTensor): (P,) patch index of each correspondence
                (row-major, so correspondences of the same patch are consecutive).
        """
        batch_indices, ref_indices, src_indices = torch.nonzero(corr_mat, as_tuple=True)
        global_ref_corr_points = ref_knn_points[batch_indices, ref_indices]  # (P, 3)
        global_src_corr_points = src_knn_points[batch_indices, src_indices]  # (P, 3)
        global_corr_scores = score_mat[batch_indices, ref_indices, src_indices]  # (P,)

        return global_ref_corr_points, global_src_corr_points, global_corr_scores, batch_indices

    def local_to_global_registration(self, ref_knn_points, src_knn_points, score_mat, corr_mat):
        r"""Estimate the transform via local hypotheses followed by global refinement.

        Args:
            ref_knn_points (Tensor): (B, K, 3), e.g. (128, 64, 3)
            src_knn_points (Tensor): (B, K, 3)
            score_mat (Tensor): (B, K, K)
            corr_mat (Tensor): (B, K, K), e.g. (128, 64, 64), top-k selection mask

        Returns:
            Tuple of (ref_corr_points, src_corr_points, corr_scores, batch_indices,
            estimated_transform) where estimated_transform is (4, 4).
        """
        if ref_knn_points.ndim > 3:
            ref_knn_points = ref_knn_points.squeeze(0)
        if src_knn_points.ndim > 3:
            src_knn_points = src_knn_points.squeeze(0)

        global_ref_corr_points, global_src_corr_points, global_corr_scores, batch_indices = self.extract_correspondences(
            ref_knn_points, src_knn_points, score_mat, corr_mat
        )

        # build verification set (optionally capped to the highest-scoring correspondences)
        if self.correspondence_limit is not None and global_corr_scores.shape[0] > self.correspondence_limit:
            corr_scores, sel_indices = global_corr_scores.topk(k=self.correspondence_limit, largest=True)
            ref_corr_points = global_ref_corr_points[sel_indices]
            src_corr_points = global_src_corr_points[sel_indices]
        else:
            ref_corr_points = global_ref_corr_points
            src_corr_points = global_src_corr_points
            corr_scores = global_corr_scores

        if self.max_instance_selection:
            # compute starting and ending index of each patch correspondence.
            # torch.nonzero is row-major, so the correspondences from the same patch correspondence are consecutive.
            # find the first occurrence of each batch index, then the chunk of this batch can be obtained.
            unique_masks = torch.ne(batch_indices[1:], batch_indices[:-1])
            unique_indices = torch.nonzero(unique_masks, as_tuple=True)[0] + 1
            unique_indices = unique_indices.detach().cpu().numpy().tolist()
            unique_indices = [0] + unique_indices + [batch_indices.shape[0]]
            # keep only patches with enough correspondences to estimate a transform
            chunks = [
                (x, y) for x, y in zip(unique_indices[:-1], unique_indices[1:]) if y - x >= self.correspondence_threshold
            ]

            batch_size = len(chunks)
            if batch_size > 0:
                # local registration: one transform hypothesis per surviving patch
                batch_ref_corr_points, batch_src_corr_points, batch_corr_scores = self.convert_to_batch(
                    global_ref_corr_points, global_src_corr_points, global_corr_scores, chunks
                )  # (B, K', 3), (B, K', 3), (B, K'), e.g. K'=35
                batch_transforms = self.procrustes(batch_src_corr_points, batch_ref_corr_points, batch_corr_scores)
                # score every hypothesis on the verification set and keep the best
                batch_aligned_src_corr_points = apply_transform(src_corr_points.unsqueeze(0), batch_transforms)
                batch_corr_residuals = torch.linalg.norm(
                    ref_corr_points.unsqueeze(0) - batch_aligned_src_corr_points, dim=2
                )
                batch_inlier_masks = torch.lt(batch_corr_residuals, self.acceptance_radius)  # (B, P)
                best_index = batch_inlier_masks.sum(dim=1).argmax()
                cur_corr_scores = corr_scores * batch_inlier_masks[best_index].float()  # (P,)
            else:
                # degenerate: initialize transformation with all correspondences
                estimated_transform = self.procrustes(src_corr_points, ref_corr_points, corr_scores)
                cur_corr_scores = self.recompute_correspondence_scores(
                    ref_corr_points, src_corr_points, corr_scores, estimated_transform
                )
        else:
            cur_corr_scores = corr_scores

        # global refinement: alternate between Procrustes fit and inlier re-weighting.
        # NOTE: each step re-masks the original `corr_scores`, not `cur_corr_scores`,
        # so previously rejected correspondences can be recovered.
        estimated_transform = self.procrustes(src_corr_points, ref_corr_points, cur_corr_scores)
        for _ in range(self.num_refinement_steps - 1):
            cur_corr_scores = self.recompute_correspondence_scores(
                ref_corr_points, src_corr_points, corr_scores, estimated_transform
            )
            estimated_transform = self.procrustes(src_corr_points, ref_corr_points, cur_corr_scores)

        return global_ref_corr_points, global_src_corr_points, global_corr_scores, batch_indices, estimated_transform

    def forward(
        self,
        ref_knn_points,
        src_knn_points,
        score_mat,
        corrs_only=False,
        global_scores=None,
        ref_knn_masks=None,
        src_knn_masks=None,
    ):
        r"""Point Matching Module forward propagation with Local-to-Global registration.
            There are B pairs of matched nodes.
        Args:
            ref_knn_points (Tensor): (B, K, 3)
            src_knn_points (Tensor): (B, K, 3)
            ref_knn_masks (BoolTensor): (B, K)
            src_knn_masks (BoolTensor): (B, K)
            score_mat (Tensor): (B, K, K) or (B, K + 1, K + 1), log likelihood
            corrs_only (bool=False): if True, skip registration and return an identity transform.
            global_scores (Tensor): (B,), required when ``use_global_score`` is True.

        Returns:
            ref_corr_points: torch.Tensor (C, 3)
            src_corr_points: torch.Tensor (C, 3)
            corr_scores: torch.Tensor (C,)
            batch_indices: torch.LongTensor (C,)
            estimated_transform: torch.Tensor (4, 4)
        """
        if score_mat.ndim > 3:
            print('score mat in abnormal shape', score_mat.shape)
            score_mat = score_mat.squeeze(0)
        # score_mat holds log likelihoods; convert to probabilities
        score_mat = torch.exp(score_mat)

        corr_mat = self.compute_correspondence_matrix(score_mat,
                                                      ref_knn_masks,
                                                      src_knn_masks)  # (B, K, K)

        if self.use_dustbin:
            score_mat = score_mat[:, :-1, :-1]
        if self.use_global_score:
            score_mat = score_mat * global_scores.view(-1, 1, 1)
        score_mat = score_mat * corr_mat.float()

        if corrs_only:
            ref_corr_points, src_corr_points, corr_scores, batch_indices = self.extract_correspondences(
                ref_knn_points, src_knn_points, score_mat, corr_mat)
            estimated_transform = torch.eye(4, device=ref_knn_points.device)
        else:
            ref_corr_points, src_corr_points, corr_scores, batch_indices, estimated_transform = self.local_to_global_registration(
                ref_knn_points, src_knn_points, score_mat, corr_mat)

        return ref_corr_points, src_corr_points, corr_scores, batch_indices, estimated_transform
