"""
三元组不变量描述子的基类
"""

import numpy as np
import torch
from ...base import Matching
from utils.ellipse import center_ellipse, ellipse_args
from utils.pose import pose_calculate
from scipy.optimize import linear_sum_assignment
import cv2
import os
from itertools import combinations
from scipy.optimize import linear_sum_assignment

os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True"


class TriadPyramidVote(Matching):
    """Base class for triad-invariant crater identification via pyramid voting.

    Pipeline (see `identify`): build triads from detected crater ellipses,
    search a precomputed descriptor catalog (`triad_search`), assemble
    four-crater "pyramids" (`build_pyramids` / `build_pyramids_`), vote on
    catalog assignments with the Hungarian algorithm (`vote_record`), and
    finally extend the match to the whole image via incremental reprojection
    (`extend_identify` / `increment_hungary`).
    """

    def __init__(self, catalog_path, device="cuda:0", **kwargs):
        # Matching.__init__ is expected to populate self.descriptor, self.idx
        # and self.catalog from catalog_path — TODO confirm against base class.
        super().__init__(catalog_path, **kwargs)
        self.descriptor = torch.tensor(self.descriptor).to(device)
        self.idx = torch.tensor(self.idx).to(device)
        # Descriptors and their catalog triad indices must align column-wise.
        assert self.descriptor.shape[1] == self.idx.shape[1]
        self.device = device
        ## Lookup table used to vectorize build_pyramids_.  The first axis
        ## selects which of the four triads serves as the base; each 5-tuple
        ## holds (vertex permutation[0:3], triad-cycle index[3],
        ## output-order-restoration index[4]).
        self.codebook = torch.tensor(
            [
                ((0, 1, 2, 0, 0), (0, 1, 2, 1, 1), (0, 1, 2, 2, 2), (0, 1, 2, 3, 3)),
                ((0, 1, 2, 1, 0), (0, 1, 2, 0, 1), (0, 2, 1, 2, 3), (0, 2, 1, 3, 2)),
                ((0, 1, 2, 2, 3), (1, 2, 0, 0, 0), (1, 2, 0, 1, 1), (1, 2, 0, 3, 2)),
                ((0, 1, 2, 3, 0), (0, 2, 1, 0, 3), (1, 2, 0, 2, 1), (0, 2, 1, 1, 2)),
            ]
        ).to(device)

    def triad_descriptor(
        self, i, j, k, Q1, Q2, Q3, U1, U2, U3, *args, factor=0.3, **kwargs
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Compute the triad descriptor; individual Q*/U* arguments must
        support vectorized (batched) evaluation.  Subclasses implement this.

        NOTE(review): call sites in this class disagree with this signature —
        `identify` passes `factor` positionally into the U1 slot and unpacks
        FIVE return values, while `increment_hungary` passes ten positional
        args and unpacks FOUR.  Confirm the concrete subclass signature.
        """
        raise NotImplementedError

    def triad_search(self, triad, index, th, confidence=0.95, max_size=200000):
        """
        Search the catalog for all triads within the threshold range and
        restore them to the caller's original vertex ordering.
        Arguments:
            triad (np.ndarray) : triad feature vectors, 30-dimensional
            index : permutation used to restore the original vertex order
            th (float) : search thresholds, same shape as the descriptors
        Returns:
            list of per-query candidate catalog-index tensors, or None when
            any query yields zero candidates or more than `max_size` of them.

        NOTE(review): `confidence` is currently unused in this method.
        """
        # Return the catalog triads whose error is within the matching
        # threshold on every feature dimension.
        assert triad.shape == th.shape
        # Collapse the 30 feature dimensions into one boolean per catalog
        # entry via a logical AND over the feature axis.
        ind = (
            (self.descriptor[:, None] - triad[..., None]).abs() <= th[..., None]
        ).all(dim=0)
        # Bail out when any query is too ambiguous (too many hits) or empty.
        if (ind.sum(dim=1) > max_size).any() or (ind.sum(dim=1) == 0).any():
            return None
        desc = []
        for i in range(ind.shape[0]):
            # Gather matching catalog triads, re-ordered via `index`.
            tri = self.idx[:, ind[i]][index[..., i]]
            desc.append(tri)
        return desc

    def build_pyramids_(self, ABC):
        """Assemble pyramids, choosing the shortest triad as the base.
        TODO: is the shortest triad always a valid base?
        ABC : triad descriptors, in input order IJK, IJL, JKL, IKL
        """
        min_len_id = np.argmin([tri.shape[1] for tri in ABC])
        # Decode codebook row: which triads to pair with the base (cycle),
        # how to permute their vertex rows (permute), and how to restore the
        # original output order (retrie).
        cycle = self.codebook[min_len_id, :, 3]
        permute = self.codebook[min_len_id, :, :3]
        retrie = self.codebook[min_len_id, :, 4]
        ## Dense index gymnastics: pairwise vertex-agreement masks between
        ## the base triad and each of the three side triads.
        L_IJ_ind = (
            ABC[cycle[0]][permute[0, 0], :, None] == ABC[cycle[1]][permute[1, 0]]
        ) & (ABC[cycle[0]][permute[0, 1], :, None] == ABC[cycle[1]][permute[1, 1]])
        L_JK_ind = (
            ABC[cycle[0]][permute[0, 1], :, None] == ABC[cycle[2]][permute[2, 0]]
        ) & (ABC[cycle[0]][permute[0, 2], :, None] == ABC[cycle[2]][permute[2, 1]])
        L_IK_ind = (
            ABC[cycle[0]][permute[0, 0], :, None] == ABC[cycle[3]][permute[3, 0]]
        ) & (ABC[cycle[0]][permute[0, 2], :, None] == ABC[cycle[3]][permute[3, 1]])
        # Base triads that agree with all three side triads.
        num_ind = torch.argwhere(
            (L_IJ_ind.sum(dim=1) > 0)
            & (L_JK_ind.sum(dim=1) > 0)
            & (L_IK_ind.sum(dim=1) > 0)
        )
        for it in num_ind:
            # Candidate fourth vertices proposed by each side triad.
            L_IJ = ABC[cycle[1]][permute[1, 2], L_IJ_ind[it[0]]]
            L_JK = ABC[cycle[2]][permute[2, 2], L_JK_ind[it[0]]]
            L_IK = ABC[cycle[3]][permute[3, 2], L_IK_ind[it[0]]]
            unique_elements, counts = torch.unique(
                torch.cat((L_IJ, L_JK, L_IK)), return_counts=True
            )
            # L is a valid apex only if all three side triads propose it.
            Ls = unique_elements[counts == 3]
            if Ls.shape[0] > 0:
                for L in Ls:
                    # Restore the original IJKL ordering.
                    out = torch.tensor(
                        (*ABC[cycle[0]][:, it[0]], L), device=self.device
                    )
                    yield out[retrie].cpu().numpy()

    def build_pyramids(self, ABC):
        """Assemble pyramids with IJK as the base (fully vectorized)."""
        IJK, IJL, JKL, IKL = ABC
        # Pairwise vertex-agreement masks between the base and each side triad.
        L_IJ_ind = (IJK[0, :, None] == IJL[0]) & (IJK[1, :, None] == IJL[1])
        L_JK_ind = (IJK[1, :, None] == JKL[0]) & (IJK[2, :, None] == JKL[1])
        L_IK_ind = (IJK[0, :, None] == IKL[0]) & (IJK[2, :, None] == IKL[1])
        num_ind = torch.argwhere(
            L_IJ_ind.any(dim=1) & L_JK_ind.any(dim=1) & L_IK_ind.any(dim=1)
        ).squeeze(1)
        if num_ind.shape[0] == 0:
            return []
        # Tighten the condition: keep only rows with exactly one match per side.
        ind = (
            (L_IJ_ind[num_ind].sum(dim=1) == 1)
            & (L_JK_ind[num_ind].sum(dim=1) == 1)
            & (L_IK_ind[num_ind].sum(dim=1) == 1)
        )
        # Gather the single fourth-vertex candidate proposed by each side.
        L_IJ = ABC[1][2, None].repeat_interleave(num_ind.shape[0], dim=0)[
            L_IJ_ind[num_ind] & ind[:, None]
        ]
        L_JK = ABC[2][2, None].repeat_interleave(num_ind.shape[0], dim=0)[
            L_JK_ind[num_ind] & ind[:, None]
        ]
        L_IK = ABC[3][2, None].repeat_interleave(num_ind.shape[0], dim=0)[
            L_IK_ind[num_ind] & ind[:, None]
        ]
        # All three sides must agree on the apex L.
        Ls_ind = (L_IJ == L_JK) & (L_IJ == L_IK)
        # Restore the original ordering.
        out = torch.stack((*IJK[:, num_ind[ind][Ls_ind]], L_IJ[Ls_ind]), dim=1)
        return out.cpu().numpy()

    def identify(self, params, uncertainty, factor, *args, max_try=5, **kwargs):
        """
        The input should be as close to real detector output as possible:
        a set of fitted ellipse parameters.  The output is the confirmed
        match, i.e. a list of ellipse IDs.
        Arguments:
            params (np.ndarray) : ellipse parameters for (x^2, xy, y^2, x, y, 1)
        Returns:
            list : matched ellipse ID

        NOTE(review): `max_try` is currently unused in this method.
        """
        # Sort craters by diameter (descending).
        params = np.array(params)
        elli = ellipse_args(params)
        diameters = elli[2] + elli[3]
        idx = np.argsort(-diameters)
        # Starting from the largest diameters, pick craters to build pyramids.
        if len(idx) < 4:
            return None
        i, j, k = np.array(list(combinations(idx, 3))).T
        # NOTE(review): `factor` is passed positionally into the base-class
        # U1 slot, and five values are unpacked — confirm the subclass
        # triad_descriptor signature matches this call.
        ijk, I123, th, index, valid = self.triad_descriptor(
            i, j, k, params[i], params[j], params[k], factor
        )
        if not valid.any():
            return None
        ijkl, IJKL = self.vote_record(
            ijk[..., valid], I123[..., valid], th[..., valid], index[..., valid]
        )
        if ijkl is not None and len(ijkl) >= 4:
            # After a first success, extend directly from the current points.
            success, result = self.extend_identify(
                params, uncertainty, ijkl, IJKL, *args, factor=factor, **kwargs
            )
            if success:
                return result
        return None

    def vote_record(self, ijk, I123, th, index, vote_th=6, N=20):
        """
        Determine the true matches by voting.
        Arguments:
            ijk (np.ndarray) : image-side triad vertex indices
            I123 (np.ndarray) : triad descriptors to search in the catalog
            th (np.ndarray) : per-dimension search thresholds
            index : ordering-restoration index forwarded to triad_search
            vote_th (int) : keep at most this many top-voted pairs
            N (int) : number of triads processed per batch
        Returns:
            (ijkl, IJKL) index arrays of the top-voted assignment, or
            (None, None) when fewer than four pairs received votes.
        """
        # votes[image_crater, catalog_crater] accumulates co-occurrence counts.
        votes = np.zeros((self.catalog.shape[0], self.catalog.shape[0]), dtype=np.int32)
        for it in range(0, ijk.shape[1], N):
            ABC = self.triad_search(
                I123[..., it : it + N], index[..., it : it + N], th[..., it : it + N]
            )
            if ABC is None:
                continue
            # Pair every image vertex with every catalog candidate it hit.
            abc = (
                ijk[:, it : it + N].repeat([x.shape[1] for x in ABC], axis=1).flatten()
            )
            ABC = torch.concat(ABC, dim=1).cpu().numpy().flatten()
            vals, counts = np.unique((abc, ABC), axis=1, return_counts=True)
            votes[vals[0], vals[1]] += counts
        ## Complete the identification assignment with the Hungarian algorithm
        ## (maximize votes via negated cost).
        ijkl, IJKL = linear_sum_assignment(-votes)
        # Optimal assignment.
        ## Keep the pairs with the highest vote counts (at most vote_th).
        candidate = votes[ijkl, IJKL]
        ind = np.argsort(-candidate)[:vote_th]
        ind = ind[candidate[ind] > 0]
        if ind.shape[0] < 4:
            return None, None
        else:
            return ijkl[ind], IJKL[ind]

    def extend_identify(
        self,
        params,
        uncertainty,
        ijkl,
        IJKL,
        K,
        factor,
        th: float,
        confidence: float,
        dist_th=2,
        **kwargs,
    ):
        """
        TODO Extend the match to all craters via incremental reprojection:
        1. Estimate a camera matrix P from the current four points ijkl
           (TODO: estimation may fail).
        2. From the current ijkl and a hyperparameter r, collect r
           neighboring candidate points, reproject them to the image plane
           with P; call them Qr.
        3. Use the Hungarian algorithm to match the Qr+4 reprojected points
           against all crater centers in the image (possibly more than Qr+4;
           TODO: discard assignments with excessive residuals).
        4. For each matched crater center, compute its invariants to reject
           potential noise points.
        4. The matched result becomes the new ijkl; re-estimate P and repeat
           steps 2-3 until everything is matched or the iteration limit hits.
        """
        # Return the first result that meets the requirements.
        # Extend the pyramid identification result to the whole image.
        if len(ijkl) < 4:
            return False, None
        cnts = np.array(center_ellipse(params)).T
        ijkl, IJKL = self.increment_hungary(
            params, uncertainty, ijkl, IJKL, K, factor, dist_th=dist_th
        )
        if len(ijkl) < 4:
            return False, None
        # Reprojection verification: PnP pose, then per-point pixel residual.
        points_3d = self.catalog[IJKL, :]
        obse_2d = cnts[ijkl, :]
        R, T = pose_calculate(K, points_3d, obse_2d)
        if R is None:
            return False, None
        uv = cv2.projectPoints(points_3d, R, T, K, np.zeros(4))[0].squeeze()
        ind = np.linalg.norm(uv - obse_2d, axis=1) < th
        # Accept when the inlier fraction reaches the confidence level.
        if ind.mean() >= confidence:
            return True, (ijkl[ind], IJKL[ind])
        else:
            return False, None

    def increment_hungary(
        self,
        params,
        uncertainty,
        ijkl: tuple,
        IJKL: tuple,
        K,
        factor,
        filter_invt=False,
        dist_th=2.0,
        confidence=0.8,
    ):
        """
        Extend the match to all craters via incremental reprojection:
        1. Estimate a camera matrix P from the current four points ijkl
           (TODO: estimation may fail).
        2. From the current ijkl and a hyperparameter r, collect r
           neighboring candidate points and reproject them to the image
           plane with P; call them Qr.
        3. Use the Hungarian algorithm to match the Qr+4 reprojected points
           against all crater centers in the image (possibly more than Qr+4).
        4. For each matched crater center, compute its invariants to reject
           potential noise points.
        4. The matched result becomes the new ijkl; re-estimate P and repeat
           steps 2-3 until everything is matched or the iteration limit hits.
        Arguments:
            ijkl (tuple) : indices of matched craters in the current image
            IJKL (tuple) : catalog indices of those matched craters
            K (np.ndarray) : camera intrinsic matrix
            filter_invt (bool) : whether to run invariant-based verification
            dist_th (float) : match distance threshold (filter_invt=True only)
            factor (float) : invariant-verification threshold
                (filter_invt=True only)
            confidence (float) : confidence level for rejecting wrong matches
                (filter_invt=True only); recommended <= 0.9 — in low-noise
                settings values as low as 0.7 or even 0.65 work very well
        """
        # Estimate P.
        points_3d = self.catalog[IJKL, :]
        cnts = np.array(center_ellipse(params)).T
        obse_2d = cnts[ijkl, :]
        R, T = pose_calculate(K, points_3d, obse_2d)
        if R is None:
            return [], []
        # Reproject the whole catalog.
        uv = cv2.projectPoints(self.catalog, R, T, K, np.zeros(4))[0].squeeze()
        # Add every point that may lie inside the image plane to the
        # candidate list (image size taken as 2x the principal point).
        ind = (
            (uv[:, 0] < K[0, 2] * 2)
            & (uv[:, 0] >= 0)
            & (uv[:, 1] < K[1, 2] * 2)
            & (uv[:, 1] >= 0)
        )
        # Full list of candidate (projected) center points.
        cata_3d = uv[ind]
        cata_id = np.arange(self.catalog.shape[0])[ind]
        # All crater center points currently in the image.
        obse_id = np.arange(len(cnts))
        # Hungarian algorithm on the pairwise center distances.
        dist_matrix = np.linalg.norm(cata_3d[:, np.newaxis] - cnts, axis=2)
        # Compute the optimal assignment with the Hungarian algorithm.
        row_ind, col_ind = linear_sum_assignment(dist_matrix)
        # Optimal assignment.
        cata_id = cata_id[row_ind]
        obse_id = obse_id[col_ind]
        # Match residuals; also used below to separate real craters from
        # false detections.
        error = dist_matrix[row_ind, col_ind]
        # Invariant verification.
        ## The steps are:
        ## 1. Use the Hungarian match distances and the dist_th
        ##    hyperparameter to pick candidates within the distance range.
        ## 2. Pair up all candidate craters and compute invariants.
        ## 3. From the pairing, look up the invariant pairs in the
        ##    navigation catalog.
        # Pick craters within the match distance range.
        ind = error < dist_th
        cata_id = cata_id[ind]
        obse_id = obse_id[ind]
        if filter_invt:
            # Hash table counting how often each crater pair mis-matches.
            err_num = np.zeros_like(obse_id, dtype=np.int16)
            # Fault-tolerance count threshold:
            # n - 0.5 - sqrt(th*n^2 - th*n + 0.25); derivation in the
            # 2024-09-05 report.
            num_th = (obse_id.shape[0] - 1) * (obse_id.shape[0] - 1) / 6
            num_th = (
                num_th
                - 0.5
                - np.sqrt(confidence * num_th * num_th - confidence * num_th + 0.25)
            )
            # Form triads randomly to reduce computation.
            if obse_id.shape[0] < 4:
                return [], []
            obse_id_permute = np.arange(obse_id.shape[0]).tolist()
            np.random.shuffle(obse_id_permute)
            while len(obse_id_permute) > 2:
                # Pair (i, j) against every remaining candidate k.
                i, j = obse_id_permute.pop(0), obse_id_permute.pop(0)
                k = obse_id_permute
                # NOTE(review): four return values unpacked here vs five in
                # `identify` — confirm the subclass triad_descriptor contract.
                ijk, i123, th, index = self.triad_descriptor(
                    i,
                    j,
                    k,
                    params[obse_id[i]],
                    params[obse_id[j]],
                    params[obse_id[k]],
                    uncertainty[obse_id[i]],
                    uncertainty[obse_id[j]],
                    uncertainty[obse_id[k]],
                    factor,
                )
                if ijk is None:
                    continue
                # Rows where the catalog triad ids exactly match the matched
                # catalog ids of each image triad.
                # NOTE(review): torch.Tensor.repeat takes *sizes, not axis= —
                # verify self.idx's type at this point (numpy vs torch).
                idx = np.argwhere(
                    (
                        self.idx[0, None].repeat(ijk.shape[1], axis=0)
                        - cata_id[ijk[0], None]
                        == 0
                    )
                    & (
                        self.idx[1, None].repeat(ijk.shape[1], axis=0)
                        - cata_id[ijk[1], None]
                        == 0
                    )
                    & (
                        self.idx[2, None].repeat(ijk.shape[1], axis=0)
                        - cata_id[ijk[2], None]
                        == 0
                    )
                )
                i123 = i123[:, idx[:, 0]]
                I123 = self.descriptor[:, idx[:, 1]]
                # Count, for i and j, the triads whose invariant deviation
                # exceeds the allowed fraction of dimensions.
                err_num[i] = (
                    err_num[i]
                    + (
                        (np.abs(i123 - I123) > th[:, idx[:, 0]]).sum(axis=0)
                        > (1 - confidence**2) * th.shape[0]
                    ).sum()
                )
                err_num[j] = (
                    err_num[j]
                    + (
                        (np.abs(i123 - I123) > th[:, idx[:, 0]]).sum(axis=0)
                        > (1 - confidence**2) * th.shape[0]
                    ).sum()
                )

            # Remove wrong matches.
            ind = err_num < num_th
            cata_id = cata_id[ind]
            obse_id = obse_id[ind]
        return obse_id, cata_id