"""基于论文[1] Christian J. A. , Derksen H. , Watkins R. .Lunar crater identification in digital Images[J/OL].J. Astronaut. Sci.,2021,68(4):1056-1144

"""

from .triad import TriadHashBuilder
from utils.ellipse import radius_ellipse
import numpy as np
import pandas as pd


class ChristianHashBuilder(TriadHashBuilder):
    """Crater-triad hash builder based on projective conic invariants.

    Implements the descriptor of Christian, Derksen & Watkins,
    "Lunar crater identification in digital images",
    J. Astronaut. Sci., 2021, 68(4):1056-1144.
    """

    def invariant(self, i, j, k, Q1, Q2, Q3, N_s=None) -> tuple[np.ndarray, np.ndarray]:
        """Compute the three pairwise projective invariants for crater triads.

        Args:
            i, j: scalar crater indices of the first two craters of the triad.
            k: np.ndarray of candidate indices for the third crater, shape (N,).
            Q1, Q2: 3x3 conic matrices of craters ``i`` and ``j``.
            Q3: stacked conic matrices of the candidates, shape (N, 3, 3).
            N_s: optional cutoff; only candidates whose original index is
                below ``N_s`` are kept (``None`` means keep all).

        Returns:
            (ijk, I): index triples of shape (3, M) and the matching
            invariant values of shape (3, M), both reordered per column so
            the invariants are in ascending order.
        """
        # A truthiness test would wrongly treat N_s == 0 as "no limit";
        # compare against None explicitly.
        N_s = N_s if N_s is not None else np.inf
        N = Q3.shape[0]
        # Broadcast the two fixed conics against every candidate.
        Q1 = Q1[None].repeat(N, axis=0)
        Q2 = Q2[None].repeat(N, axis=0)
        d3 = np.mean(radius_ellipse(Q3), axis=0)
        ind = np.argsort(d3)
        # Keep only candidates whose original index is below N_s, ordered
        # by ascending mean ellipse radius.
        remain_ind = ind[ind < N_s]
        # Each batched inverse is needed twice below — compute it once.
        inv_Q1 = np.linalg.inv(Q1)
        inv_Q2 = np.linalg.inv(Q2)
        inv_Q3 = np.linalg.inv(Q3)
        # Pairwise invariants tr(A B^-1) * tr(B A^-1) (Christian et al. 2021).
        I1 = np.trace(Q2 @ inv_Q3, axis1=1, axis2=2) * np.trace(
            Q3 @ inv_Q2, axis1=1, axis2=2
        )
        I2 = np.trace(Q3 @ inv_Q1, axis1=1, axis2=2) * np.trace(
            Q1 @ inv_Q3, axis1=1, axis2=2
        )
        I3 = np.trace(Q1 @ inv_Q2, axis1=1, axis2=2) * np.trace(
            Q2 @ inv_Q1, axis1=1, axis2=2
        )
        # One column per candidate triad; rows are (I1, I2, I3).
        I = np.array((I1, I2, I3))
        # Sort the three invariants of every column ascending so the
        # descriptor is independent of the ordering inside the triad.
        ind = np.argsort(I, axis=0)
        ijk = np.vstack((np.ones_like(k) * i, np.ones_like(k) * j, k))
        ijk = np.take_along_axis(ijk, ind, axis=0)
        # BUGFIX: the invariants must be reordered with the same permutation
        # as ijk, otherwise the returned values do not match the sorted
        # triple ordering and the hash is not permutation-invariant.
        I = np.take_along_axis(I, ind, axis=0)
        return ijk[:, remain_ind], I[:, remain_ind]

    def quantize(self, idx, invariants, N_b=1024):
        """Quantize invariants into hash bins and build the hash table.

        Vectorized hash-function design.

        Args:
            idx: np.ndarray, shape (3, N) — crater index triples.
            invariants: np.ndarray, shape (3, N) — the three invariants per
                descriptor (N descriptors).
            N_b: int, number of quantization bins per axis.

        Returns:
            (idx, table, meta): ``table`` maps each packed hash value to the
            list of ``[idx_0, idx_1, idx_2]`` rows falling into that bucket;
            ``meta`` records the log-range and bin count needed to hash
            queries consistently.
        """
        qs = np.log(np.abs(invariants))
        min_qs = qs.min()
        max_qs = qs.max()
        # Guard against a zero span (all invariants equal) to avoid 0/0 NaNs;
        # everything then falls into bin 0.
        span = max_qs - min_qs
        if span == 0:
            span = 1.0
        ## WARNING: the exponent here is 3, not the 4 commonly seen elsewhere.
        qs = np.int32(np.power((qs - min_qs) / span, 3) * (N_b - 1))
        ## Pack the three bin coordinates into a single hash value and report
        ## how many descriptors collide after packing.
        hs = qs[0] * N_b * N_b + qs[1] * N_b + qs[2]
        vals, counts = np.unique(hs, return_counts=True)
        print("Totally abundant values: ", counts[counts > 1].sum())
        # Build the hash table.
        ## All colliding craters must be kept, so a dict-like structure is
        ## required instead of a flat array; pandas group-by builds it with
        ## batched operations (~100x faster than a Python loop).
        df = pd.DataFrame({"hs": hs, "idx_0": idx[0], "idx_1": idx[1], "idx_2": idx[2]})
        df.set_index("hs", inplace=True)
        df = df.groupby(level=0).apply(lambda x: x.values.tolist()).to_dict()
        return idx, df, {"dist": (min_qs, max_qs), "N_b": N_b}
