# cython: language_level=3


import math
import torch
torch.set_grad_enabled(False)
import cv2
import kornia as K
import kornia.feature as KF
import numpy as np
import torch


def add_ones(x):
    """Append a homogeneous coordinate of 1.0 to a point or array of points.

    A 1-D input of shape (n,) becomes (n+1,); a 2-D input of shape (m, n)
    gains a trailing column of ones, becoming (m, n+1).
    """
    if x.ndim == 1:
        # Single point: append the scalar 1.0.
        return np.append(x, 1.0)
    # Row-wise points: stack a column of ones on the right.
    ones_col = np.ones((x.shape[0], 1))
    return np.hstack((x, ones_col))

def rotate_image_bound_with_M(image, angle):
    """Rotate `image` by `angle` degrees clockwise, enlarging the canvas so
    no content is clipped.

    Parameters
    ----------
    image : np.ndarray
        H x W (x C) image as loaded by cv2.
    angle : float
        Rotation in degrees. 0 is a fast no-op path.

    Returns
    -------
    (rotated, M) : the rotated image and the 2x3 float affine matrix that
        maps original pixel coordinates into the rotated frame.
    """
    if angle == 0:
        # Identity transform. Use float dtype so callers that stack/invert M
        # get the same dtype the cv2.getRotationMatrix2D branch produces
        # (the original returned an int matrix here).
        return image, np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])

    # Image dimensions and rotation center.
    (h, w) = image.shape[:2]
    (cX, cY) = (w // 2, h // 2)

    # Negative angle => clockwise rotation about the center.
    M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
    cos = np.abs(M[0, 0])
    sin = np.abs(M[0, 1])

    # New bounding-box size of the rotated image (rounded to nearest int).
    nW = int(0.5 + (h * sin) + (w * cos))
    nH = int(0.5 + (h * cos) + (w * sin))

    # Translate so the rotated content is centered in the enlarged canvas.
    M[0, 2] += (nW / 2) - cX
    M[1, 2] += (nH / 2) - cY
    # White border fill. NOTE(review): INTER_AREA is unusual for rotation
    # (INTER_LINEAR is typical) -- left unchanged to preserve output.
    return cv2.warpAffine(image, M, (nW, nH), flags=cv2.INTER_AREA, borderValue=(255, 255, 255)), M

def rotate_image_bound(image, angle):
    """Rotate `image` by `angle` degrees with canvas expansion, discarding
    the affine matrix returned by rotate_image_bound_with_M."""
    rotated, _unused_M = rotate_image_bound_with_M(image, angle)
    return rotated

def calRotateAngleFromMatch(mkpts0, mkpts1):
    """Estimate the rotation angle and affine transform from matched points.

    Fits a full affine model in both directions, keeps only the inliers of
    the reverse (1 -> 0) fit, and derives the angle from the forward fit.

    Returns
    -------
    (angle_deg, affine_1to0, inlier_pts0, inlier_pts1)
    """
    forward = cv2.estimateAffine2D(mkpts0, mkpts1)   # maps image-0 pts -> image-1 pts
    backward = cv2.estimateAffine2D(mkpts1, mkpts0)  # maps image-1 pts -> image-0 pts
    keep = backward[1].ravel() == 1
    inlier_pts0 = mkpts0[keep]
    inlier_pts1 = mkpts1[keep]
    return calRotateAngleFromMatrix(forward), backward[0], inlier_pts0, inlier_pts1

def calTransformationFromMatch(mkpts0, mkpts1):
    """Return the translation component (tx, ty) of a partial-affine fit
    mapping mkpts0 onto mkpts1."""
    affine, _inlier_mask = cv2.estimateAffinePartial2D(mkpts0, mkpts1)
    # Last column of the 2x3 affine matrix is the translation vector.
    return affine[:, 2].T

def calRotateAngleFromMatrix(mat):
    rmat = mat[0][:2, :2]
    det = np.linalg.det(rmat)
    rmat_normal = rmat / (det ** 0.5)
    temp = rmat_normal[1, 0]
    if abs(temp) > 1:
        temp = int(temp)
    angle = math.asin(temp) * 180 / math.pi
    return angle




class RLoftrMatcher(object):
    """Rotation-tolerant LoFTR matcher.

    Runs LoFTR between several rotated copies of image 0 and image 1, maps
    matched keypoints back into image 0's original frame, and merges all
    inliers before estimating the dominant rotation.
    """

    def __init__(self, config=None):
        # `config` is accepted for API compatibility but currently unused.
        # Default changed from {} to None to avoid the shared-mutable-default
        # pitfall; callers passing a dict are unaffected.
        super().__init__()

    def match(self, path0, path1, nrotate=1):
        """Match keypoints between the images at `path0` and `path1`.

        Parameters
        ----------
        path0, path1 : str
            Image paths. Image 0 is read with OpenCV (BGR uint8); image 1
            with kornia (RGB float32 on CUDA).
        nrotate : int
            Number of evenly spaced rotations of image 0 to try
            (360 / nrotate degrees apart).

        Returns
        -------
        tuple or None
            (mkpts0, mkpts1, num_matches, main_direction_deg, affine_1to0),
            or None when either image fails to load.
        """
        img0 = cv2.imread(path0)
        img1 = K.io.load_image(path1, K.io.ImageLoadType.RGB32, 'cuda')[None, ...]
        if (img0 is None) or (img1 is None):
            print('Error: Image file is not found!')
            return None

        mkpts0_all = np.empty((0, 2))
        mkpts1_all = np.empty((0, 2))

        step = 360.0 / nrotate
        # Build the model once, outside the rotation loop.
        matcher = KF.LoFTR(pretrained="outdoor").to('cuda')
        for rot in range(nrotate):
            # Rotate image 0, keeping M so matches can be mapped back later.
            img_r, M = rotate_image_bound_with_M(img0, rot * step)
            M = np.row_stack((M, np.array([0, 0, 1])))
            M_inv = np.mat(np.linalg.inv(M))
            img_r = (K.image_to_tensor(img_r, False).float() / 255.).to('cuda')
            # NOTE(review): img0 comes from cv2.imread (BGR) but is fed to
            # rgb_to_grayscale as if RGB, so channel weights are swapped --
            # confirm whether this is intentional.
            input_dict = {
                "image0": K.color.rgb_to_grayscale(img_r),  # LofTR works on grayscale images only
                "image1": K.color.rgb_to_grayscale(img1),
            }
            with torch.inference_mode():
                correspondences = matcher(input_dict)
            mkpts0 = correspondences["keypoints0"].cpu().numpy()
            mkpts1 = correspondences["keypoints1"].cpu().numpy()
            if mkpts0 is None:
                # Defensive only: .cpu().numpy() never yields None; an empty
                # match set arrives as a (0, 2) array instead.
                mkpts0 = mkpts1 = np.array([]).reshape(-1, 2)
            else:
                if len(mkpts0) > 25:
                    # First RANSAC pass: keep fundamental-matrix inliers.
                    Fm, inliers = cv2.findFundamentalMat(mkpts0, mkpts1, cv2.FM_RANSAC, 0.5, 0.999, 100000)
                    inliers = inliers > 0
                    mkpts0 = mkpts0[inliers.reshape(-1, )]
                    mkpts1 = mkpts1[inliers.reshape(-1, )]
                if len(mkpts0) > 15:
                    # Second pass: keep homography inliers. cv2.RANSAC is the
                    # correct constant here (same value, 8, as cv2.FM_RANSAC).
                    # NOTE(review): the trailing 0.99 lands positionally in
                    # findHomography's `mask` slot, not `confidence` -- verify
                    # the intended parameters of this call.
                    H, inliers = cv2.findHomography(mkpts0, mkpts1, cv2.RANSAC, 0.5, 0.99)
                    inliers = inliers > 0
                    mkpts0 = mkpts0[inliers.reshape(-1, )]
                    mkpts1 = mkpts1[inliers.reshape(-1, )]
            # Unproject: map keypoints from the rotated frame back to image 0.
            hmkpts0 = add_ones(mkpts0)
            rhmkpts0 = (M_inv * hmkpts0.T).A.T[:, 0:2]

            mkpts0_all = np.vstack((mkpts0_all, rhmkpts0))
            mkpts1_all = np.vstack((mkpts1_all, mkpts1))

        # Estimate the dominant rotation from all accumulated matches.
        maindirection = 0
        if len(mkpts0_all) > 6:
            maindirection, Mat1, mkpts0_all, mkpts1_all = calRotateAngleFromMatch(mkpts0_all, mkpts1_all)
        else:
            # Typo fixed: 'Waning' -> 'Warning'.
            print('Warning: Matching points are not enough.')
            Mat1 = [1]
        return mkpts0_all, mkpts1_all, len(mkpts0_all), maindirection, Mat1

