import numpy as np
import cv2


class Panaroma:
    """Stitch a pair of images into a panorama.

    SIFT keypoints are matched between the two images (filtered with
    Lowe's ratio test), a homography is estimated with RANSAC, and the
    right image is warped onto the plane of the left image.
    """

    def image_stitch(self, images, lowe_ratio=0.75, max_Threshold=4.0, match_status=False):
        """Stitch two images into a panorama.

        Args:
            images: pair (imageB, imageA); imageB is the left/base image and
                imageA is warped onto its plane.
            lowe_ratio: threshold for Lowe's ratio test on the knn matches.
            max_Threshold: RANSAC reprojection threshold for findHomography.
            match_status: when True, also return a visualization of the
                keypoint matches.

        Returns:
            The stitched image, or (stitched_image, visualization) when
            match_status is True, or None when no homography could be
            estimated (too few valid matches or RANSAC failure).
        """
        (imageB, imageA) = images
        # Detect keypoints and compute SIFT descriptors for both images.
        (key_points_A, features_of_A) = self.detect_feature_and_keypoints(imageA)
        (key_points_B, features_of_B) = self.detect_feature_and_keypoints(imageB)

        # Match the features and estimate the homography; None signals failure.
        Values = self.match_keypoints(key_points_A, key_points_B, features_of_A, features_of_B, lowe_ratio, max_Threshold)
        if Values is None:
            return None

        (matches, Homography, status) = Values
        # Warp imageA into imageB's coordinate frame, then paste imageB
        # over the left part of the canvas.
        result_image = self.get_warp_perspective(imageA, imageB, Homography)
        result_image[0:imageB.shape[0], 0:imageB.shape[1]] = imageB

        # Optionally visualize which keypoint matches survived RANSAC.
        if match_status:
            vis = self.draw_matches(imageA, imageB, key_points_A, key_points_B, matches, status)
            return result_image, vis
        return result_image

    def get_warp_perspective(self, imageA, imageB, Homography):
        """Warp imageA onto a canvas wide enough to hold both images.

        Args:
            imageA: image to be warped (right image).
            imageB: base image; only its width is used to size the canvas.
            Homography: 3x3 homography mapping imageA into imageB's frame.

        Returns:
            imageA warped by Homography on a canvas of width wA + wB and
            height hA.
        """
        # The canvas must be wide enough for both images side by side.
        total_width = imageA.shape[1] + imageB.shape[1]
        return cv2.warpPerspective(imageA, Homography, (total_width, imageA.shape[0]))

    def detect_feature_and_keypoints(self, image):
        """Detect SIFT keypoints and compute their descriptors.

        Returns:
            (keypoints, features) where keypoints is a float32 array of
            (x, y) coordinates and features is the descriptor matrix.
        """
        descriptors = cv2.SIFT_create()
        (keypoints, features) = descriptors.detectAndCompute(image, None)
        # Keep only the (x, y) coordinates; downstream code indexes keypoints
        # by position, so cv2.KeyPoint objects are not needed.
        keypoints = np.float32([kp.pt for kp in keypoints])
        return keypoints, features

    def get_all_possible_matches(self, featuresA, featuresB):
        """Return the 2 nearest neighbours in featuresB for each featuresA row."""
        # Brute-force matcher with Euclidean (L2) distance.
        match_instance = cv2.DescriptorMatcher_create("BruteForce")
        return match_instance.knnMatch(featuresA, featuresB, 2)

    def get_all_valid_matches(self, AllMatches, lowe_ratio):
        """Filter knn matches with Lowe's ratio test.

        A match is kept when its best distance is clearly smaller than the
        second-best distance (ambiguous matches are discarded).

        Returns:
            List of (trainIdx, queryIdx) pairs for the surviving matches.
        """
        return [
            (val[0].trainIdx, val[0].queryIdx)
            for val in AllMatches
            if len(val) == 2 and val[0].distance < val[1].distance * lowe_ratio
        ]

    def compute_homography(self, pointsA, pointsB, max_Threshold):
        """Estimate a homography mapping pointsA to pointsB with RANSAC.

        Returns:
            (homography, status) from cv2.findHomography; homography is
            None when no model could be found.
        """
        return cv2.findHomography(pointsA, pointsB, cv2.RANSAC, max_Threshold)

    def match_keypoints(self, KeypointsA, KeypointsB, featuresA, featuresB, lowe_ratio, max_Threshold):
        """Match keypoints between two images and estimate the homography.

        Args:
            KeypointsA, KeypointsB: float32 (x, y) keypoint coordinates.
            featuresA, featuresB: the corresponding SIFT descriptors.
            lowe_ratio: threshold for Lowe's ratio test.
            max_Threshold: RANSAC reprojection threshold.

        Returns:
            (valid_matches, homography, status), or None when there are too
            few valid matches or RANSAC fails to find a homography.
        """
        all_matches = self.get_all_possible_matches(featuresA, featuresB)
        valid_matches = self.get_all_valid_matches(all_matches, lowe_ratio)

        # findHomography needs at least 4 correspondences; require more
        # than 4 so RANSAC has outliers it can reject.
        if len(valid_matches) <= 4:
            return None

        # Build the two point sets in matching order.
        points_A = np.float32([KeypointsA[i] for (_, i) in valid_matches])
        points_B = np.float32([KeypointsB[i] for (i, _) in valid_matches])
        (homography, status) = self.compute_homography(points_A, points_B, max_Threshold)
        # BUG FIX: cv2.findHomography can return None when RANSAC fails to
        # find a model; the original code passed it through and crashed
        # later inside cv2.warpPerspective. Treat it as a failed match.
        if homography is None:
            return None
        return valid_matches, homography, status

    def get_image_dimension(self, image):
        """Return (height, width) of an image."""
        return image.shape[:2]

    def get_points(self, imageA, imageB):
        """Place imageA and imageB side by side on one canvas.

        Returns:
            uint8 canvas of shape (max(hA, hB), wA + wB, 3) with imageA on
            the left and imageB on the right.
        """
        (hA, wA) = self.get_image_dimension(imageA)
        (hB, wB) = self.get_image_dimension(imageB)
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB
        return vis

    def draw_matches(self, imageA, imageB, KeypointsA, KeypointsB, matches, status):
        """Draw a green line for every match that survived RANSAC.

        Args:
            imageA, imageB: the two source images.
            KeypointsA, KeypointsB: float32 (x, y) keypoint coordinates.
            matches: list of (trainIdx, queryIdx) pairs.
            status: inlier mask returned by cv2.findHomography.

        Returns:
            Side-by-side image with inlier matches connected by lines.
        """
        (hA, wA) = self.get_image_dimension(imageA)
        vis = self.get_points(imageA, imageB)

        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # Only draw the matches RANSAC kept as inliers.
            if s == 1:
                ptA = (int(KeypointsA[queryIdx][0]), int(KeypointsA[queryIdx][1]))
                # Shift B's x coordinate past imageA on the shared canvas.
                ptB = (int(KeypointsB[trainIdx][0]) + wA, int(KeypointsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)
        return vis
