import cv2
import numpy as np
import time

class ImageStitcher:
    """Stitch overlapping images into a panorama.

    Pipeline: SIFT keypoints -> FLANN k-NN matching with Lowe's ratio test ->
    RANSAC homography -> perspective warp -> per-column alpha blending.
    """

    def __init__(self):
        # Minimum number of ratio-test survivors required to trust a homography.
        self.MIN = 20
        # FLANN index type 0 selects the KD-tree index.
        self.FLANN_INDEX_KDTREE = 0
        self.sift = cv2.SIFT_create()

    def show(self, name, img):
        """Display *img* in a window titled *name*; blocks until a key press."""
        cv2.imshow(name, img)
        cv2.waitKey(0)
        cv2.destroyAllWindows()

    def match_features(self, img1, img2):
        """Return (good_matches, kp1, kp2) for the two images.

        Matches come from FLANN k-NN (k=2) over SIFT descriptors and are
        filtered with Lowe's ratio test at 0.75.
        """
        kp1, descrip1 = self.sift.detectAndCompute(img1, None)
        kp2, descrip2 = self.sift.detectAndCompute(img2, None)

        index_params = dict(algorithm=self.FLANN_INDEX_KDTREE, trees=10)  # KD-tree matcher parameters
        search_params = dict(checks=50)  # number of index traversals per query
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(descrip1, descrip2, k=2)

        # Lowe's ratio test: keep a match only when it is clearly better than
        # the second-best candidate for the same query descriptor.
        good_matches = [m for m, n in matches if m.distance < 0.75 * n.distance]
        return good_matches, kp1, kp2

    def stitch_images(self, img1, img2, good_matches, kp1, kp2):
        """Warp img2 into img1's frame and overlay img1 on the result.

        Returns a canvas of width img1.width + img2.width.

        Raises:
            ValueError: when there are too few good matches or no homography
                can be estimated. (The original silently returned None here,
                which made callers fail later with confusing errors.)
        """
        if len(good_matches) <= self.MIN:
            raise ValueError(
                "Not enough good matches to stitch: %d <= %d"
                % (len(good_matches), self.MIN)
            )
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        if M is None:
            raise ValueError("Homography estimation failed.")

        # M maps img1 points onto img2 points; its inverse brings img2 into
        # img1's coordinate frame on a canvas wide enough for both images.
        warped_img = cv2.warpPerspective(
            img2, np.linalg.inv(M), (img1.shape[1] + img2.shape[1], img2.shape[0])
        )
        direct = warped_img.copy()
        direct[0:img1.shape[0], 0:img1.shape[1]] = img1
        return direct

    def blend_images(self, img1, img2):
        """Feather img1 into the top-left region of img2 (in place), return img2.

        The per-column alpha ramps from 0 at the left edge of the overlap to
        1 at its right edge. Fixes relative to the original: the right-edge
        scan now includes column 0 (was an off-by-one), a single-column
        overlap no longer divides by zero, and the per-pixel Python loop is
        replaced by an equivalent vectorized NumPy computation.
        """
        rows, cols = img1.shape[:2]
        overlap = img2[:rows, :cols]  # region of img2 covered by img1

        mask1 = img1.any(axis=2)     # True where img1 has content
        mask2 = overlap.any(axis=2)  # True where img2 has content
        both_cols = mask1.any(axis=0) & mask2.any(axis=0)
        if both_cols.any():
            left = int(np.argmax(both_cols))                    # first shared column
            right = int(cols - 1 - np.argmax(both_cols[::-1]))  # last shared column
        else:
            # No shared column: the blended values below are never selected,
            # so any placeholder endpoints work.
            left = right = 0

        col_idx = np.arange(cols, dtype=np.float64)
        src_len = np.abs(col_idx - left)
        test_len = np.abs(col_idx - right)
        denom = src_len + test_len
        denom[denom == 0] = 1.0  # guard col == left == right (0/0 in the original)
        alpha = (src_len / denom)[None, :, None]

        blended = np.clip(img1 * (1.0 - alpha) + overlap * alpha, 0, 255).astype(np.uint8)
        # Pixel priority: only-img1 -> img1, only-img2 -> img2, both -> blend.
        res = np.where(~mask1[..., None], overlap,
                       np.where(~mask2[..., None], img1, blended))

        img2[:rows, :cols] = res
        return img2

    def main(self, image_paths, output_path):
        """Stitch the images at *image_paths* left-to-right, save to *output_path*.

        Prints the elapsed time and displays the result in a window.
        """
        starttime = time.time()
        if len(image_paths) < 2:
            print("Need at least two images for stitching.")
            return

        images = [cv2.imread(path) for path in image_paths]
        for path, img in zip(image_paths, images):
            if img is None:  # cv2.imread returns None instead of raising
                print("Could not read image:", path)
                return

        # cv2.resize ignores fx/fy when an explicit dsize is given, so the
        # original fx=0.2, fy=0.2 arguments were dead; keep the 200x400 size.
        resized_images = [cv2.resize(img, (200, 400)) for img in images]

        good_matches, kp1, kp2 = self.match_features(resized_images[0], resized_images[1])
        stitched_img = self.stitch_images(resized_images[0], resized_images[1], good_matches, kp1, kp2)

        # Fold any remaining images into the growing panorama.
        for img in resized_images[2:]:
            good_matches, kp1, kp2 = self.match_features(stitched_img, img)
            stitched_img = self.stitch_images(stitched_img, img, good_matches, kp1, kp2)

        final_img = self.blend_images(resized_images[0], stitched_img)
        cv2.imwrite(output_path, final_img)
        print("Time taken:", time.time() - starttime)
        self.show('Result', final_img)

def main():
    """Entry point: stitch the example image pair and write the panorama."""
    paths = ['./data/00000.png', './data/00001.png']  # example inputs
    ImageStitcher().main(paths, './data/output.png')


if __name__ == "__main__":
    main()
