import cv2
import numpy as np
import os

r"""
python d:\code\video-scan\opencv-document-scanner\image_stitcher.py --output d:\code\video-scan\output\stitched.jpg --images d:\code\video-scan\images\img1.jpg d:\code\video-scan\images\img2.jpg d:\code\video-scan\images\img3.jpg

这个实现的主要特点：

- 使用SIFT算法检测特征点，比SURF更稳定
- 使用BFMatcher进行特征匹配
- 应用RANSAC算法计算单应性矩阵
- 实现了像素级的图像融合
- 支持多张图片的连续拼接
- 提供了友好的命令行接口
注意事项：

- 图片需要有足够的重叠区域
- 拼接顺序会影响最终效果
- 图片分辨率过大可能会导致处理速度较慢
"""

class ImageStitcher:
    """Stitch overlapping images into a panorama via SIFT feature matching.

    Pipeline per pair: SIFT keypoints -> brute-force kNN matching ->
    Lowe's ratio test -> RANSAC homography -> warp + hard-seam blend.
    """

    def __init__(self):
        # SIFT is more stable than SURF and patent-free in modern OpenCV.
        self.detector = cv2.SIFT_create()

    def stitch_images(self, images):
        """
        Stitch a list of images sequentially, left to right.

        Args:
            images: list of BGR images (numpy arrays) with overlapping
                content; stitching order affects the result.

        Returns:
            The stitched panorama, the single image if only one is given,
            or None for an empty list.
        """
        if len(images) < 2:
            return images[0] if images else None

        # The first image is the reference frame; each subsequent image
        # is warped into the accumulated result.
        result = images[0]

        for i, img in enumerate(images[1:]):
            print(f"正在拼接第 {i+2}/{len(images)} 张图片...")
            result = self._stitch_pair(result, img)

        return result

    def _stitch_pair(self, img1, img2):
        """Stitch two images; return img1 unchanged when matching fails."""
        gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

        # Detect keypoints and compute descriptors.
        keypoints1, descriptors1 = self.detector.detectAndCompute(gray1, None)
        keypoints2, descriptors2 = self.detector.detectAndCompute(gray2, None)

        # Featureless (e.g. blank) images yield descriptors=None, which
        # would make knnMatch raise — bail out gracefully instead.
        if descriptors1 is None or descriptors2 is None:
            print("匹配点不足，无法拼接")
            return img1

        # Brute-force matching with the two nearest neighbors per query.
        bf = cv2.BFMatcher()
        matches = bf.knnMatch(descriptors1, descriptors2, k=2)

        # Lowe's ratio test to keep only distinctive matches. knnMatch may
        # return fewer than 2 neighbors for some queries, so guard the pair
        # length before unpacking.
        good_matches = []
        for pair in matches:
            if len(pair) == 2 and pair[0].distance < 0.75 * pair[1].distance:
                good_matches.append(pair[0])

        print(f"找到 {len(good_matches)} 个匹配点")

        if len(good_matches) >= 4:
            # Matched point coordinates in each image.
            src_pts = np.float32([keypoints1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
            dst_pts = np.float32([keypoints2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)

            # Homography mapping img1 coordinates into img2's frame,
            # robust to outliers via RANSAC (5 px reprojection threshold).
            H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

            # RANSAC can fail to find a model even with >= 4 matches.
            if H is None:
                print("匹配点不足，无法拼接")
                return img1

            h1, w1 = img1.shape[:2]
            h2, w2 = img2.shape[:2]

            # Project img1's corners to find the bounding box of the
            # combined canvas (union with img2's corners).
            pts1 = np.float32([[0, 0], [0, h1], [w1, h1], [w1, 0]]).reshape(-1, 1, 2)
            pts2 = cv2.perspectiveTransform(pts1, H)
            pts = np.concatenate((pts2, np.float32([[0, 0], [0, h2], [w2, h2], [w2, 0]]).reshape(-1, 1, 2)))

            [xmin, ymin] = np.int32(pts.min(axis=0).ravel() - 0.5)
            [xmax, ymax] = np.int32(pts.max(axis=0).ravel() + 0.5)
            t = [-xmin, -ymin]

            # Translation so the whole canvas has non-negative coordinates.
            Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])

            # Warp img1 into the canvas using translation * homography.
            result_warped = cv2.warpPerspective(img1, Ht.dot(H), (xmax-xmin, ymax-ymin))

            # Paste img2 at its (translated) position on the canvas.
            img2_warped = np.zeros_like(result_warped)
            img2_warped[t[1]:t[1]+h2, t[0]:t[0]+w2] = img2

            # Valid-pixel masks (note: genuinely black pixels count as
            # empty — acceptable for typical photos).
            mask1 = (cv2.cvtColor(result_warped, cv2.COLOR_BGR2GRAY) > 0)
            mask2 = (cv2.cvtColor(img2_warped, cv2.COLOR_BGR2GRAY) > 0)

            result_blended = np.zeros_like(result_warped)

            # Non-overlapping regions: copy each source directly.
            result_blended[mask1] = result_warped[mask1]
            result_blended[mask2] = img2_warped[mask2]

            # Overlapping region: binary step weights split at the mean x
            # of the overlap — a hard seam, not a true gradient feather.
            overlap = mask1 & mask2
            if overlap.any():
                seam_x = np.where(overlap)[1].mean()
                x_coords = np.arange(result_warped.shape[1])
                weights = np.zeros_like(x_coords, dtype=np.float32)
                weights[x_coords < seam_x] = 1
                weights = np.tile(weights, (result_warped.shape[0], 1))

                # Broadcast weights across the color channels.
                weights = np.expand_dims(weights, axis=2)
                result_blended[overlap] = (
                    result_warped[overlap] * weights[overlap] +
                    img2_warped[overlap] * (1 - weights[overlap])
                )

            return result_blended
        print("匹配点不足，无法拼接")
        return img1

def main():
    """Command-line entry point: parse arguments, load, stitch, and save.

    Exits early (with a message) when fewer than two images are readable.
    """
    import argparse

    parser = argparse.ArgumentParser(description='图像拼接工具')
    parser.add_argument('--output', required=True, help='输出图像路径')
    parser.add_argument('--images', required=True, nargs='+', help='输入图像路径列表')

    args = parser.parse_args()

    # Load the inputs, skipping any path that cannot be decoded
    # (cv2.imread returns None instead of raising).
    images = []
    for path in args.images:
        img = cv2.imread(path)
        if img is None:
            print(f"无法读取图片: {path}")
            continue
        images.append(img)

    if len(images) < 2:
        print("至少需要两张图片进行拼接")
        return

    stitcher = ImageStitcher()
    result = stitcher.stitch_images(images)

    if result is not None:
        # Ensure the output directory exists; exist_ok avoids the race
        # between an exists() check and makedirs().
        output_dir = os.path.dirname(args.output)
        if output_dir:
            os.makedirs(output_dir, exist_ok=True)

        cv2.imwrite(args.output, result)
        print(f"拼接完成，已保存到: {args.output}")
    else:
        print("拼接失败")

# Run the CLI only when executed as a script, not when imported.
if __name__ == "__main__":
    main()