import argparse
import cv2
import numpy as np
from PIL import Image
import sys
import os


class PrecisionStitcher:
    """High-precision left-to-right image stitcher based on SIFT features.

    Consecutive images are matched with SIFT + FLANN, aligned with a RANSAC
    homography, and pasted onto a growing canvas. Black borders introduced
    by warping are cropped from the final result.
    """

    def __init__(self, min_matches=100):
        """
        Args:
            min_matches: minimum number of good (ratio-test-passing) feature
                matches required before a pair of images is stitched.
        """
        self.min_matches = min_matches
        # Lower contrast/edge thresholds than the OpenCV defaults so more
        # keypoints survive in low-texture regions.
        self.sift = cv2.SIFT_create(contrastThreshold=0.03, edgeThreshold=5)
        # FLANN with a KD-tree index (algorithm=1), suited to float SIFT descriptors.
        self.flann = cv2.FlannBasedMatcher({'algorithm': 1, 'trees': 5}, {'checks': 50})
        self.max_stitch_width = None  # computed in stitch() from the inputs
        self.match_output_dir = "matches_intermediate"
        os.makedirs(self.match_output_dir, exist_ok=True)

    def stitch(self, image_paths):
        """Main stitching pipeline (auto-crops black edges from the result).

        Args:
            image_paths: ordered list of image file paths, left to right.

        Returns:
            The stitched panorama as an RGB uint8 ndarray.

        Raises:
            FileNotFoundError: if any path cannot be read as an image.
        """
        images = []
        for p in image_paths:
            img = cv2.imread(p)
            # cv2.imread signals failure by returning None (no exception);
            # fail fast with a clear message instead of crashing in cvtColor.
            if img is None:
                raise FileNotFoundError(f"Cannot read image: {p}")
            images.append(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))

        # Total width of all inputs — an upper bound on the stitched width.
        self.max_stitch_width = sum(img.shape[1] for img in images)
        base = images[0]

        for i in range(1, len(images)):
            try:
                print(f"\n正在拼接第 {i + 1}/{len(images)} 张...")
                base = self.stitch_pair(base, images[i], step=i)

                # Save the intermediate result after the first pair is stitched.
                if i == 1:
                    cv2.imwrite(
                        "stitched_1_2.jpg",
                        cv2.cvtColor(base, cv2.COLOR_RGB2BGR)
                    )
                    print("已保存第一张+第二张拼接结果: stitched_1_2.jpg")

                h, w = base.shape[:2]
                print(f"更新尺寸: {w}x{h} (总宽度: {self.max_stitch_width})")
            except Exception as e:
                # Best-effort: keep whatever has been stitched so far.
                print(f"拼接终止: {str(e)}")
                break

        # Crop away black borders before returning.
        return self._auto_crop(base)

    def _auto_crop(self, img):
        """Crop the black (all-zero) margins from an RGB image."""
        gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        _, thresh = cv2.threshold(gray, 1, 255, cv2.THRESH_BINARY)

        # Bounding box of all non-black pixels.
        coords = cv2.findNonZero(thresh)
        if coords is None:
            # Entirely black image: boundingRect would crash on None;
            # there is nothing meaningful to crop, return unchanged.
            return img
        x, y, w, h = cv2.boundingRect(coords)

        cropped = img[y:y + h, x:x + w]
        print(f"已裁剪黑色边缘，原尺寸: {img.shape[1]}x{img.shape[0]} -> 新尺寸: {w}x{h}")
        return cropped

    def stitch_pair(self, base, new_img, step):
        """Stitch one new image onto the right side of the current base.

        Raises:
            ValueError: if too few matches are found or the estimated
                homography looks implausible.
        """
        kp1, des1 = self.sift.detectAndCompute(base, None)
        kp2, des2 = self.sift.detectAndCompute(new_img, None)
        # detectAndCompute returns None descriptors on feature-less images;
        # knnMatch would crash on None input.
        if des1 is None or des2 is None:
            raise ValueError(f"匹配点不足 (0/{self.min_matches})")

        matches = self.flann.knnMatch(des1, des2, k=2)
        # Lowe's ratio test (0.7) to keep only unambiguous matches.
        good = [m[0] for m in matches if len(m) == 2 and m[0].distance < 0.7 * m[1].distance]

        if len(good) < self.min_matches:
            raise ValueError(f"匹配点不足 ({len(good)}/{self.min_matches})")

        self._draw_matches(base, kp1, new_img, kp2, good, step)

        # Homography mapping new_img coordinates into base coordinates.
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, _ = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
        self._validate_homography(M)

        warped = self._smart_warp(new_img, M, base.shape)
        # Acts as a sanity gate: raises when the overlap confidence is too low.
        overlap_start, overlap_end = self._find_overlap_region(base, warped)

        # The result spans the full base image plus whatever part of the
        # warped image extends past the base's right edge.
        final_width = max(base.shape[1], warped.shape[1])

        result = np.zeros((base.shape[0], final_width, 3), dtype=np.uint8)
        result[:, :base.shape[1]] = base  # keep the base image verbatim
        if warped.shape[1] > base.shape[1]:
            result[:, base.shape[1]:] = warped[:, base.shape[1]:final_width]

        return result

    def _validate_homography(self, M):
        """Reject degenerate or implausible homographies.

        Raises:
            ValueError: if estimation failed or the transform is implausible.
        """
        # findHomography returns None when RANSAC fails to find a model.
        if M is None:
            raise ValueError("Homography estimation failed")
        if abs(M[0, 2]) > 2000:  # limit a single translation to 2000 px
            raise ValueError("异常平移量")
        scale_x = np.sqrt(M[0, 0] ** 2 + M[0, 1] ** 2)
        if not (0.8 < scale_x < 1.2):
            raise ValueError(f"异常水平缩放: {scale_x:.2f}")
        return True

    def _smart_warp(self, img, M, base_shape):
        """Warp `img` into the base frame on a canvas wide enough to hold it."""
        h = base_shape[0]
        ih, iw = img.shape[:2]
        # Project the corners of the *incoming* image through M to find how
        # far right the warped content extends. (The previous version used
        # the base image's corners, which is only correct when both images
        # happen to share the same size.)
        corners = np.float32([[0, 0], [iw, 0], [iw, ih], [0, ih]]).reshape(-1, 1, 2)
        warped_corners = cv2.perspectiveTransform(corners, M).reshape(-1, 2)
        new_width = int(max(warped_corners[:, 0]) - min(0, min(warped_corners[:, 0])))
        return cv2.warpPerspective(img, M, (new_width, h))

    def _find_overlap_region(self, base, warped):
        """Locate the horizontal overlap of `warped` with `base` via template matching.

        Returns:
            (overlap_start, overlap_end) x-coordinates in `warped`.

        Raises:
            ValueError: when the best match confidence is below 0.4.
        """
        gray_base = cv2.cvtColor(base, cv2.COLOR_RGB2GRAY)
        gray_warped = cv2.cvtColor(warped, cv2.COLOR_RGB2GRAY)

        # Use a tall (up to 400 px) full-width strip from the bottom of the
        # base as the template.
        template_height = min(400, gray_base.shape[0])
        template = gray_base[-template_height:, :]

        # Try several matching methods and keep the most confident one.
        methods = [cv2.TM_CCOEFF_NORMED, cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF_NORMED]
        max_vals = []

        for method in methods:
            res = cv2.matchTemplate(gray_warped, template, method)
            _, max_val, _, _ = cv2.minMaxLoc(res)
            max_vals.append(max_val)

        best_method = methods[np.argmax(max_vals)]
        result = cv2.matchTemplate(gray_warped, template, best_method)
        _, max_val, _, max_loc = cv2.minMaxLoc(result)

        print(f"最佳匹配方法: {best_method}, 匹配置信度: {max_val:.4f}")

        if max_val < 0.4:  # deliberately low threshold
            raise ValueError(f"匹配失败，最高置信度{max_val:.4f}")

        overlap_start = max_loc[0]
        # Template spans the full base width, so the overlap ends at most at
        # the base's right edge.
        overlap_end = min(base.shape[1], overlap_start + gray_base.shape[1])
        return overlap_start, overlap_end

    def _draw_matches(self, img1, kp1, img2, kp2, matches, step):
        """Save a visualization of the feature matches (drawn in green)."""
        match_img = cv2.drawMatches(
            img1, kp1, img2, kp2, matches, None,
            matchColor=(0, 255, 0),  # green
            singlePointColor=(0, 255, 0),
            flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS
        )
        output_path = os.path.join(self.match_output_dir, f"match_step_{step}.jpg")
        cv2.imwrite(output_path, match_img)
        print(f"已保存特征匹配点可视化: {output_path}")

def main():
    """CLI entry point: parse arguments, run the stitcher, save the output."""
    parser = argparse.ArgumentParser(description="高精度图像拼接工具")
    parser.add_argument("-i", "--images", nargs="+", required=True,
                        help="输入图像路径列表")
    parser.add_argument("-o", "--output", required=True,
                        help="输出图像路径")
    args = parser.parse_args()

    try:
        result = PrecisionStitcher().stitch(args.images)
        Image.fromarray(result).save(args.output)
    except Exception as e:
        # Report any failure and exit non-zero so scripts can detect it.
        print(f"错误: {str(e)}")
        sys.exit(1)
    else:
        print(f"\n成功保存至: {args.output}")
        print(f"最终尺寸: {result.shape[1]}x{result.shape[0]}")


# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()