"""
!/usr/bin/env python
-*- coding: utf-8 -*-
@CreateTime : 2025/2/24 9:51
@Author  :    AnimateX
@Contact :    animatex@163.com
@File    :    tmp_new.py
@License :    Copyright © 2024 AnimateX. All rights reserved.
@Version :    tmp_new_2025/2/24.0.1

-------------------------------------------------------------------------------
# @Description:

-------------------------------------------------------------------------------
"""
import cv2
import sys
import time
import argparse
import torch
import numpy as np
from torch import nn, optim
import torch.utils.data as Data
from typing import Tuple, List, Optional
from concurrent.futures import ThreadPoolExecutor
import heapq

from utils_feature import Extractor, timing_decorator
from superpoint import SuperPoint

# Run all torch models (SuperPoint feature extractor) on GPU when available.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


class PreProcess:
    """Pre-processing utilities for fisheye frames: manual/automatic cropping
    of the circular fisheye content, grayscale conversion, and de-duplication
    of matched point pairs."""

    def __init__(self):
        # Cached crop rectangle (x_l, x_r, y_u, y_d). Computed on the first
        # frame and reused for later frames so the contour search runs once.
        self.target_crop_param = None

        # Kernel size for the optional morphological close.
        # NOTE(review): keeps the original (misspelled) attribute name
        # "mor_kszie" in case external code references it.
        self.mor_kszie = 5

    @staticmethod
    def manual_crop_image(img, x_l: int, x_r: int, y_u: int, y_d: int):
        """Crop *img* to the rectangle [y_u:y_d, x_l:x_r] (returns a view)."""
        return img[y_u:y_d, x_l:x_r]

    def auto_crop_fisheye_image(self, img: np.ndarray, is_first: bool = True, en_morphology: bool = False) -> np.ndarray:
        """Automatically crop the circular fisheye region out of *img*.

        The black border around the fisheye circle is located via a binary
        threshold + largest external contour; the bounding box is expanded to
        a square and, if the resulting crop is still non-square, the crop is
        zero-padded to a square.

        Args:
            img: input frame, BGR (H, W, 3) or grayscale (H, W).
            is_first: when True, the detected crop rectangle is cached in
                ``self.target_crop_param``; later calls with is_first=False
                reuse the cached rectangle and skip detection.
            en_morphology: apply a morphological close to the binary mask
                before contour extraction.

        Returns:
            The cropped (and possibly zero-padded) image.

        Raises:
            ValueError: if no contour is found in the thresholded image.
        """
        # Fast path: reuse the cached rectangle on subsequent frames.
        # NOTE(review): the cached path returns the raw crop without the
        # square padding applied below — verify callers tolerate that.
        if self.target_crop_param is not None and not is_first:
            x_l, x_r, y_u, y_d = self.target_crop_param
            return img[y_u: y_d, x_l: x_r]

        if len(img.shape) == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img.copy()

        # Anything brighter than 5 counts as fisheye content.
        _, binary = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)

        if en_morphology:
            kernel = np.ones((self.mor_kszie, self.mor_kszie), np.uint8)
            binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        if not contours:
            raise ValueError("No fisheye circle detected")
        max_contour = max(contours, key=cv2.contourArea)

        points = max_contour[:, 0, :]  # contour points as an (N, 2) array

        # Classify contour points into image halves (vectorized).
        mid_y = img.shape[0] / 2
        mid_x = img.shape[1] / 2

        top_mask = points[:, 1] < mid_y
        left_mask = points[:, 0] < mid_x

        # Tight bounds, shrunk by one pixel on each side.
        # NOTE(review): np.min/np.max raise on an empty selection if the
        # contour lies entirely in one half of the image.
        x_left = np.min(points[left_mask, 0]) + 1
        x_right = np.max(points[~left_mask, 0]) - 1
        y_up = np.min(points[top_mask, 1]) + 1
        y_down = np.max(points[~top_mask, 1]) - 1

        # Expand the shorter side so the crop is (approximately) square.
        width = x_right - x_left
        height = y_down - y_up

        if width != height:
            diff = abs(width - height)
            half_diff = diff // 2

            if width > height:
                y_down = min(img.shape[0], y_down + half_diff)
                y_up = max(0, y_up - half_diff)
            else:
                x_right = min(img.shape[1], x_right + half_diff)
                x_left = max(0, x_left - half_diff)

        cropped = img[int(y_up):int(y_down), int(x_left):int(x_right)]

        # If clamping at the image border left the crop non-square,
        # zero-pad it to a centered square.
        if cropped.shape[0] != cropped.shape[1]:
            size = max(cropped.shape[0], cropped.shape[1])
            shape = (size, size, 3) if len(img.shape) == 3 else (size, size)
            result = np.zeros(shape, dtype=img.dtype)

            y_offset = (size - cropped.shape[0]) // 2
            x_offset = (size - cropped.shape[1]) // 2
            result[y_offset:y_offset + cropped.shape[0], x_offset:x_offset + cropped.shape[1]] = cropped
            cropped = result

        if is_first:
            self.target_crop_param = (int(x_left), int(x_right), int(y_up), int(y_down))

        return cropped

    @staticmethod
    def crop_fisheye_image_new(image, need_close=False, crop_indices=None):
        """
        Crop a fisheye image, removing the surrounding black ring and keeping
        a square region around the circle.

        Args:
            image: input fisheye image.
            need_close: apply a morphological close to the binary mask.
            crop_indices: precomputed crop indices [x_l, x_r, y_u, y_d];
                when given, detection is skipped entirely.

        Returns:
            (cropped_img, (x_l, x_r, y_u, y_d)): the cropped image and the
            crop indices used.
        """
        # 1. Reuse precomputed crop indices if available.
        if crop_indices is not None:
            x_l, x_r, y_u, y_d = crop_indices
            return image[y_u:y_d, x_l:x_r], (x_l, x_r, y_u, y_d)

        # 2. Convert to grayscale if needed.
        if len(image.shape) > 2 and image.shape[2] > 1:
            gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray_img = image.copy()

        # 3. Binarize.
        threshold_value = 10  # tune for the camera's black level
        _, binary_img = cv2.threshold(gray_img, threshold_value, 255, cv2.THRESH_BINARY)

        # Optional morphological close to fill small holes near the rim.
        if need_close:
            kernel = np.ones((5, 5), np.uint8)
            binary_img = cv2.morphologyEx(binary_img, cv2.MORPH_CLOSE, kernel)

        # 4. Estimate the circle diameter by scanning rows/columns near the
        #    image center for the first "solid" foreground pixel on each side.
        height, width = binary_img.shape
        center_y, center_x = height // 2, width // 2

        # Collected diameter estimates.
        d_values = []

        # Sample several columns around the center.
        for col_offset in range(-10, 10, 4):
            col = center_x + col_offset
            if col < 0 or col >= width:
                continue

            # Scan top-down for the upper rim.
            upper_y = None
            for y in range(height):
                # Require at least 4 foreground pixels in the next 5-pixel
                # window to reject isolated noise.
                # NOTE(review): the window count is not strictly
                # "consecutive" — gaps inside the window still count.
                if binary_img[y, col] > 0:
                    consecutive_count = 0
                    for check_y in range(y, min(y + 5, height)):
                        if binary_img[check_y, col] > 0:
                            consecutive_count += 1
                    if consecutive_count >= 4:  # >= 4 hits in the 5-px window
                        upper_y = y
                        break

            # Scan bottom-up for the lower rim.
            lower_y = None
            for y in range(height - 1, -1, -1):
                if binary_img[y, col] > 0:
                    consecutive_count = 0
                    for check_y in range(y, max(y - 5, -1), -1):
                        if binary_img[check_y, col] > 0:
                            consecutive_count += 1
                    if consecutive_count >= 4:  # >= 4 hits in the 5-px window
                        lower_y = y
                        break

            if upper_y is not None and lower_y is not None:
                d_values.append(lower_y - upper_y)

        # Sample several rows around the center.
        for row_offset in range(-10, 10, 4):
            row = center_y + row_offset
            if row < 0 or row >= height:
                continue

            # Scan left-to-right for the left rim.
            left_x = None
            for x in range(width):
                if binary_img[row, x] > 0:
                    consecutive_count = 0
                    for check_x in range(x, min(x + 5, width)):
                        if binary_img[row, check_x] > 0:
                            consecutive_count += 1
                    if consecutive_count >= 4:  # >= 4 hits in the 5-px window
                        left_x = x
                        break

            # Scan right-to-left for the right rim.
            right_x = None
            for x in range(width - 1, -1, -1):
                if binary_img[row, x] > 0:
                    consecutive_count = 0
                    for check_x in range(x, max(x - 5, -1), -1):
                        if binary_img[row, check_x] > 0:
                            consecutive_count += 1
                    if consecutive_count >= 4:  # >= 4 hits in the 5-px window
                        right_x = x
                        break

            if left_x is not None and right_x is not None:
                d_values.append(right_x - left_x)

        # No usable diameter: return the image uncropped.
        if not d_values:
            return image, (0, width, 0, height)

        # Use the 75th percentile of the estimates to resist outliers.
        d_values.sort()
        diameter = d_values[len(d_values) * 3 // 4]

        # Crop rectangle centered on the image center.
        x_l = max(0, center_x - diameter // 2)
        x_r = min(width, center_x + diameter // 2)
        y_u = max(0, center_y - diameter // 2)
        y_d = min(height, center_y + diameter // 2)

        # Force a square crop.
        crop_size = min(x_r - x_l, y_d - y_u)

        x_l = center_x - crop_size // 2
        x_r = center_x + crop_size // 2
        y_u = center_y - crop_size // 2
        y_d = center_y + crop_size // 2

        # Clamp to the image bounds.
        x_l = max(0, x_l)
        x_r = min(width, x_r)
        y_u = max(0, y_u)
        y_d = min(height, y_d)

        cropped_img = image[y_u:y_d, x_l:x_r]

        return cropped_img, (x_l, x_r, y_u, y_d)

    @staticmethod
    def auto_crop_fisheye(img: np.ndarray) -> Tuple[np.ndarray, Tuple[int, int, int, int]]:
        """
        Automatically crop a fisheye image: remove the black border and keep
        the central circular region (padded to a square when necessary).

        Returns:
            (cropped, (x_left, x_right, y_up, y_down))

        Raises:
            ValueError: if no contour is found.
        """
        # Convert to grayscale.
        if len(img.shape) == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img.copy()

        # BUGFIX(cleanup): a cv2.adaptiveThreshold call used to precede this
        # line, but its result was immediately overwritten — removed as dead
        # code. The plain low threshold is what is actually used.
        _, binary = cv2.threshold(gray, 5, 255, cv2.THRESH_BINARY)

        # Small-kernel close to seal pinholes on the rim.
        kernel = np.ones((3, 3), np.uint8)
        binary = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel)

        contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

        if not contours:
            raise ValueError("No fisheye circle detected")

        # Largest contour is assumed to be the fisheye circle.
        max_contour = max(contours, key=cv2.contourArea)

        # Partition contour points by image half.
        top_points = []
        bottom_points = []
        left_points = []
        right_points = []

        for point in max_contour[:, 0, :]:
            x, y = point
            if y < img.shape[0] / 2:
                top_points.append((x, y))
            else:
                bottom_points.append((x, y))
            if x < img.shape[1] / 2:
                left_points.append((x, y))
            else:
                right_points.append((x, y))

        # Tight bounds, shrunk by one pixel on each side.
        x_left = min(p[0] for p in left_points) + 1
        x_right = max(p[0] for p in right_points) - 1
        y_up = min(p[1] for p in top_points) + 1
        y_down = max(p[1] for p in bottom_points) - 1

        # Expand the shorter side so the crop is (approximately) square.
        width = x_right - x_left
        height = y_down - y_up

        if width > height:
            diff = width - height
            y_down = min(img.shape[0], y_down + diff // 2)
            y_up = max(0, y_up - diff // 2)
        elif height > width:
            diff = height - width
            x_right = min(img.shape[1], x_right + diff // 2)
            x_left = max(0, x_left - diff // 2)

        cropped = img[y_up:y_down, x_left:x_right]

        # Zero-pad to a centered square if border clamping broke squareness.
        if cropped.shape[0] != cropped.shape[1]:
            size = max(cropped.shape[0], cropped.shape[1])
            result = np.zeros((size, size, 3) if len(img.shape) == 3 else (size, size), dtype=img.dtype)
            y_offset = (size - cropped.shape[0]) // 2
            x_offset = (size - cropped.shape[1]) // 2
            result[y_offset:y_offset+cropped.shape[0], x_offset:x_offset+cropped.shape[1]] = cropped
            cropped = result

        return cropped, (x_left, x_right, y_up, y_down)

    @staticmethod
    def convert_to_gray(img):
        """Convert a BGR image to single-channel grayscale."""
        return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    @staticmethod
    def deduplication(inliers):
        """De-duplicate matched point pairs.

        *inliers* is indexed as [:, 1] for candidate points and [:, 0] for
        their labels; duplicates are removed first by point, then by label
        (np.unique keeps the first occurrence and sorts the keys).

        Returns:
            (train, label): the de-duplicated points and their labels.
        """
        pts_1, indexes_1 = np.unique(inliers[:, 1], return_index=True, axis=0)
        label, indexes_2 = np.unique(inliers[:, 0][indexes_1], return_index=True, axis=0)
        train = pts_1[indexes_2]
        return train, label


class ImageBlender:
    """Multi-band (Laplacian pyramid) image blending with seam search."""

    @staticmethod
    def build_gaussian_pyramid(img: np.ndarray, levels: int) -> List[np.ndarray]:
        """Return a Gaussian pyramid [full-res, .../2, .../4, ...] of *levels* images."""
        pyramid = [img]
        for _ in range(levels - 1):
            pyramid.append(cv2.pyrDown(pyramid[-1]))
        return pyramid

    @staticmethod
    def build_laplacian_pyramid(img: np.ndarray, levels: int) -> List[np.ndarray]:
        """Return a Laplacian pyramid of *levels* images; the last entry is
        the low-frequency residual."""
        pyramid = []
        current = img.copy()
        for _ in range(levels - 1):
            down = cv2.pyrDown(current)
            up = cv2.pyrUp(down, dstsize=(current.shape[1], current.shape[0]))
            pyramid.append(current - up)
            current = down
        pyramid.append(current)
        return pyramid

    @staticmethod
    def _reconstruct_pyramid(pyramid: List[np.ndarray]) -> np.ndarray:
        """Collapse a Laplacian pyramid back into a uint8 image."""
        result = pyramid[-1]
        for layer in reversed(pyramid[:-1]):
            result = cv2.pyrUp(result, dstsize=(layer.shape[1], layer.shape[0]))
            if result.shape != layer.shape:
                # BUGFIX: the resized image was previously assigned to an
                # unused local ("img"), so a shape mismatch still crashed on
                # the in-place add below.
                result = cv2.resize(result, (layer.shape[1], layer.shape[0]))
            result += layer
        return np.clip(result, 0, 255).astype(np.uint8)

    @staticmethod
    def multi_band_blending(img1: np.ndarray, img2: np.ndarray, mask: np.ndarray, levels: int = 6) -> np.ndarray:
        """Blend img1 and img2 with the given mask using Laplacian pyramids.

        *mask* weights img1 (1.0 -> img1, 0.0 -> img2). *levels* is clamped to
        log2 of the smallest image dimension; None selects the maximum.
        """
        min_shape = min(img1.shape[0], img1.shape[1], img2.shape[0], img2.shape[1])
        max_levels = int(np.floor(np.log2(min_shape)))

        if levels is None:
            levels = max_levels

        if levels < 1 or levels > max_levels:
            levels = max_levels

        mask_pyramid = ImageBlender.build_gaussian_pyramid(mask, levels)
        lap_a = ImageBlender.build_laplacian_pyramid(img1.astype(np.float64), levels)
        lap_b = ImageBlender.build_laplacian_pyramid(img2.astype(np.float64), levels)

        # Per-level weighted combination of the two pyramids.
        blended = [la * mp + lb * (1.0 - mp) for la, lb, mp in zip(lap_a, lap_b, mask_pyramid)]
        return ImageBlender._reconstruct_pyramid(blended)

    @staticmethod
    def _compute_error(img_a: np.ndarray, img_b: np.ndarray) -> np.ndarray:
        """Per-pixel squared error between two images (summed over channels
        for color input)."""
        diff = img_a.astype(np.float32) - img_b.astype(np.float32)
        return np.sum(np.square(diff), axis=2) if img_a.ndim == 3 else np.square(diff)

    @staticmethod
    def _dynamic_programming(error: np.ndarray, mask_height: int) -> np.ndarray:
        """Accumulate a minimum-cost vertical seam table over *error*.

        Each row adds the cheapest of the three neighbors from the row above
        (left/center/right), edge columns use only the two available.
        """
        error_ = np.zeros(error.shape, np.float32)
        error_[0] = error[0]

        for i in range(1, mask_height):
            error_[i, 0] = error[i, 0] + min(error_[i - 1, 0:2])
            error_[i, 1:-1] = error[i, 1:-1] + np.minimum.reduce([
                error_[i - 1, :-2], error_[i - 1, 1:-1], error_[i - 1, 2:]
            ])
            error_[i, -1] = error[i, -1] + min(error_[i - 1, -2:])

        return error_

    @staticmethod
    def _create_blend_mask(min_loc_left: np.ndarray, min_loc_right: np.ndarray,
                           mask_shape: Tuple[int, int]) -> np.ndarray:
        """Build a per-row blend mask (vectorized).

        mask_shape is (width, height). Pixels strictly between the left and
        right seam columns get 0, the seam columns themselves get 0.5,
        everything else 1.
        """
        mask_ = np.ones((mask_shape[1], mask_shape[0], 3), np.float32)
        y_coords = np.arange(mask_shape[1])

        # Per-pixel coordinate grids (rows = y, cols = x).
        y_grid, x_grid = np.meshgrid(y_coords, np.arange(mask_shape[0]), indexing='ij')

        # Region between the two seams, and the seam columns themselves.
        blend_mask = (x_grid >= min_loc_left[:, np.newaxis]) & (x_grid < min_loc_right[:, np.newaxis])
        seam_mask_left = x_grid == min_loc_left[:, np.newaxis]
        seam_mask_right = x_grid == min_loc_right[:, np.newaxis]

        # Apply: interior first, then soften the seam columns.
        mask_[blend_mask] = 0
        mask_[seam_mask_left | seam_mask_right] = 0.5

        return mask_

    @staticmethod
    def compute_seam_mask(
            left_img1: np.ndarray,
            left_img2: np.ndarray,
            right_img1: np.ndarray,
            right_img2: np.ndarray,
            mask_size: Tuple[int, int],
            x_offset_left: int,
            x_offset_right: int,
            previous_seams: Optional[List[np.ndarray]] = None,
            temporal_weights: Tuple[float, float, float] = (0.4, 0.3, 0.3)
    ) -> Tuple[np.ndarray, List[np.ndarray]]:
        """
        Compute the blend mask along the optimal stitching seams.

        Args:
            left_img1: left overlap region from the left image
            left_img2: left overlap region from the right image
            right_img1: right overlap region from the right image
            right_img2: right overlap region from the left image
            mask_size: output mask size (width, height)
            x_offset_left: X offset of the left seam position
            x_offset_right: X offset of the right seam position
            previous_seams: previous-frame seam positions for temporal smoothing
            temporal_weights: smoothing weights (current, previous, one before)

        Returns:
            Tuple[np.ndarray, List[np.ndarray]]: (blend mask, updated seam positions)

        Raises:
            ValueError: on empty inputs or mismatched overlap-region shapes.
        """
        # Input validation.
        if not all(img is not None and img.size > 0 for img in [left_img1, left_img2, right_img1, right_img2]):
            raise ValueError("输入图像不能为空")

        if not all(img1.shape == img2.shape for img1, img2 in [
            (left_img1, left_img2), (right_img1, right_img2)
        ]):
            raise ValueError("输入图像尺寸不匹配")

        # Per-pixel errors in both overlap regions.
        error_left = ImageBlender._compute_error(left_img1, left_img2)
        error_right = ImageBlender._compute_error(right_img1, right_img2)

        # Accumulated seam-cost tables.
        err_left = ImageBlender._dynamic_programming(error_left, mask_size[1])
        err_right = ImageBlender._dynamic_programming(error_right, mask_size[1])

        # Cheapest column per row, shifted into full-mask coordinates.
        min_loc_l = np.argmin(err_left, axis=1) + x_offset_left
        min_loc_r = np.argmin(err_right, axis=1) + x_offset_right

        # Temporal smoothing against up to two previous frames.
        if previous_seams is None:
            previous_seams = [min_loc_l, min_loc_r, min_loc_l, min_loc_r]

        w1, w2, w3 = temporal_weights
        min_loc_left_final = np.int32(
            w1 * min_loc_l + w2 * previous_seams[0] + w3 * previous_seams[2]
        )
        min_loc_right_final = np.int32(
            w1 * min_loc_r + w2 * previous_seams[1] + w3 * previous_seams[3]
        )

        # Build the mask from the smoothed seam positions.
        mask = ImageBlender._create_blend_mask(
            min_loc_left_final, min_loc_right_final, mask_size
        )

        return mask, [min_loc_l, min_loc_r, min_loc_left_final, min_loc_right_final]


class FisheyeUndistorter:
    """
        Corrects fisheye distortion using a geometric projection model.
    Args:
        fov_degrees:   Field of view of the fisheye camera in degrees.
        fisheye_img_w: Width of the original fisheye image.
        fisheye_img_h: Height of the original fisheye image.
        undistorted_w: Width of the undistorted output image.
    """

    def __init__(
            self,
            fov_degrees: float,
            fisheye_img_w: int,
            fisheye_img_h: int,
            undistorted_w: int
    ):
        # Fisheye lenses commonly exceed 180°, so any FOV in (0, 360) is
        # accepted — matching the check below.
        if not (0 < fov_degrees < 360):
            # BUGFIX: message previously claimed the range was (0, 180),
            # contradicting the actual check.
            raise ValueError("fov_degrees must be in (0, 360)")

        if fisheye_img_w <= 0 or fisheye_img_h <= 0 or undistorted_w <= 0:
            raise ValueError("Image dimensions must be positive integers")

        if fisheye_img_w != fisheye_img_h:
            # BUGFIX: message previously read "Image dimensions must be equal
            # to image dimensions", which said nothing.
            raise ValueError("Fisheye image must be square (width == height)")

        self.fisheye_w = fisheye_img_w
        self.fisheye_h = fisheye_img_h
        self.undistorted_w = undistorted_w
        self.fov_rad = np.deg2rad(fov_degrees)

        # Precompute the remap tables once; apply_undistortion reuses them.
        self.target_x_coords, self.target_y_coords, self.radius = self.create_fisheye_projection_map(
            undistorted_w=self.undistorted_w,
            undistorted_h=self.fisheye_h,
            target_w=self.fisheye_w,
            target_h=self.fisheye_h
        )

    def create_fisheye_projection_map(
            self,
            undistorted_w: int,  # width of the undistorted (source) image (Ws)
            undistorted_h: int,  # height of the undistorted (source) image (Hs)
            target_w: int,  # width of the fisheye (target) image (Wd)
            target_h: int,  # height of the fisheye (target) image (Hd)
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
            Generate coordinate maps for projecting a rectangular image to fisheye.

        Returns:
            (target_x_coords, target_y_coords, radius): per-pixel fisheye
            sampling coordinates for cv2.remap plus the polar radius map.
        """
        # Pixel grid of the source image (origin at the top-left corner).
        y_pixels, x_pixels = np.indices((undistorted_h, undistorted_w), dtype=np.float32)

        # Recenter at the image middle with y pointing up (Cartesian frame).
        y_centered = undistorted_h / 2.0 - y_pixels
        x_centered = x_pixels - undistorted_w / 2.0

        # Plane -> sphere (equidistant projection model):
        # theta: azimuth around the optical axis, range [-pi, pi]
        # phi:   polar angle from the optical axis, range [0, fov/2]
        theta, phi = self.cartesian_to_spherical(x_centered, y_centered, undistorted_w, undistorted_h)

        # Equidistant fisheye: radius grows linearly with the polar angle.
        radius_from_center = target_h * phi / self.fov_rad

        # Polar -> Cartesian in the fisheye image (origin at its center).
        x_fisheye = radius_from_center * np.cos(theta)
        y_fisheye = radius_from_center * np.sin(theta)

        # Shift back to OpenCV's top-left-origin coordinate system.
        target_y_coords = target_h / 2.0 - y_fisheye
        target_x_coords = target_w / 2.0 + x_fisheye

        return target_x_coords, target_y_coords, radius_from_center

    def cartesian_to_spherical(self, x_centered, y_centered, undistorted_w, undistorted_h):
        """
            Return the equirectangular projection on a unit sphere,
            given cartesian coordinates of the de-warped image.
        """
        # Map pixel offsets to longitude/latitude on the unit sphere.
        theta_alt = x_centered * self.fov_rad / undistorted_w
        phi_alt = y_centered * np.pi / undistorted_h

        x = np.sin(theta_alt) * np.cos(phi_alt)
        y = np.sin(phi_alt)
        z = np.cos(theta_alt) * np.cos(phi_alt)

        # Azimuth in the image plane and polar angle from the optical axis.
        return np.arctan2(y, x), np.arctan2(np.sqrt(x ** 2 + y ** 2), z)

    def apply_undistortion(self, img: np.ndarray) -> np.ndarray:
        """Undistort a fisheye frame using the precomputed remap tables."""
        return cv2.remap(img, self.target_x_coords, self.target_y_coords, cv2.INTER_LINEAR)


class FeatureMatcher:
    """Feature-matching processor for the overlap regions of dual-fisheye
    frames: keypoint extraction (SuperPoint, or SIFT/ORB), template-based
    cross matching and homography estimation."""

    def __init__(
            self,
            fisheye_img_w: int,
            fisheye_img_h: int,
            undistorted_w: int,
            offset_yl: int = None,
            offset_yr: int = None,
            template_shape: Tuple[int, int] = (60, 16),
            max_features: int = 200,
            threshold: float = 0.9,
            en_debug: bool = True
    ):
        """
        Args:
            fisheye_img_w: width of one fisheye frame.
            fisheye_img_h: height of one fisheye frame.
            undistorted_w: width of the undistorted panorama strip.
            offset_yl: vertical margin for the left overlap region; defaults
                to 160 px scaled from a 1280-px-tall reference frame.
            offset_yr: vertical margin for the right overlap region; same
                default scaling as offset_yl.
            template_shape: (height, width) of the patch used for template
                matching around each keypoint.
            max_features: maximum number of matches kept per region pair.
            threshold: minimum normalized cross-correlation for a match.
            en_debug: draw keypoints onto debug images when True.
        """
        self.fisheye_w = fisheye_img_w
        self.fisheye_h = fisheye_img_h
        self.img_w = self.fisheye_w * 2
        self.undistorted_w = undistorted_w
        self.template_shape = template_shape
        self.max_features = max_features
        self.threshold = threshold
        self.en_debug = en_debug
        # Which classical detectors _get_img_features combines.
        self.feature_mode = ['orb', 'sift']

        if offset_yl is None:
            self.offset_yl = int(160 / 1280 * self.fisheye_h)
        else:
            self.offset_yl = offset_yl

        if offset_yr is None:
            self.offset_yr = int(160 / 1280 * self.fisheye_h)
        else:
            self.offset_yr = offset_yr

        # Buffer holding both shifted camera views stacked vertically.
        self.shifted_cams = np.zeros((self.fisheye_h * 2, self.img_w, 3), np.uint8)

        # SuperPoint feature extractor (runs on the module-level device).
        self.extractor = SuperPoint(max_num_keypoints=2048).eval().to(device)

    def created_shifted_cams(self, img_left: np.ndarray, img_right: np.ndarray):
        """Populate self.shifted_cams: left view centered in the bottom half,
        right view split and wrapped around the edges of the top half.

        Raises:
            ValueError: if either input is not a 3-channel color image.
        """
        if len(img_left.shape) != 3 or len(img_right.shape) != 3:
            # BUGFIX: the message used to ask for a *grayscale* image even
            # though the check requires 3-channel color input.
            raise ValueError("Error, please input 3-channel color images")

        self.shifted_cams[self.fisheye_h:, int((self.img_w - self.undistorted_w) / 2): int((self.img_w + self.undistorted_w) / 2)] = img_left

        left_width = int(np.floor(self.undistorted_w / 2))
        right_width = int(np.ceil(self.undistorted_w / 2))

        # Wrap the right view: its right half goes to the left border and
        # its left half goes to the right border.
        self.shifted_cams[: self.fisheye_h, :left_width] = img_right[:, right_width:]
        self.shifted_cams[: self.fisheye_h, self.img_w - right_width:] = img_right[:, :right_width]

    def process_keypoints(self, img_source, img_target, kps):
        """For each keypoint in *img_source*, template-match the surrounding
        patch inside *img_target* and keep hits above self.threshold.

        Returns:
            list of (score, (x, y) in source, (x, y) in target).
        """
        Hs, Ws = img_source.shape[:2]
        Ht, Wt = self.template_shape
        matches = []

        # Drop keypoints whose template patch would run off the image.
        valid_kps = []
        for pt in kps:
            y, x = int(pt.pt[1]), int(pt.pt[0])
            if y + Ht <= Hs and x + Wt <= Ws:
                valid_kps.append((y, x))

        # Match each surviving patch against the target image.
        for y, x in valid_kps:
            template = img_source[y:y + Ht, x:x + Wt]
            result = cv2.matchTemplate(img_target, template, cv2.TM_CCORR_NORMED)
            _, maxVal, _, maxLoc = cv2.minMaxLoc(result)
            if maxVal > self.threshold:
                matches.append((maxVal, (x, y), maxLoc))

        return matches

    @timing_decorator
    def _get_img_features(self, img: np.ndarray):
        """Detect keypoints with SIFT and/or ORB (per self.feature_mode),
        then spatially de-duplicate them by response.

        Raises:
            ValueError: if no keypoints are detected at all.
        """
        if len(img.shape) == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img

        all_kps = []

        # SIFT keypoints.
        if 'sift' in self.feature_mode:
            # Enhance local contrast before detection.
            gray = cv2.equalizeHist(gray)
            gray = cv2.GaussianBlur(gray, (3, 3), 0)
            sift = cv2.SIFT_create(
                nfeatures=0,
                nOctaveLayers=5,
                contrastThreshold=0.04,
                edgeThreshold=15,
                sigma=1.6
            )
            sift_kps = sift.detect(gray, None)
            all_kps.extend(sift_kps)

        # ORB keypoints.
        if 'orb' in self.feature_mode:
            orb = cv2.ORB_create(
                nfeatures=1000,
                scaleFactor=1.2,
                nlevels=8,
                edgeThreshold=31,
                firstLevel=0,
                WTA_K=2,
                patchSize=31
            )
            orb_kps = orb.detect(gray, None)
            all_kps.extend(orb_kps)

        # De-duplicate and filter.
        if len(all_kps) > 0:
            # Strongest responses first so the best survivor wins each cell.
            all_kps = sorted(all_kps, key=lambda x: x.response, reverse=True)

            # Greedy spatial de-duplication.
            filtered_kps = []
            used_positions = set()
            min_distance = 10  # minimum pixel distance between kept points

            for kp in all_kps:
                pos = (int(kp.pt[0]), int(kp.pt[1]))
                is_far_enough = True

                # Reject points too close to an already-kept one.
                for used_pos in used_positions:
                    if ((pos[0] - used_pos[0])**2 + (pos[1] - used_pos[1])**2) < min_distance**2:
                        is_far_enough = False
                        break

                if is_far_enough:
                    filtered_kps.append(kp)
                    used_positions.add(pos)

            if self.en_debug:
                img_kp = cv2.drawKeypoints(img, filtered_kps, None, color=(0, 255, 0), flags=0)
                # cv2.imwrite('hybrid_kps.jpg', img_kp)

            return filtered_kps
        else:
            raise ValueError("No keypoints detected")

    @timing_decorator
    def _get_img_features_by_superpoint(self, img: np.ndarray):
        """Detect keypoints with SuperPoint and return them as cv2.KeyPoint
        objects, response-sorted and spatially de-duplicated.

        Raises:
            ValueError: if no keypoints survive filtering.
        """
        if len(img.shape) == 3:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img

        # Enhance local contrast before extraction.
        gray = cv2.equalizeHist(gray)
        gray = cv2.GaussianBlur(gray, (3, 3), 0)

        # To normalized float tensor [1, 1, H, W].
        gray_tensor = torch.from_numpy(gray).float().to(device)
        gray_tensor = gray_tensor.unsqueeze(0).unsqueeze(0) / 255.0

        # SuperPoint inference (no gradients needed).
        with torch.no_grad():
            pred = self.extractor.extract(gray_tensor.to(device))
            keypoints = pred['keypoints'][0].cpu().numpy()  # [N, 2]
            scores = pred['keypoint_scores'][0].cpu().numpy()  # [N]

        # Convert to OpenCV KeyPoint objects.
        cv_keypoints = []
        for i, (x, y) in enumerate(keypoints):
            cv_keypoints.append(
                cv2.KeyPoint(
                    x=float(x),
                    y=float(y),
                    size=8,
                    angle=-1,
                    response=float(scores[i]),
                    octave=0,
                    class_id=-1
                )
            )

        # Strongest responses first so the best survivor wins each cell.
        cv_keypoints = sorted(cv_keypoints, key=lambda x: x.response, reverse=True)

        # Greedy spatial de-duplication.
        filtered_kps = []
        used_positions = set()
        min_distance = 10  # minimum pixel distance between kept points

        for kp in cv_keypoints:
            pos = (int(kp.pt[0]), int(kp.pt[1]))
            is_far_enough = True

            # Reject points too close to an already-kept one.
            for used_pos in used_positions:
                if ((pos[0] - used_pos[0])**2 + (pos[1] - used_pos[1])**2) < min_distance**2:
                    is_far_enough = False
                    break

            if is_far_enough:
                filtered_kps.append(kp)
                used_positions.add(pos)

        if self.en_debug:
            img_kp = cv2.drawKeypoints(img, filtered_kps, None, color=(0, 255, 0), flags=0)
            # cv2.imwrite('superpoint_kps.jpg', img_kp)

        if len(filtered_kps) == 0:
            raise ValueError("No keypoints detected")

        return filtered_kps

    @timing_decorator
    def get_matches_by_good_template(self, img1, img2, max_matches):
        """Cross-match keypoints between two equally-sized images by template
        matching in both directions; return the best *max_matches* pairs as an
        int32 array of shape (N, 2, 2): [(x, y) in img1, (x, y) in img2].

        Raises:
            ValueError: on shape mismatch or a template larger than the image.
        """
        if not np.array_equal(img1.shape, img2.shape):
            raise ValueError(f"Error：shape not equal {img1.shape}, {img2.shape}")

        if not (np.all(np.array(self.template_shape) <= np.array(img1.shape[:2]))):
            raise ValueError("Error：Template shape must be < image shape")

        # Keypoint detection (SuperPoint path; classical path kept for reference).
        # kps1 = self._get_img_features(img1)
        # kps2 = self._get_img_features(img2)
        kps1 = self._get_img_features_by_superpoint(img1)
        kps2 = self._get_img_features_by_superpoint(img2)

        # Match in both directions concurrently.
        with ThreadPoolExecutor(max_workers=2) as executor:
            future1 = executor.submit(self.process_keypoints, img1, img2, kps1)
            future2 = executor.submit(self.process_keypoints, img2, img1, kps2)
            matches1 = future1.result()
            matches2 = future2.result()

        # Flip the second direction's pairs so both lists read (img1, img2).
        matches2 = [(val, loc, (x, y)) for val, (x, y), loc in matches2]

        # Merge both directions.
        all_matches = matches1 + matches2

        # Heap-select the top matches instead of fully sorting.
        if len(all_matches) > max_matches:
            best_matches = heapq.nlargest(max_matches, all_matches, key=lambda e: e[0])
        else:
            best_matches = all_matches

        # Drop the scores; keep only the coordinate pairs.
        return np.int32([match[1:] for match in best_matches])

    @timing_decorator
    def verticalBoundary(self, homography):
        """
        Compute the valid vertical bounds (top, bottom) of the image after
        applying *homography*, clamped to [0, self.fisheye_h].
        """
        # X positions along the undistorted strip.
        x_coords = np.linspace((self.img_w - self.undistorted_w) / 2, (self.img_w + self.undistorted_w) / 2 - 1, self.undistorted_w)

        # Homogeneous points on the top edge: [x, 0, 1].
        top_row = np.ones((self.undistorted_w, 3))
        top_row[:, 0] = x_coords
        top_row[:, 1] = 0  # top edge, y = 0

        # Homogeneous points on the bottom edge: [x, H-1, 1].
        bottom_row = top_row.copy()
        bottom_row[:, 1] = self.fisheye_h - 1  # bottom edge, y = H-1

        # Transform the top edge in one batched multiply.
        top_transformed = np.dot(homography, top_row.T).T
        top_normed = top_transformed[:, :2] / top_transformed[:, 2:3]  # normalize homogeneous coords

        # Transform the bottom edge.
        bottom_transformed = np.dot(homography, bottom_row.T).T
        bottom_normed = bottom_transformed[:, :2] / bottom_transformed[:, 2:3]

        # Only consider x positions inside the central valid region.
        valid_region = (x_coords >= self.undistorted_w / 2) & (x_coords < self.img_w - self.undistorted_w / 2)

        # Extreme y values of the transformed edges inside that region.
        top_valid = top_normed[valid_region, 1]
        bottom_valid = bottom_normed[valid_region, 1]

        top = np.max(top_valid) if len(top_valid) > 0 else 0
        # BUGFIX: the empty-case fallback referenced an undefined name "H"
        # (NameError); use the frame height instead.
        bottom = np.min(bottom_valid) if len(bottom_valid) > 0 else self.fisheye_h

        # Clamp to the frame.
        top = max(0, int(top))
        bottom = min(self.fisheye_h, int(bottom))

        return top, bottom

    @timing_decorator
    def get_match_results(self, img_l, img_r):
        """
            Match the left/right images and warp them into alignment.

            Args:
                img_l - left image
                img_r - right image

            Returns:
                dict with 'warped1', 'warped2' and the warped mask 'EAof2'.

            NOTE(review): this method reads self.shifted_cams but the call to
            created_shifted_cams below is commented out — the caller must have
            populated the buffer beforehand; verify against the call site.
            """
        # Precompute shared values.
        half_img_w = int(self.img_w / 2)
        undistorted_half_diff = int((self.img_w - self.undistorted_w) / 2)

        # Extract the overlap regions of both images.
        l_img_l = img_l[self.offset_yl:self.fisheye_h - self.offset_yl, half_img_w:]
        l_img_r = img_r[self.offset_yl:self.fisheye_h - self.offset_yl, :self.undistorted_w - half_img_w]
        r_img_l = img_r[self.offset_yr:self.fisheye_h - self.offset_yr, half_img_w:]
        r_img_r = img_l[self.offset_yr:self.fisheye_h - self.offset_yr, :self.undistorted_w - half_img_w]

        # Match points in both overlap pairs.
        match_res_l = self.get_matches_by_good_template(l_img_l, l_img_r, self.max_features)
        match_res_r = self.get_matches_by_good_template(r_img_l, r_img_r, self.max_features)
        match_res_r = match_res_r[:, ::-1]  # swap pair order so left-image points come first

        # Shift the region-local coordinates back into full-image coordinates.
        offset_vector_l = np.array([undistorted_half_diff, self.offset_yl])
        offset_vector_r = np.array([undistorted_half_diff + half_img_w, self.offset_yr])
        match_res_l = match_res_l + offset_vector_l
        match_res_r = match_res_r + offset_vector_r

        # Stack all pairs and flatten to alternating (pt1, pt2) rows.
        matches = np.concatenate([match_res_l, match_res_r], axis=0)
        matches = matches.reshape(-1, 2)

        # Split alternating rows back into the two point sets.
        pts1 = matches[::2]  # even rows: left-image points
        pts2 = matches[1::2]  # odd rows: right-image points

        # Robust homography mapping pts2 onto pts1.
        mat_homography, status = cv2.findHomography(pts2, pts1, cv2.RANSAC, 4.0)

        # Report the inlier ratio.
        inlier_ratio = np.sum(status) / status.size
        print(f"The percentage of interior points is {inlier_ratio:.2f}")

        # Valid vertical extent after warping.
        top, bottom = self.verticalBoundary(mat_homography)

        # Populate the shifted-cams buffer (currently done by the caller).
        # self.created_shifted_cams(img_l, img_r)

        # Warp the bottom half (left view) with the homography.
        warped2 = cv2.warpPerspective(
            self.shifted_cams[self.fisheye_h:],
            mat_homography,
            (self.img_w, self.fisheye_h)
        )

        # Crop both halves to the valid band and rescale to full size.
        warped1 = cv2.resize(
            self.shifted_cams[0:self.fisheye_h, :][top:bottom],
            (self.img_w, self.fisheye_h)
        )

        warped2 = cv2.resize(
            warped2[top:bottom],
            (self.img_w, self.fisheye_h)
        )

        # Mask marking where the warped left view is valid.
        EAof2 = np.zeros((self.fisheye_h, self.img_w, 3), np.uint8)
        left_bound = undistorted_half_diff + 1
        right_bound = undistorted_half_diff + self.undistorted_w - 1
        EAof2[:, left_bound:right_bound] = 255
        EAof2 = cv2.warpPerspective(EAof2, mat_homography, (self.img_w, self.fisheye_h))

        return {
            'warped1': warped1,
            'warped2': warped2,
            'EAof2': EAof2
        }


class FisheyeStitcher:
    """Stitch a dual-fisheye image pair into a single panorama.

    Pipeline: undistort both fisheye inputs, build the shifted dual-camera
    canvas, estimate a seam homography from feature matches, warp/align the
    halves, then compute a seam mask and multi-band blend the two overlap
    bands around the left and right seams.

    NOTE(review): ``fisheye_shape[0]`` is forwarded to ``FisheyeUndistorter``
    as the image *height* (and used as the output height below), while the
    demo caller builds the tuple as ``(width, height)``. The two agree only
    for square crops — confirm the intended element order.
    """

    def __init__(
            self,
            fisheye_shape: Tuple[int, int],
            fov_degree: float,
            offset_shape: Tuple[Optional[int], Optional[int]] = (None, None),
            template_shape: Tuple[int, int] = (60, 16),
            max_features: int = 200,
            threshold: float = 0.9,
            w_lbl: int = 120,
            blend_level: int = 5
    ):
        """
        Args:
            fisheye_shape: shape of each input fisheye frame (see class note
                on element order).
            fov_degree: lens field of view in degrees (e.g. 195).
            offset_shape: vertical match offsets (left, right); when either
                entry is None, both default to 160/1280 of fisheye_shape[0].
            template_shape: (w, h) of the template used by the matcher.
            max_features: maximum number of feature matches to keep.
            threshold: template-match acceptance threshold.
            w_lbl: width in pixels of each blending band at the seams.
            blend_level: number of pyramid levels for multi-band blending.
        """
        self.fisheye_shape = fisheye_shape
        self.fov_degree = fov_degree
        self.offset_shape = offset_shape
        self.template_shape = template_shape
        self.max_features = max_features
        self.threshold = threshold
        self.w_lbl = w_lbl
        self.blend_level = blend_level

        # Default vertical offsets scale with frame size:
        # 160 px at a 1280-px reference frame.
        if offset_shape[0] is None or offset_shape[1] is None:
            default_offset = int(160 / 1280 * self.fisheye_shape[0])
            self.offset_shape = (default_offset, default_offset)

        # Width of one undistorted view, and of the final stitched canvas.
        self.undistorted_w = int(self.fov_degree / 180 * self.fisheye_shape[0])
        self.stitch_img_w = self.fisheye_shape[1] * 2

        # Initialize the undistorter and matcher helpers
        # (blender methods are used statically via ImageBlender).
        self.undistorter = FisheyeUndistorter(fov_degrees=self.fov_degree,
                                              fisheye_img_w=self.fisheye_shape[1],
                                              fisheye_img_h=self.fisheye_shape[0],
                                              undistorted_w=self.undistorted_w)

        self.matcher = FeatureMatcher(fisheye_img_w=self.fisheye_shape[1],
                                      fisheye_img_h=self.fisheye_shape[0],
                                      undistorted_w=self.undistorted_w,
                                      offset_yl=self.offset_shape[0],
                                      offset_yr=self.offset_shape[1],
                                      template_shape=self.template_shape,
                                      max_features=self.max_features,
                                      threshold=self.threshold)

    def stitch_dual_fisheye(self, img_l: np.ndarray, img_r: np.ndarray) -> np.ndarray:
        """Stitch one left/right fisheye frame pair into a blended panorama.

        Args:
            img_l: left fisheye frame; shape must match ``fisheye_shape``.
            img_r: right fisheye frame; same shape as ``img_l``.

        Returns:
            The blended panorama as a uint8 BGR image of width
            ``stitch_img_w``.

        Raises:
            ValueError: if the two frames differ in shape or do not match
                ``fisheye_shape``.
        """
        if img_l.shape != img_r.shape:
            raise ValueError(" [Error] The shape of img_l and img_r must be equal!")

        if img_l.shape[0] != self.fisheye_shape[0] or img_l.shape[1] != self.fisheye_shape[1]:
            raise ValueError(" [Error] The shape of img_l and img_r must be equal with fisheye_shape param!")

        # Undistort both inputs
        img_l = self.undistorter.apply_undistortion(img_l)
        img_r = self.undistorter.apply_undistortion(img_r)

        # Build the shifted dual-camera canvas from the (color) undistorted frames
        self.matcher.created_shifted_cams(img_l, img_r)

        # Matching itself runs on grayscale
        if img_l.ndim == 3:
            img_l = PreProcess.convert_to_gray(img_l)

        if img_r.ndim == 3:
            img_r = PreProcess.convert_to_gray(img_r)

        # warped1, warped2, EAof2
        match_res = self.matcher.get_match_results(img_l, img_r)

        # Hoist the seam geometry: half_w == int(undistorted_w / 2) for this
        # non-negative int; the two blend bands are w_lbl pixels wide, just
        # inside the left seam and just outside the right seam.
        half_w = self.undistorted_w // 2
        seam_l0 = half_w - self.w_lbl             # left band:  [seam_l0, half_w)
        seam_r0 = self.stitch_img_w - half_w      # right band: [seam_r0, seam_r1)
        seam_r1 = seam_r0 + self.w_lbl

        mask, prev_seam_loc = ImageBlender.compute_seam_mask(
            match_res['warped1'][:, seam_l0:half_w],
            match_res['warped2'][:, seam_l0:half_w],
            match_res['warped1'][:, seam_r0:seam_r1],
            match_res['warped2'][:, seam_r0:seam_r1],
            (self.stitch_img_w, self.fisheye_shape[0]),
            seam_l0,
            seam_r0
        )

        # Copy the central region of warped2 into warped1, then fill the
        # holes of warped2 (where the warped mask is zero) from warped1.
        match_res['warped1'][:, half_w:seam_r0] = match_res['warped2'][:, half_w:seam_r0]
        index = match_res['EAof2'] == 0
        match_res['warped2'][index] = match_res['warped1'][index]

        # Multi-band blend each seam band
        blend_l = ImageBlender.multi_band_blending(
            match_res['warped1'][:, seam_l0:half_w],
            match_res['warped2'][:, seam_l0:half_w],
            mask[:, seam_l0:half_w],
            self.blend_level
        )
        blend_r = ImageBlender.multi_band_blending(
            match_res['warped1'][:, seam_r0:seam_r1],
            match_res['warped2'][:, seam_r0:seam_r1],
            mask[:, seam_r0:seam_r1],
            self.blend_level
        )

        # Assemble the panorama; every column range below is written, so an
        # uninitialized buffer is safe.
        blended_img = np.empty((self.fisheye_shape[0], self.stitch_img_w, 3))
        blended_img[:, 0:seam_l0] = match_res['warped1'][:, 0:seam_l0]
        blended_img[:, seam_l0:half_w] = blend_l
        blended_img[:, half_w:seam_r0] = match_res['warped2'][:, half_w:seam_r0]
        blended_img[:, seam_r0:seam_r1] = blend_r
        blended_img[:, seam_r1:] = match_res['warped1'][:, seam_r1:]
        blended_img = blended_img.astype(np.uint8)

        return blended_img


def test_dual_fisheye_stitch():
    """Demo driver: load a hard-coded dual-fisheye pair, crop, stitch, show.

    Raises:
        FileNotFoundError: if either input image cannot be read.
        ValueError: if the cropped frames differ in shape.
    """
    """----------------------------------------------------"""
    img_l_path = r"C:\ws_rk\ws_2025\image-stitching\test\a.png"
    img_r_path = r"C:\ws_rk\ws_2025\image-stitching\test\b.png"

    fov_degree = 195
    """----------------------------------------------------"""

    frame_l = cv2.imread(img_l_path)
    frame_r = cv2.imread(img_r_path)
    # Bug fix: cv2.imread returns None (no exception) for an unreadable
    # path; fail loudly instead of crashing later inside the stitcher.
    if frame_l is None or frame_r is None:
        raise FileNotFoundError(
            f"Could not read input images: {img_l_path!r}, {img_r_path!r}")

    # Manual crop parameters tuned for this specific test pair; the
    # automatic crop path (PreProcess.auto_crop_fisheye_image) is the
    # alternative for unknown inputs.
    frame_l = PreProcess.manual_crop_image(frame_l, 576, 3396, 99, 2919)
    frame_r = PreProcess.manual_crop_image(frame_r, 564, 3384, 94, 2914)

    if frame_l.shape != frame_r.shape:
        raise ValueError(" [Error] The shape of frame_l and frame_r must be equal!")

    # (width, height) ordering — this is what FisheyeStitcher consumes.
    frame_shape = (frame_l.shape[1], frame_l.shape[0])

    stitcher = FisheyeStitcher(
        fisheye_shape=frame_shape,
        fov_degree=fov_degree
    )

    stitched_img = stitcher.stitch_dual_fisheye(frame_l, frame_r)
    cv2.imshow('stitched_img.png', stitched_img)
    # Bug fix: without a waitKey the HighGUI window never renders before
    # the script exits.
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    print('Done')


# Script entry point: run the hard-coded stitching demo.
if __name__ == '__main__':
    test_dual_fisheye_stitch()
