import cv2
import time
import asyncio
import numpy as np
from dataclasses import dataclass
from typing import Optional, Dict, Any, Tuple

import pycolmap
import PIL.Image

import torch

# hloc dynamic loading
from hloc import extractors, matchers
from hloc import extract_features, match_features, pairs_from_retrieval
from hloc.utils.base_model import dynamic_load

import os

def draw_matches(
    img1: np.ndarray, 
    kpts1: np.ndarray, 
    img2: np.ndarray, 
    kpts2: np.ndarray, 
    matches: np.ndarray, 
    save_path: str,
    max_matches: int = 100,
    inlier_color: Tuple[int, int, int] = (0, 255, 0),
    single_point_color: Tuple[int, int, int] = (0, 0, 255)
) -> None:
    """Draw feature matches between two images side by side and save the result.

    Args:
        img1: First image (BGR).
        kpts1: Keypoints of the first image, shape (N, 2).
        img2: Second image (BGR).
        kpts2: Keypoints of the second image, shape (M, 2).
        matches: Array of shape (N,); matches[i] is the index into kpts2
            matched with kpts1[i], or -1 if unmatched.
        save_path: Output image path.
        max_matches: Maximum number of matches to draw; -1 draws all.
        inlier_color: BGR color for matched points and connecting lines.
        single_point_color: BGR color for unmatched points.
    """
    # Promote grayscale inputs to 3 channels so colored overlays are visible.
    if len(img1.shape) == 2:
        img1 = cv2.cvtColor(img1, cv2.COLOR_GRAY2BGR)
    if len(img2.shape) == 2:
        img2 = cv2.cvtColor(img2, cv2.COLOR_GRAY2BGR)

    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]

    # Side-by-side canvas: image 2 is placed to the right of image 1.
    canvas = np.zeros((max(h1, h2), w1 + w2, 3), dtype=np.uint8)
    canvas[:h1, :w1] = img1
    canvas[:h2, w1:w1 + w2] = img2

    # First pass: every in-bounds keypoint gets the "unmatched" color;
    # matched keypoints are re-drawn in inlier_color in the second pass.
    for x, y in kpts1:
        if 0 <= x < w1 and 0 <= y < h1:
            cv2.circle(canvas, (int(x), int(y)), 3, single_point_color, -1)

    for x, y in kpts2:
        if 0 <= x < w2 and 0 <= y < h2:
            cv2.circle(canvas, (int(x) + w1, int(y)), 3, single_point_color, -1)

    # Second pass: draw matched pairs with connecting lines.
    matched = np.where(matches != -1)[0]
    # Cap the number of drawn matches (max_matches <= 0 draws everything).
    if max_matches > 0 and len(matched) > max_matches:
        matched = matched[:max_matches]

    for qi in matched:
        di = matches[qi]

        xa, ya = kpts1[qi]
        xb, yb = kpts2[di]
        xb += w1  # shift into the right half of the canvas

        # Skip pairs where either endpoint lands outside its image region.
        if not (0 <= xa < w1 and 0 <= ya < h1):
            continue
        if not (w1 <= xb < w1 + w2 and 0 <= yb < h2):
            continue

        pa = (int(xa), int(ya))
        pb = (int(xb), int(yb))

        cv2.line(canvas, pa, pb, inlier_color, 1)
        cv2.circle(canvas, pa, 3, inlier_color, -1)
        cv2.circle(canvas, pb, 3, inlier_color, -1)

    cv2.imwrite(save_path, canvas)
    print(f"特征匹配结果已保存至: {save_path}")


def draw_reprojection_errors(
    image: np.ndarray,
    points2D: np.ndarray,
    points2D_reproj: np.ndarray,
    reproj_errors: np.ndarray,
    inlier_mask: np.ndarray,
    save_path: str,
    max_error_display: float = 10.0,
    orig_point_color: Tuple[int, int, int] = (0, 255, 0),
    reproj_point_color: Tuple[int, int, int] = (0, 0, 255),
    line_color: Tuple[int, int, int] = (0, 255, 255)
) -> None:
    """Visualize reprojection errors for inlier correspondences and save the image.

    Args:
        image: Input image (BGR).
        points2D: Observed 2D keypoints, shape (N, 2).
        points2D_reproj: Reprojected 2D points, shape (N, 2).
        reproj_errors: Per-point reprojection errors, shape (N,).
        inlier_mask: Inlier mask, shape (N,); True marks an inlier.
        save_path: Output image path.
        max_error_display: Error-label threshold. Negative shows labels for
            all errors; otherwise only errors below this value are labeled.
        orig_point_color: BGR color for observed points.
        reproj_point_color: BGR color for reprojected points.
        line_color: BGR color for the connecting segments.
    """
    # Work on a 3-channel copy so colored overlays are visible.
    canvas = image.copy()
    if len(canvas.shape) == 2:
        canvas = cv2.cvtColor(canvas, cv2.COLOR_GRAY2BGR)

    h, w = canvas.shape[:2]

    inlier_indices = np.where(inlier_mask)[0]

    # Nothing to draw: still write out the (unannotated) image.
    if len(inlier_indices) == 0:
        print("Warning: No inliers to visualize")
        cv2.imwrite(save_path, canvas)
        return

    drawn = 0    # pairs with both endpoints inside the image
    skipped = 0  # pairs with an out-of-bounds endpoint

    for idx in inlier_indices:
        pt_obs = points2D[idx]
        pt_proj = points2D_reproj[idx]
        err = reproj_errors[idx]

        # Only draw pairs where both the observation and the reprojection
        # fall inside the image.
        obs_ok = (0 <= pt_obs[0] < w) and (0 <= pt_obs[1] < h)
        proj_ok = (0 <= pt_proj[0] < w) and (0 <= pt_proj[1] < h)
        if not (obs_ok and proj_ok):
            skipped += 1
            continue
        drawn += 1

        # Integer pixel coordinates for OpenCV drawing.
        p_obs = tuple(pt_obs.astype(int))
        p_proj = tuple(pt_proj.astype(int))

        # Observed point, reprojected point, and the error segment between them.
        cv2.circle(canvas, p_obs, radius=5, color=orig_point_color, thickness=-1)
        cv2.circle(canvas, p_proj, radius=5, color=reproj_point_color, thickness=-1)
        cv2.line(canvas, p_obs, p_proj, color=line_color, thickness=1)

        # Annotate the error value at the segment midpoint. A negative
        # threshold labels everything; otherwise only errors in (0, threshold).
        if max_error_display < 0 or 0 < err < max_error_display:
            mid = ((p_obs[0] + p_proj[0]) // 2,
                   (p_obs[1] + p_proj[1]) // 2)
            cv2.putText(canvas, f"{err:.2f}", mid,
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)

    cv2.imwrite(save_path, canvas)

    print(f"Reprojection visualization saved to: {save_path}")
    print(f"  Valid points drawn: {drawn}")
    print(f"  Invalid points (out of bounds): {skipped}")
    print(f"  Total inliers: {len(inlier_indices)}")

def draw_keypoints(
    image: np.ndarray,
    keypoints: np.ndarray,
    save_path: str,
    radius: int = 5,
    color: Tuple[int, int, int] = (0, 255, 0),
    thickness: int = -1
) -> None:
    """Draw keypoints onto a copy of the image and save it.

    Args:
        image: Input image (BGR).
        keypoints: Keypoint coordinates, shape (N, 2), as (x, y).
        save_path: Output image path.
        radius: Keypoint circle radius in pixels.
        color: Keypoint color (BGR); default green.
        thickness: Circle line width; -1 draws a filled circle.
    """
    # Work on a copy so the caller's image is not modified.
    img_with_kpts = image.copy()
    
    # Promote grayscale to 3 channels so the color is visible.
    if len(img_with_kpts.shape) == 2:
        img_with_kpts = cv2.cvtColor(img_with_kpts, cv2.COLOR_GRAY2BGR)
    
    # Image size is loop-invariant: compute it once (was recomputed per keypoint).
    h, w = img_with_kpts.shape[:2]
    
    for (x, y) in keypoints:
        # OpenCV drawing needs integer pixel coordinates.
        x_int = int(round(x))
        y_int = int(round(y))
        
        # Skip keypoints that fall outside the image.
        if 0 <= x_int < w and 0 <= y_int < h:
            cv2.circle(
                img_with_kpts, 
                (x_int, y_int), 
                radius=radius, 
                color=color,
                thickness=thickness
            )
    
    cv2.imwrite(save_path, img_with_kpts)
    print(f"Keypoints visualization saved to: {save_path}")
    print(f"  Total keypoints: {len(keypoints)}")

def compute_reprojection_errors(
    points2D: np.ndarray,
    points3D: np.ndarray,
    cam_from_world: pycolmap.Rigid3d,
    camera_config: Dict[str, Any],
    inlier_mask: Optional[np.ndarray] = None,
    image_size: Optional[Tuple[int, int]] = None
) -> Dict[str, Any]:
    """Compute reprojection errors for 2D-3D correspondences.

    Args:
        points2D: Observed 2D keypoints, shape (N, 2).
        points3D: Corresponding 3D points, shape (N, 3).
        cam_from_world: Camera pose (world-to-camera transform).
        camera_config: Camera dict with keys model, width, height, params.
        inlier_mask: Optional inlier mask, shape (N,). If given, only the
            inliers are evaluated.
        image_size: Optional (width, height); falls back to camera_config.

    Returns:
        Dict with (M = number of evaluated points):
        - reproj_errors: per-point errors, shape (M,)
        - points2D_reproj: reprojected 2D points, shape (M, 2)
        - visible_mask: point-in-front-of-camera mask, shape (M,)
        - in_bounds_mask: point-inside-image mask, shape (M,)
        - valid_mask: visible & in_bounds, shape (M,)
        - mean_error / median_error: stats over valid points (inf if none)
        - num_valid: number of valid points
        - num_total: number of evaluated points
    """
    # Restrict to inliers when a mask is provided.
    if inlier_mask is not None:
        points2D_input = points2D[inlier_mask]
        points3D_input = points3D[inlier_mask]
    else:
        points2D_input = points2D
        points3D_input = points3D
    
    num_total = len(points2D_input)
    
    # Image bounds used for the in-bounds test.
    if image_size is not None:
        w, h = image_size
    else:
        w = camera_config["width"]
        h = camera_config["height"]
    
    # Build a pycolmap Camera for the projection model.
    camera = pycolmap.Camera(
        model=camera_config["model"],
        width=camera_config["width"],
        height=camera_config["height"],
        params=camera_config["params"]
    )
    
    # Transform 3D points from the world frame to the camera frame.
    points3D_cam = np.array([cam_from_world * pt for pt in points3D_input])  # (M, 3)
    
    # Points must lie in front of the camera (small positive-z margin).
    visible_mask = points3D_cam[:, 2] > 1e-4
    
    # Perspective division. Points at or behind the camera would divide by a
    # non-positive z and trigger numpy RuntimeWarnings; suppress them here —
    # such points are excluded by visible_mask / valid_mask below anyway.
    with np.errstate(divide="ignore", invalid="ignore"):
        points3D_cam_norm = points3D_cam[:, :2] / points3D_cam[:, 2:3]  # (M, 2)
    points2D_reproj = camera.img_from_cam(points3D_cam_norm)  # (M, 2)
    
    # In-bounds test: 0 <= u < w, 0 <= v < h (NaN coordinates compare False).
    with np.errstate(invalid="ignore"):
        in_bounds_mask = (
            (points2D_reproj[:, 0] >= 0) & (points2D_reproj[:, 0] < w) &
            (points2D_reproj[:, 1] >= 0) & (points2D_reproj[:, 1] < h)
        )
    
    # Valid = in front of the camera AND inside the image.
    valid_mask = visible_mask & in_bounds_mask
    
    # Euclidean distance between observation and reprojection.
    reproj_errors = np.linalg.norm(points2D_input - points2D_reproj, axis=1)  # (M,)
    
    # Error statistics over valid points only.
    if valid_mask.sum() > 0:
        mean_error = float(np.mean(reproj_errors[valid_mask]))
        median_error = float(np.median(reproj_errors[valid_mask]))
    else:
        mean_error = float('inf')
        median_error = float('inf')
    
    return {
        'reproj_errors': reproj_errors,
        'points2D_reproj': points2D_reproj,
        'visible_mask': visible_mask,
        'in_bounds_mask': in_bounds_mask,
        'valid_mask': valid_mask,
        'mean_error': mean_error,
        'median_error': median_error,
        'num_valid': int(valid_mask.sum()),
        'num_total': num_total,
    }


def resize_image(image, size, interp):
    """Resize an image; mirrors ImageDataset's resize_image behavior.

    Args:
        image: Input image array.
        size: Target size as (width, height).
        interp: Interpolation name, "cv2_*" or "pil_*" (e.g. "cv2_area").

    Returns:
        The resized image.

    Raises:
        ValueError: If the interpolation name has an unknown prefix.
    """
    if interp.startswith("cv2_"):
        flag = getattr(cv2, "INTER_" + interp[len("cv2_"):].upper())
        h, w = image.shape[:2]
        # INTER_AREA is intended for shrinking; fall back to linear when
        # either target dimension is an upscale.
        if flag == cv2.INTER_AREA and (w < size[0] or h < size[1]):
            flag = cv2.INTER_LINEAR
        return cv2.resize(image, size, interpolation=flag)
    if interp.startswith("pil_"):
        resample = getattr(PIL.Image, interp[len("pil_"):].upper())
        pil_img = PIL.Image.fromarray(image.astype(np.uint8))
        pil_img = pil_img.resize(size, resample=resample)
        return np.asarray(pil_img, dtype=image.dtype)
    raise ValueError(f"Unknown interpolation {interp}.")


def cv2tensor(cv_image: np.ndarray, 
              conf: Optional[Dict[str, Any]] = None,
              device: str = 'cuda') -> Tuple[torch.Tensor, np.ndarray]:
    """Convert an OpenCV image to a model-input tensor (mirrors ImageDataset.__getitem__).

    Args:
        cv_image: Image as read by OpenCV (BGR), shape (H, W, 3) or (H, W),
            dtype uint8.
        conf: Preprocessing options:
            - grayscale: bool, convert to grayscale (default False)
            - resize_max: int, longest-side limit (default None: no resize)
            - resize_force: bool, resize even when already small (default False)
            - interpolation: str, interpolation method (default "cv2_area")
        device: Target device ('cuda' or 'cpu').

    Returns:
        image_tensor: Tensor of shape (1, C, H, W), float32, values in [0, 1].
        original_size: Original size as an array [width, height].

    Raises:
        ValueError: For unsupported input shapes or inconsistent grayscale config.
    """
    # Defaults mirror ImageDataset.default_conf; user values override them.
    settings = {
        "grayscale": False,
        "resize_max": None,
        "resize_force": False,
        "interpolation": "cv2_area",
    }
    if conf is not None:
        settings.update(conf)

    # BGR -> RGB for color input; grayscale is copied untouched.
    if len(cv_image.shape) == 3:
        image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    elif len(cv_image.shape) == 2:
        image = cv_image.copy()
    else:
        raise ValueError(f"Unsupported image shape: {cv_image.shape}")

    image = image.astype(np.float32)

    # (H, W) -> (W, H): sizes are reported as (width, height).
    size = image.shape[:2][::-1]
    original_size = np.array(size, dtype=np.float32)

    # Optional resize so the longest side does not exceed resize_max.
    resize_max = settings["resize_max"]
    if resize_max and (settings["resize_force"] or max(size) > resize_max):
        scale = resize_max / max(size)
        target = tuple(int(round(dim * scale)) for dim in size)
        image = resize_image(image, target, settings["interpolation"])

    # Channel layout: grayscale -> (1, H, W); color -> (C, H, W).
    if settings["grayscale"]:
        if len(image.shape) == 3:
            # Color input but grayscale requested: convert first.
            image = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_RGB2GRAY).astype(np.float32)
        image = image[None, :, :]
    elif len(image.shape) == 3:
        image = image.transpose((2, 0, 1))  # (H, W, 3) -> (3, H, W)
    else:
        raise ValueError(f"Expected color image but got grayscale with grayscale=False")

    # Normalize to [0, 1] and add the batch dimension.
    image = image / 255.0
    image = image[np.newaxis, :, :, :]  # (C, H, W) -> (1, C, H, W)
    image_tensor = torch.from_numpy(image).float().to(device)

    return image_tensor, original_size


@dataclass
class LocalizationResult:
    """Result of a single visual localization query."""
    success: bool                  # whether localization succeeded
    translation: Optional[np.ndarray] = None  # translation vector (x, y, z)
    quaternion: Optional[np.ndarray] = None   # quaternion (w, x, y, z)
    inlier_count: int = 0          # number of PnP/RANSAC inliers
    reprojection_error: float = 0.0 # mean reprojection error (pixels)
    execution_time: float = 0.0    # execution time (milliseconds)
    confidence: float = 0.0        # localization confidence in [0, 1]
    additional_info: Optional[Dict[str, Any]] = None  # extra diagnostic info

class VisualLocalizer:
    """Hierarchical visual localizer.

    Combines NetVLAD image retrieval, SuperPoint local features and a
    SuperGlue matcher (all loaded via hloc's dynamic_load) with a pycolmap
    SfM reconstruction to estimate a query camera pose via PnP + RANSAC.
    """
    def __init__(self):
        # All members are populated by initialize(); None means "not loaded".
        self.device = None
        self.retrieval = None
        self.feature = None
        self.matcher = None
        self.reconstruction = None
        self.db_global_features = None  # (names: List[str], descs: np.ndarray [N,D])
        self.db_local_features_path = None  # Path to DB SuperPoint features (.h5)
        self.top_k = 20
    
    def initialize(self, 
        reconstruction_path: str,
        db_global_features_path: Optional[str] = None,
        db_local_features_path: Optional[str] = None,
        params: Optional[Dict[str, Any]] = None,
        top_k: int = 20) -> bool:
        """Load the SfM reconstruction, the deep models and the DB features.

        Args:
            reconstruction_path: Path to the pycolmap reconstruction.
            db_global_features_path: Optional HDF5 file with per-image
                NetVLAD global descriptors ("global_descriptor" datasets).
            db_local_features_path: Optional HDF5 file with per-image
                SuperPoint local features.
            params: Optional extra parameters (currently only defaulted to {}).
            top_k: Number of database images to retrieve per query.

        Returns:
            True on success (loading failures raise instead).
        """
        print(f"Loading SfM model from {reconstruction_path}")
        self.reconstruction = pycolmap.Reconstruction(reconstruction_path)
        print(f"Loaded reconstruction with {len(self.reconstruction.images)} images "
              f"and {len(self.reconstruction.points3D)} 3D points")

        # Build and cache models once for reuse.
        params = params or {}
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.top_k = int(top_k) if top_k else 20


        # Build NetVLAD (global retrieval descriptor)
        print("Loading netvlad model")
        retrieval_conf = extract_features.confs["netvlad"]["model"]
        retrieval_class = dynamic_load(extractors, retrieval_conf["name"])
        self.retrieval = retrieval_class(retrieval_conf).eval().to(self.device)

        # Build SuperPoint (local feature extractor)
        print("Loading superpoint model")
        feature_conf = extract_features.confs["superpoint_aachen"]["model"]
        feature_class = dynamic_load(extractors, feature_conf["name"])
        self.feature = feature_class(feature_conf).eval().to(self.device)

        # Build the matcher. NOTE: the SuperGlue config is used here; the
        # LightGlue alternative is left commented out below.
        print("Loading lightglue model")
        # lightglue_conf = match_features.confs["superpoint+lightglue"]["model"]
        matcher_conf = match_features.confs["superglue"]["model"]
        matcher_class = dynamic_load(matchers, matcher_conf["name"])
        self.matcher = matcher_class(matcher_conf).eval().to(self.device)

        # Load DB global descriptors (HDF5)
        if db_global_features_path is not None:
            import h5py
            names = []
            descs = []
            with h5py.File(db_global_features_path, "r") as f:
                for name in f.keys():
                    if "global_descriptor" in f[name]:
                        d = f[name]["global_descriptor"].__array__()
                        descs.append(d)
                        names.append(name)
            if len(descs) == 0:
                raise ValueError("No global_descriptor found in DB features file")
            descs = np.stack(descs, axis=0).astype(np.float32)
            # Normalize to ensure cosine similarity
            norms = np.linalg.norm(descs, axis=1, keepdims=True) + 1e-12
            descs = descs / norms
            self.db_global_features = (names, descs)

        # Store DB local features path (SuperPoint)
        if db_local_features_path is not None:
            self.db_local_features_path = db_local_features_path

        return True
    
    async def localize(self, image: np.ndarray) -> LocalizationResult:
        """
        Localize a query image against the loaded SfM model.

        Args:
            image: Image as read by OpenCV (BGR), numpy array,
                shape (H, W, 3) or (H, W).

        Returns:
            LocalizationResult: the localization result.
        """
        timing_stats = {}
        start_total = time.time()
        
        # ====================================================================
        # Step 1: Global feature extraction (NetVLAD)
        # ====================================================================
        print("\n[1/5] Extracting global features...")
        start = time.time()
        
        retrieval_config = extract_features.confs["netvlad"]
        image_tensor, _ = cv2tensor(image, conf=retrieval_config["preprocessing"], device=self.device)
        
        with torch.no_grad():
            pred = self.retrieval({"image": image_tensor})
            q_desc = pred["global_descriptor"][0].cpu().numpy().astype(np.float32)
        q_desc = q_desc / (np.linalg.norm(q_desc) + 1e-12)  # L2 normalize
        
        timing_stats['global_feature_extraction'] = time.time() - start
        print(f"✓ Completed in {timing_stats['global_feature_extraction']:.3f}s")

        # ====================================================================
        # Step 2: Image retrieval (top-k nearest DB descriptors)
        # ====================================================================
        print("\n[2/5] Retrieving similar images...")
        start = time.time()
        
        if not self.db_global_features:
            raise ValueError("DB global features not loaded. Provide db_global_features_path in initialize().")
        
        names, db_descs = self.db_global_features
        sims = db_descs @ q_desc  # cosine similarity
        topk_idx = np.argsort(-sims)[: self.top_k]
        retrieved = [names[i] for i in topk_idx]
        
        timing_stats['image_retrieval'] = time.time() - start
        print(f"✓ Retrieved {len(retrieved)} images in {timing_stats['image_retrieval']:.3f}s")

        # ====================================================================
        # Step 3: Local feature extraction (SuperPoint)
        # ====================================================================
        print("\n[3/5] Extracting local features...")
        start = time.time()
        
        feature_config = extract_features.confs["superpoint_aachen"]
        q_img_tensor, q_orig_size = cv2tensor(image, conf=feature_config["preprocessing"], device=self.device)
        
        with torch.no_grad():
            q_pred = self.feature({"image": q_img_tensor})
        
        # Extract features and rescale keypoints back to the original image size
        q_kpts = q_pred["keypoints"][0].cpu().numpy()
        q_descs = q_pred["descriptors"][0].cpu().numpy()  # (D, N) format
        q_scores = q_pred["scores"][0].cpu().numpy()
        
        size = np.array(q_img_tensor.shape[-2:][::-1], dtype=np.float32)
        scales = (q_orig_size / size).astype(np.float32)
        # +0.5/-0.5 keeps pixel-center alignment when rescaling coordinates.
        q_kpts = (q_kpts + 0.5) * scales[None] - 0.5
        
        timing_stats['local_feature_extraction'] = time.time() - start
        print(f"✓ Extracted {len(q_kpts)} keypoints in {timing_stats['local_feature_extraction']:.3f}s")
        
        '''
        # 可视化：绘制提取的特征点
        draw_keypoints(
            image=image,
            keypoints=q_kpts,
            save_path="query_keypoints.jpg",
            radius=5,
            color=(0, 255, 0),
            thickness=-1
        )
        '''

        # ====================================================================
        # Step 4: Feature matching (SuperGlue)
        # ====================================================================
        print("\n[4/5] Matching features...")
        start = time.time()
        
        if not self.db_local_features_path:
            raise ValueError("DB local features path not set. Provide db_local_features_path in initialize().")

        import h5py
        
        all_points2D = []
        all_points3D_ids = []
        name_to_id = {img.name: img_id for img_id, img in self.reconstruction.images.items()}
        
        # Prepare query feature dict (mirrors the FeaturePairsDataset format)
        h, w = image.shape[:2]
        q_data = {
            "keypoints0": torch.from_numpy(q_kpts).float(),
            "descriptors0": torch.from_numpy(q_descs).float(),
            "scores0": torch.from_numpy(q_scores).float(),
            "image0": torch.empty((1, h, w))
        }
        
        # Match the query against each retrieved database image
        with h5py.File(self.db_local_features_path, "r") as fd:
            for db_name in retrieved:
                print(db_name)
                if db_name not in fd or db_name not in name_to_id:
                    continue
                
                grp = fd[db_name]
                if "keypoints" not in grp or "descriptors" not in grp:
                    continue
                
                # Load DB features and merge them with the query dict
                db_data = {k + "1": torch.from_numpy(v.__array__()).float() for k, v in grp.items()}
                db_data["image1"] = torch.empty((1,) + tuple(grp["image_size"])[::-1])
                data = {**q_data, **db_data}
                
                # Add batch dimension and move tensors to the device
                data = {
                    k: v[None] if k.startswith("image") else v[None].to(self.device, non_blocking=True)
                    for k, v in data.items()
                }
                
                # Run the matcher
                with torch.no_grad():
                    m = self.matcher(data)["matches0"][0].cpu().short().numpy()
                

                '''
                # 可视化：绘制特征匹配结果（如果数据库图像路径可用）
                db_img_dir = "/media/sdisk/data/3dgs/220kv/images-selected/"
                db_img_path = os.path.join(db_img_dir, db_name)
                if os.path.exists(db_img_path):
                    db_img = cv2.imread(db_img_path)
                    if db_img is not None:
                        db_kpts_np = data["keypoints1"][0].cpu().numpy()
                        save_path = f"match_{db_name}"
                        draw_matches(
                            img1=image,
                            kpts1=q_kpts,
                            img2=db_img,
                            kpts2=db_kpts_np,
                            matches=m,
                            save_path=save_path,
                            max_matches=100
                        )
                '''

                # Keep only matches whose DB keypoint has an associated 3D point
                idx_q = np.where(m != -1)[0]
                idx_db = m[idx_q]
                if len(idx_q) == 0:
                    continue
                
                db_id = name_to_id[db_name]
                db_image = self.reconstruction.images[db_id]
                if db_image.num_points3D == 0:
                    continue
                
                # -1 marks DB keypoints without a triangulated 3D point.
                points3D_ids = np.array([
                    p.point3D_id if p.has_point3D() else -1 for p in db_image.points2D
                ], dtype=np.int64)
                
                valid_mask = points3D_ids[idx_db] != -1
                idx_q = idx_q[valid_mask]
                idx_db = idx_db[valid_mask]
                if len(idx_q) == 0:
                    continue
                
                # Collect 2D-3D correspondences (despite the name, the second
                # list stores 3D xyz coordinates, not point IDs).
                all_points2D.append(q_kpts[idx_q])
                all_points3D_ids.append(np.array([
                    self.reconstruction.points3D[int(pid)].xyz for pid in points3D_ids[idx_db]
                ], dtype=np.float64))

        if len(all_points2D) == 0:
            print("✗ No matches found")
            return LocalizationResult(success=False, confidence=0.0)
        
        points2D = np.concatenate(all_points2D, axis=0)
        points3D = np.concatenate(all_points3D_ids, axis=0)
        
        timing_stats['feature_matching'] = time.time() - start
        print(f"✓ Found {len(points2D)} 2D-3D correspondences in {timing_stats['feature_matching']:.3f}s")

        # ====================================================================
        # Step 5: Pose estimation (PnP + RANSAC)
        # ====================================================================
        print("\n[5/5] Estimating camera pose...")
        start = time.time()
        
        # NOTE(review): hard-coded intrinsics guess; focal length is refined
        # below via estimate_focal_length, but the principal point is fixed.
        camera_config = {
            "model": "SIMPLE_PINHOLE",
            "width": w,
            "height": h,
            "params": [1800.34, 640, 360, 0],
            # "params": [3701.34, 2432, 1824, -0.00926807],
        }
        
        estimation_options = pycolmap.AbsolutePoseEstimationOptions()
        estimation_options.ransac.max_error = 5
        estimation_options.estimate_focal_length = True
        
        estimation_result = pycolmap.estimate_and_refine_absolute_pose(
            points2D, points3D, camera_config, estimation_options
        )
        
        if estimation_result is None:
            print("✗ Pose estimation failed")
            return LocalizationResult(success=False, confidence=0.0)
        
        cam_from_world = estimation_result["cam_from_world"]
        num_inliers = int(estimation_result["num_inliers"])
        inlier_mask = estimation_result["inlier_mask"]

        timing_stats['pose_estimation'] = time.time() - start
        print(f"✓ Pose estimated with {num_inliers}/{len(points2D)} inliers in {timing_stats['pose_estimation']:.3f}s")
        
        # Compute reprojection errors over the RANSAC inliers
        reproj_result = compute_reprojection_errors(
            points2D=points2D,
            points3D=points3D,
            cam_from_world=cam_from_world,
            camera_config=camera_config,
            inlier_mask=inlier_mask,
            image_size=(w, h)
        )
        
        mean_reproj_error = reproj_result['mean_error']
        valid_inliers = reproj_result['num_valid']
        
        # Visualization: recompute over ALL points so the drawing helper can
        # index the full arrays with inlier_mask
        reproj_result_all = compute_reprojection_errors(
            points2D=points2D,
            points3D=points3D,
            cam_from_world=cam_from_world,
            camera_config=camera_config,
            inlier_mask=None,  # all points
            image_size=(w, h)
        )

        
        draw_reprojection_errors(
            image=image,
            points2D=points2D,
            points2D_reproj=reproj_result_all['points2D_reproj'],
            reproj_errors=reproj_result_all['reproj_errors'],
            inlier_mask=inlier_mask,
            save_path="reprojection_visualization.jpg",
            max_error_display=10.0
        )
        

        # Total elapsed time
        timing_stats['total'] = time.time() - start_total
        
        # Print the timing summary
        print("\n" + "="*60)
        print("⏱️  TIMING SUMMARY")
        print("="*60)
        for key, value in timing_stats.items():
            if key != 'total':
                percentage = (value / timing_stats['total']) * 100
                print(f"  {key:30s}: {value:6.3f}s ({percentage:5.1f}%)")
        print("-"*60)
        print(f"  {'TOTAL':30s}: {timing_stats['total']:6.3f}s")
        print("="*60)
        print(f"✓ Inliers: {valid_inliers}/{num_inliers} valid")
        print(f"✓ Reprojection error: {mean_reproj_error:.2f} pixels")
        print("="*60 + "\n")

        return LocalizationResult(
            success=True,
            translation=np.array(cam_from_world.translation),
            # Reorder pycolmap's (x, y, z, w) quaternion to (w, x, y, z).
            quaternion=np.array(cam_from_world.rotation.quat[[3, 0, 1, 2]]),
            inlier_count=num_inliers,
            reprojection_error=mean_reproj_error,
            execution_time=timing_stats['total'] * 1000,  # convert to milliseconds
            confidence=float(valid_inliers) / num_inliers if num_inliers > 0 else 0.0,
            additional_info={
                "retrieved": retrieved,
                "num_correspondences": int(points2D.shape[0]),
                "timing_stats": timing_stats,
                "valid_inliers": valid_inliers,
            }
        )