#!/usr/bin/env python3
"""
Simplified Single Image Localization for Docker
简化版单张图像定位脚本，适用于Docker环境
"""

import sys
from pathlib import Path
import json
import numpy as np
import h5py
import pycolmap
import time
from typing import Dict, List, Optional, Tuple

from hloc import extract_features, match_features, pairs_from_retrieval
from hloc.utils.io import get_matches, read_image
from hloc.utils.parsers import parse_retrieval
from hloc.utils.viz import plot_images, plot_matches, plot_keypoints, add_text, cm_RdGn
from hloc.utils.viz_3d import init_figure, plot_camera_colmap, plot_points, plot_reconstruction
import shutil
import matplotlib.pyplot as plt


def convert_to_serializable(obj):
    """
    Recursively convert an object containing NumPy types into a
    JSON-serializable structure.

    Handles numpy arrays and scalars (including ``np.bool_``, which the
    stdlib ``json`` encoder rejects), dicts, lists/tuples (both become
    lists), and ``pathlib.Path``. Any other object is returned unchanged.

    Args:
        obj: Arbitrary, possibly nested, object.

    Returns:
        An equivalent structure built only from JSON-serializable types.
    """
    if isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, np.bool_):
        # np.bool_ is neither np.integer nor np.floating, so without this
        # branch json.dump() raises TypeError on numpy booleans.
        return bool(obj)
    elif isinstance(obj, np.integer):
        return int(obj)
    elif isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, dict):
        return {key: convert_to_serializable(value) for key, value in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [convert_to_serializable(item) for item in obj]
    elif isinstance(obj, Path):
        return str(obj)
    else:
        return obj


def visualize_2d_matches(
    query_image_path: Path,
    retrieved_images: List[str],
    reconstruction: pycolmap.Reconstruction,
    matches_path: Path,
    query_keypoints: np.ndarray,
    inlier_mask: np.ndarray,
    query_name: str,
    db_image_dir: Path,
    output_path: Path,
    top_k: int = 3,
    dpi: int = 100
):
    """
    Visualize 2D feature matches between the query image and the database
    images that have the most 3D-backed matches.

    For each of the ``top_k`` best database images, a side-by-side figure of
    the query and database image with match lines is written to
    ``<output_path.stem>_match_<idx>.png`` in ``output_path``'s directory.

    Args:
        query_image_path: Path to the query image.
        retrieved_images: Names of retrieved database images (best first).
        reconstruction: COLMAP reconstruction containing the database images.
        matches_path: H5 file with the feature matches.
        query_keypoints: Query keypoint coordinates, shape (N, 2).
        inlier_mask: Currently unused; kept for API compatibility.
        query_name: Key of the query image in the matches file.
        db_image_dir: Directory containing the database images.
        output_path: Base path used to derive the per-pair output files.
        top_k: Number of database images to visualize.
        dpi: Resolution of the saved figures.
    """
    print("Creating 2D match visualization...")

    # Load the query image once; it is reused for every pair plot.
    query_img = read_image(query_image_path)

    # Map image names to COLMAP image ids.
    name_to_id = {img.name: img_id for img_id, img in reconstruction.images.items()}

    # Rank candidate database images by the number of matches whose database
    # keypoint is associated with a reconstructed 3D point.
    db_inlier_counts = {}
    cached_matches = {}  # db_name -> matches array; avoids re-reading the H5 file

    for db_name in retrieved_images[:top_k * 2]:  # inspect a few extra candidates
        if db_name not in name_to_id:
            continue

        matches, _ = get_matches(matches_path, query_name, db_name)
        if len(matches) == 0:
            continue
        cached_matches[db_name] = matches

        db_image = reconstruction.images[name_to_id[db_name]]

        # 3D point id for every 2D point of the database image (-1 if none).
        points3D_ids = np.array([
            p.point3D_id if p.has_point3D() else -1
            for p in db_image.points2D
        ])

        # Keep only matches whose database keypoint has a 3D point.
        valid_matches = matches[points3D_ids[matches[:, 1]] != -1]
        if len(valid_matches) > 0:
            db_inlier_counts[db_name] = len(valid_matches)

    # Select the top_k database images with the most 3D-backed matches.
    top_db_names = sorted(db_inlier_counts.items(), key=lambda x: x[1], reverse=True)[:top_k]

    # Create one visualization per selected database image.
    for idx, (db_name, _count) in enumerate(top_db_names):
        db_image_path = db_image_dir / db_name
        if not db_image_path.exists():
            continue

        db_img = read_image(db_image_path)
        db_image = reconstruction.images[name_to_id[db_name]]

        # Reuse the matches read during the ranking pass above.
        matches = cached_matches[db_name]

        # Keypoint coordinates on both images for the matched pairs.
        kp_q = query_keypoints[matches[:, 0]]
        kp_db = np.array([db_image.points2D[i].xy for i in matches[:, 1]])

        # Simplified inlier handling: draw all matches as inliers (green).
        inliers_vis = np.ones(len(matches), dtype=bool)
        color = cm_RdGn(inliers_vis).tolist()

        plot_images([query_img, db_img], dpi=dpi)
        plot_matches(kp_q, kp_db, color, a=0.1)

        text = f"matches: {len(matches)}"
        add_text(0, text, fs=10)
        add_text(0, query_image_path.name, pos=(0.01, 0.01), fs=8, lcolor=None, va="bottom")
        add_text(1, db_name, pos=(0.01, 0.01), fs=8, lcolor=None, va="bottom")

        save_path = output_path.parent / f"{output_path.stem}_match_{idx}.png"
        plt.savefig(save_path, bbox_inches='tight', dpi=dpi)
        plt.close()
        print(f"  Saved match visualization to: {save_path}")


def visualize_3d_localization(
    reconstruction: pycolmap.Reconstruction,
    query_pose: pycolmap.Rigid3d,
    query_camera_dict: Dict,
    inlier_points3d: np.ndarray,
    output_path: Path,
    show_all_points: bool = True
):
    """
    Visualize the 3D localization result as an interactive HTML figure.

    Renders the reconstruction (database cameras and, optionally, its point
    cloud), highlights the inlier 3D points, and draws the estimated query
    camera pose.

    Args:
        reconstruction: COLMAP reconstruction.
        query_pose: Estimated pose of the query camera.
        query_camera_dict: Query camera intrinsics dict with keys
            "model", "width", "height", "params".
        inlier_points3d: Inlier 3D points, shape (N, 3).
        output_path: Output HTML file path.
        show_all_points: If True, draw all reconstructed points; otherwise
            draw only the database cameras.

    Returns:
        The figure object created by init_figure().
    """
    print("Creating 3D visualization...")

    # Initialize the interactive 3D figure.
    fig = init_figure(height=800)

    if show_all_points:
        # Database cameras plus the (reprojection-error-filtered) point cloud.
        plot_reconstruction(
            fig, reconstruction,
            max_reproj_error=5.0,
            color="rgb(100, 100, 100)",
            name="Database",
            points=True,
            cameras=True,
            points_rgb=True,
            cs=0.5  # camera frustum size
        )
    else:
        # Database cameras only (import deferred: only needed on this path).
        from hloc.utils.viz_3d import plot_cameras
        plot_cameras(fig, reconstruction, color="rgb(100, 100, 100)", 
                    legendgroup="Database", size=0.5)

    # Highlight the inlier 3D points used for localization.
    if len(inlier_points3d) > 0:
        plot_points(
            fig, inlier_points3d,
            color="rgb(0, 255, 0)",
            ps=4,
            name="Inliers"
        )

    # Build a pycolmap camera from the intrinsics dict for plotting.
    query_camera = pycolmap.Camera(
        model=query_camera_dict["model"],
        width=query_camera_dict["width"],
        height=query_camera_dict["height"],
        params=query_camera_dict["params"]
    )

    # Draw the estimated query camera pose.
    plot_camera_colmap(
        fig, query_pose, query_camera,
        color="rgb(255, 0, 0)",
        name="Query Camera",
        fill=True,
        size=1.0,
        text="Query Image"
    )

    # Save as a standalone HTML file.
    fig.write_html(str(output_path))
    print(f"  Saved 3D visualization to: {output_path}")

    return fig


def localize_single_image(
    query_image_path: Path,
    sfm_model_path: Path,
    db_global_features: Path,
    db_local_features: Path,
    db_matches: Path,
    output_dir: Path,
    num_retrieved: int = 20,
    cleanup_temp: bool = True,
    visualize: bool = True,
    db_image_dir: Optional[Path] = None
) -> Optional[Dict]:
    """
    Localize a single query image against a pre-built SfM reconstruction.

    Pipeline: load reconstruction -> extract query features (NetVLAD global +
    SuperPoint local) -> retrieve similar database images -> match features
    (SuperGlue) -> build 2D-3D correspondences -> estimate pose (RANSAC+PnP).

    Args:
        query_image_path: Path to the query image.
        sfm_model_path: Path to the SfM reconstruction model.
        db_global_features: H5 file with database global descriptors.
        db_local_features: H5 file with database local features.
        db_matches: Database matches file. NOTE(review): not used anywhere in
            this function; kept for API compatibility — confirm with callers
            before removing.
        output_dir: Output directory.
        num_retrieved: Number of database images to retrieve.
        cleanup_temp: Whether to delete the temporary working directory.
        visualize: Whether to generate 2D/3D visualizations.
        db_image_dir: Database image directory (used for visualization;
            auto-detected from the model path if None).

    Returns:
        A JSON-serializable result dict, or None if there were too few
        matches or pose estimation failed.
    """
    
    print(f"Starting localization for: {query_image_path}")
    print("=" * 80)
    
    # Start overall timing; per-stage durations are collected in timing_stats.
    time_start_total = time.time()
    timing_stats = {}
    
    # Create the output directory (and parents) if needed.
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # Create a unique temporary directory for this run's intermediate files,
    # so repeated/concurrent runs do not clobber each other's outputs.
    import uuid
    timestamp = int(time.time())
    unique_id = str(uuid.uuid4())[:8]
    temp_dir = output_dir / f"temp_{timestamp}_{unique_id}"
    temp_dir.mkdir(parents=True, exist_ok=True)
    
    print(f"Using temporary directory: {temp_dir}")
    
    # 1. Load the SfM reconstruction model.
    print("\n[1/5] Loading SfM reconstruction...")
    time_start = time.time()
    reconstruction = pycolmap.Reconstruction(sfm_model_path)
    name_to_id = {img.name: img_id for img_id, img in reconstruction.images.items()}
    timing_stats['load_reconstruction'] = time.time() - time_start
    print(f"Loaded {len(reconstruction.images)} images and {len(reconstruction.points3D)} 3D points")
    print(f"⏱️  Time: {timing_stats['load_reconstruction']:.3f}s")
    
    # 2. Extract query image features.
    print("\n[2/5] Extracting query image features...")
    time_start = time.time()
    
    # ========================================================================
    # [Feature extraction] Two feature types are extracted from the query:
    #
    # 1. Global features (NetVLAD) — for image retrieval: the whole image is
    #    encoded as one fixed-size vector (no spatial information), used to
    #    quickly find similar database images.
    #
    # 2. Local features (SuperPoint) — for feature matching: keypoints with
    #    (x, y) positions plus 256-d descriptors, used by the precise
    #    matching stage below.
    #
    # Implementation details live in hloc/extract_features.py: model loading,
    # image preprocessing (resize/normalize), GPU inference, rescaling
    # keypoints to the original image size, and saving keypoints /
    # descriptors / scores / image_size to an H5 file.
    #
    # NOTE(review): extract_features.main() is given the query's PARENT
    # directory, so features are extracted for every image in it — confirm
    # the directory contains only the query image.
    # ========================================================================
    
    # Global features (NetVLAD).
    retrieval_conf = extract_features.confs["netvlad"]
    query_global_features = extract_features.main(
        retrieval_conf, query_image_path.parent, temp_dir
    )
    
    # Local features (SuperPoint).
    feature_conf = extract_features.confs["superpoint_aachen"]
    query_local_features = extract_features.main(
        feature_conf, query_image_path.parent, temp_dir
    )
    
    timing_stats['feature_extraction'] = time.time() - time_start
    print(f"⏱️  Time: {timing_stats['feature_extraction']:.3f}s")
    
    # 3. Image retrieval: find the database images most similar to the query.
    print("\n[3/5] Performing image retrieval...")
    time_start = time.time()
    pairs_path = temp_dir / "query_pairs.txt"
    pairs_from_retrieval.main(
        query_global_features, pairs_path, 
        num_matched=num_retrieved,
        db_descriptors=db_global_features
    )
    
    # Read retrieval results; the pairs file contains a single query image.
    retrieval_dict = parse_retrieval(pairs_path)
    query_name = list(retrieval_dict.keys())[0]
    retrieved_images = retrieval_dict[query_name]
    timing_stats['image_retrieval'] = time.time() - time_start
    print(f"Retrieved {len(retrieved_images)} similar images")
    print(f"⏱️  Time: {timing_stats['image_retrieval']:.3f}s")
    
    # 4. Feature matching.
    print("\n[4/5] Matching features...")
    time_start = time.time()
    matcher_conf = match_features.confs["superglue"]
    
    # ========================================================================
    # [Feature matching] SuperGlue matches the local features of the query
    # against those of each retrieved database image.
    #
    # Inputs:
    #   - pairs_path: image-pair list produced by the retrieval stage
    #   - query_local_features: SuperPoint features of the query image
    #   - db_local_features: SuperPoint features of the database images
    #
    # SuperGlue pipeline (implemented in hloc/match_features.py):
    #   1. Load keypoints + descriptors for each pair from the H5 files.
    #   2. Encode keypoints (position + visual features).
    #   3. Graph neural network: self-attention within each image and
    #      cross-attention between the two images.
    #   4. Optimal transport (Sinkhorn) for one-to-one, mutually consistent
    #      assignments.
    #   5. Threshold filtering: keep only matches above a score threshold.
    #
    # Output (matches_path H5 file):
    #   h5file[pair]["matches0"] — 1-D array of length = number of query
    #   keypoints; matches0[i] = j means query keypoint i matched database
    #   keypoint j, and -1 means unmatched.
    # ========================================================================
    
    # Path of the matches file that the matcher will produce.
    matches_output = temp_dir / f'{query_local_features.stem}_{matcher_conf["output"]}_{pairs_path.stem}.h5'
    
    matches_path = match_features.main(
        matcher_conf, pairs_path, query_local_features,
        matches=matches_output,
        features_ref=db_local_features
    )
    
    timing_stats['feature_matching'] = time.time() - time_start
    print(f"⏱️  Time: {timing_stats['feature_matching']:.3f}s")
    
    # 5. Pose estimation.
    print("\n[5/5] Estimating camera pose...")
    time_start = time.time()
    
    # ========================================================================
    # [Core step] Build 2D-3D correspondences: link the query image's 2D
    # keypoints to reconstructed 3D points by bridging through the database
    # images' existing 2D-3D associations in the SfM model.
    # ========================================================================
    
    # Query keypoint coordinates.
    with h5py.File(query_local_features, "r") as f:
        kpq = f[query_name]["keypoints"].__array__()  # shape: (N_query, 2)
    
    # Accumulators for the 2D-3D correspondences.
    points2D_all = []      # query-image 2D point coordinates
    points3D_ids = []      # corresponding 3D point ids
    num_matches = 0
    
    # Iterate over all retrieved database images.
    for db_name in retrieved_images:
        if db_name not in name_to_id:
            continue
            
        db_id = name_to_id[db_name]
        db_image = reconstruction.images[db_id]
        
        # Skip database images that observe no 3D points.
        if db_image.num_points3D == 0:
            continue
        
        # ====================================================================
        # Step 1: matches between the query and this database image.
        # get_matches() returns shape (N_matches, 2); each row is
        # [query_idx, db_idx] — the keypoint indices in the two images.
        # ====================================================================
        matches, _ = get_matches(matches_path, query_name, db_name)
        if len(matches) == 0:
            continue
        
        # ====================================================================
        # Step 2: the database image's 2D-3D associations.
        # db_image.points2D are this image's 2D points in the COLMAP model;
        # each Point2D may reference a 3D point (point3D_id) or none (-1).
        #
        # Assumption: keypoint indices in the H5 feature file correspond
        # one-to-one with the points2D indices of the reconstruction, i.e.
        # db_image.points2D[j] is the feature with db_idx = j — TODO confirm
        # this holds for the feature file the model was built from.
        # ====================================================================
        points3D_ids_db = np.array([
            p.point3D_id if p.has_point3D() else -1 
            for p in db_image.points2D
        ])
        
        # ====================================================================
        # Step 3: keep only matches whose database keypoint has a 3D point.
        # matches[:, 1] are the database keypoint indices;
        # points3D_ids_db[matches[:, 1]] != -1 selects 3D-backed matches.
        # ====================================================================
        valid_matches = matches[points3D_ids_db[matches[:, 1]] != -1]
        num_matches += len(valid_matches)
        
        # ====================================================================
        # Step 4: record the query 2D point and its 3D point id for each
        # valid match (idx_q, idx_r). The 3D coordinates are looked up later
        # via reconstruction.points3D[pid].xyz.
        # ====================================================================
        for idx_q, idx_r in valid_matches:
            points2D_all.append(kpq[idx_q])                    # query 2D point coordinates
            points3D_ids.append(points3D_ids_db[idx_r])        # corresponding 3D point id
    
    # Require a minimum number of correspondences for a stable PnP estimate.
    if len(points2D_all) < 10:
        print(f"Not enough matches: {len(points2D_all)}")
        return None
    
    # ========================================================================
    # Step 5: convert to numpy arrays for pose estimation.
    # points2D: (N, 2) query pixel coordinates; points3D: (N, 3) world coords.
    # ========================================================================
    points2D = np.array(points2D_all)
    points3D = np.array([reconstruction.points3D[pid].xyz for pid in points3D_ids])
    
    # ========================================================================
    # Step 6: rough camera-intrinsics estimate (no calibration available).
    # focal = 0.8 * max(w, h) is a heuristic; use calibrated intrinsics in
    # production.
    # NOTE(review): cv2.imread returns None for unreadable paths, in which
    # case img.shape raises AttributeError — consider an explicit check.
    # ========================================================================
    import cv2
    img = cv2.imread(str(query_image_path))
    height, width = img.shape[:2]
    focal_length = max(width, height) * 0.8
    cx, cy = width / 2, height / 2
    
    camera_config = {
        "model": "SIMPLE_PINHOLE",
        "width": width,
        "height": height,
        "params": [focal_length, cx, cy],
    }
    
    # ========================================================================
    # Step 7: absolute pose via RANSAC + PnP, then refinement.
    # Input: the 2D-3D correspondences and the camera intrinsics.
    # Output: the camera pose (world-to-camera transform).
    # ========================================================================
    estimation_options = pycolmap.AbsolutePoseEstimationOptions()
    estimation_options.ransac.max_error = 12  # RANSAC reprojection error threshold (pixels)
    
    result = pycolmap.estimate_and_refine_absolute_pose(
        points2D, points3D, camera_config, estimation_options
    )
    
    if result is None:
        print("Pose estimation failed")
        return None
    
    timing_stats['pose_estimation'] = time.time() - time_start
    print(f"Pose estimation successful with {result['num_inliers']} inliers")
    print(f"⏱️  Time: {timing_stats['pose_estimation']:.3f}s")
    
    # Total wall-clock time for the pipeline so far.
    timing_stats['total'] = time.time() - time_start_total
    
    # Assemble the result (raw values; converted to JSON-safe types below).
    localization_result = {
        "query_image": str(query_image_path),
        "retrieved_images": retrieved_images,
        "pose": result["cam_from_world"].todict(),
        "camera": camera_config,
        "num_inliers": result["num_inliers"],
        "num_matches": num_matches,
        "success": True,
        "temp_dir": str(temp_dir),
        "timing": timing_stats,
    }
    
    # Convert all NumPy types to JSON-serializable equivalents.
    localization_result = convert_to_serializable(localization_result)
    
    # Print the timing summary.
    print("\n" + "=" * 80)
    print("⏱️  TIMING SUMMARY")
    print("=" * 80)
    print(f"  Load Reconstruction:    {timing_stats['load_reconstruction']:8.3f}s  ({timing_stats['load_reconstruction']/timing_stats['total']*100:5.1f}%)")
    print(f"  Feature Extraction:     {timing_stats['feature_extraction']:8.3f}s  ({timing_stats['feature_extraction']/timing_stats['total']*100:5.1f}%)")
    print(f"  Image Retrieval:        {timing_stats['image_retrieval']:8.3f}s  ({timing_stats['image_retrieval']/timing_stats['total']*100:5.1f}%)")
    print(f"  Feature Matching:       {timing_stats['feature_matching']:8.3f}s  ({timing_stats['feature_matching']/timing_stats['total']*100:5.1f}%)")
    print(f"  Pose Estimation:        {timing_stats['pose_estimation']:8.3f}s  ({timing_stats['pose_estimation']/timing_stats['total']*100:5.1f}%)")
    print("-" * 80)
    print(f"  TOTAL TIME:             {timing_stats['total']:8.3f}s")
    print("=" * 80)
    
    # Generate visualizations (best-effort: any failure only warns).
    if visualize:
        try:
            print("\n[Visualization] Generating visualizations...")
            time_start_vis = time.time()
            vis_dir = output_dir / "visualizations"
            vis_dir.mkdir(exist_ok=True)
            
            # If no database image directory was given, try to infer it from
            # the SfM model path: ../images, then ../../images.
            if db_image_dir is None:
                db_image_dir = sfm_model_path.parent / "images"
                if not db_image_dir.exists():
                    db_image_dir = sfm_model_path.parent.parent / "images"
            
            # 2D match visualization (requires the database images on disk).
            if db_image_dir and db_image_dir.exists():
                vis_2d_path = vis_dir / f"{query_image_path.stem}_2d_matches"
                visualize_2d_matches(
                    query_image_path=query_image_path,
                    retrieved_images=retrieved_images,
                    reconstruction=reconstruction,
                    matches_path=matches_path,
                    query_keypoints=kpq,
                    inlier_mask=None,  # simplified: no per-match inlier info
                    query_name=query_name,
                    db_image_dir=db_image_dir,
                    output_path=vis_2d_path,
                    top_k=3
                )
            else:
                print(f"  Skipping 2D visualization: database image directory not found")
            
            # 3D localization visualization.
            vis_3d_path = vis_dir / f"{query_image_path.stem}_3d_localization.html"
            visualize_3d_localization(
                reconstruction=reconstruction,
                query_pose=result["cam_from_world"],
                query_camera_dict=camera_config,
                inlier_points3d=points3D,  # all 3D points used for localization
                output_path=vis_3d_path,
                show_all_points=True
            )
            
            # NOTE(review): recorded after convert_to_serializable() copied
            # timing_stats above, so the returned dict's "timing" does NOT
            # include the visualization time.
            timing_stats['visualization'] = time.time() - time_start_vis
            print(f"Visualizations saved to: {vis_dir}")
            print(f"⏱️  Visualization time: {timing_stats['visualization']:.3f}s")
        except Exception as e:
            print(f"Warning: Visualization failed: {e}")
            import traceback
            traceback.print_exc()
    
    # Clean up the temporary working directory (best-effort).
    if cleanup_temp:
        print(f"Cleaning up temporary files in: {temp_dir}")
        try:
            shutil.rmtree(temp_dir)
            print("Temporary files cleaned up successfully")
        except Exception as e:
            print(f"Warning: Failed to clean up temporary files: {e}")
    else:
        print(f"Temporary files preserved in: {temp_dir}")
    
    return localization_result


def main():
    """Command-line entry point: parse argv, run localization, write JSON."""
    argv = sys.argv

    if len(argv) < 7:
        # Too few positional arguments: print usage and exit with an error.
        for line in (
            "Usage: python3 localize_single.py <query_image> <sfm_model> <global_features> <local_features> <matches> <output_dir> [num_retrieved] [keep_temp] [visualize] [db_image_dir]",
            "Example: python3 localize_single.py query.jpg sfm_model/ global-feats.h5 local-feats.h5 matches.h5 output/ 20 false true images/",
            "",
            "Parameters:",
            "  query_image     - Path to the query image",
            "  sfm_model       - Path to the SfM reconstruction model",
            "  global_features - Path to the global features H5 file",
            "  local_features  - Path to the local features H5 file",
            "  matches         - Path to the matches H5 file",
            "  output_dir      - Output directory",
            "  num_retrieved   - Number of retrieved images (optional, default: 20)",
            "  keep_temp       - Keep temporary files (optional, default: false)",
            "  visualize       - Generate visualizations (optional, default: true)",
            "  db_image_dir    - Database image directory for visualization (optional, auto-detect if not specified)",
        ):
            print(line)
        sys.exit(1)

    # Required positional arguments.
    query_image_path = Path(argv[1])
    sfm_model_path = Path(argv[2])
    db_global_features = Path(argv[3])
    db_local_features = Path(argv[4])
    db_matches = Path(argv[5])
    output_dir = Path(argv[6])

    # Optional arguments with defaults.
    num_retrieved = int(argv[7]) if len(argv) > 7 else 20
    keep_temp = len(argv) > 8 and argv[8].lower() == 'true'
    visualize = len(argv) <= 9 or argv[9].lower() != 'false'
    db_image_dir = Path(argv[10]) if len(argv) > 10 else None

    # Every input path must exist before the pipeline is started.
    for required in (query_image_path, sfm_model_path, db_global_features,
                     db_local_features, db_matches):
        if not required.exists():
            print(f"Error: {required} not found")
            sys.exit(1)

    # Run the localization pipeline.
    result = localize_single_image(
        query_image_path, sfm_model_path, db_global_features,
        db_local_features, db_matches, output_dir, num_retrieved,
        not keep_temp, visualize, db_image_dir
    )

    if result is None:
        print("Localization failed!")
        sys.exit(1)

    # Persist the result as JSON.
    result_path = output_dir / "localization_result.json"
    with open(result_path, "w") as f:
        json.dump(result, f, indent=2)

    # Final summary.
    print("\n" + "=" * 80)
    print("✅ LOCALIZATION COMPLETED!")
    print("=" * 80)
    print(f"📄 Result saved to: {result_path}")
    print(f"📊 Inliers: {result['num_inliers']}/{result['num_matches']} ({result['num_inliers']/result['num_matches']*100:.1f}%)")
    print(f"⏱️  Total time: {result['timing']['total']:.3f}s")
    print("=" * 80)

# Script entry point: only run the CLI when executed directly, not on import.
if __name__ == "__main__":
    main()