import argparse
import sqlite3
from collections import defaultdict
from pathlib import Path
import pymap3d as pm

import numpy as np
from tqdm import tqdm
import xml.etree.ElementTree as ET
from scipy.spatial.transform import Rotation

from . import logger
from .utils.read_write_model import (
    CAMERA_MODEL_NAMES,
    Camera,
    Image,
    Point3D,
    write_model,
)

from pyproj import Transformer

def recover_database_images_and_ids(database_path):
    """Read the image-name -> id mappings from a COLMAP SQLite database.

    Args:
        database_path: Path to the COLMAP ``database.db`` file.

    Returns:
        Tuple ``(images, cameras)``: ``images`` maps image name -> image_id,
        ``cameras`` maps image name -> camera_id.
    """
    images = {}
    cameras = {}
    db = sqlite3.connect(str(database_path))
    try:
        # Rows are (name, image_id, camera_id).
        ret = db.execute("SELECT name, image_id, camera_id FROM images;")
        for name, image_id, camera_id in ret:
            images[name] = image_id
            cameras[name] = camera_id
    finally:
        # Close the connection even if the query fails (was leaked on error).
        db.close()
    logger.info(f"Found {len(images)} images and {len(cameras)} cameras in database.")
    return images, cameras


def quaternion_to_rotation_matrix(qvec):
    """Convert a quaternion in (w, x, y, z) order to a 3x3 rotation matrix.

    The quaternion is normalized first, so non-unit inputs are accepted.
    """
    qvec = qvec / np.linalg.norm(qvec)
    w, x, y, z = qvec
    # Precompute the products used by the standard quaternion-to-matrix form.
    xx, yy, zz = x * x, y * y, z * z
    xy, xz, yz = x * y, x * z, y * z
    wx, wy, wz = w * x, w * y, w * z
    return np.array(
        [
            [1.0 - 2.0 * (yy + zz), 2.0 * (xy - wz), 2.0 * (xz + wy)],
            [2.0 * (xy + wz), 1.0 - 2.0 * (xx + zz), 2.0 * (yz - wx)],
            [2.0 * (xz - wy), 2.0 * (yz + wx), 1.0 - 2.0 * (xx + yy)],
        ]
    )


def camera_center_to_translation(c, qvec):
    """Return the world-to-camera translation t = -R @ c for center ``c``."""
    rotation = quaternion_to_rotation_matrix(qvec)
    return -np.matmul(rotation, c)


def read_mvs_model(mvs_path):
    """Parse a ContextCapture-style MVS XML export into COLMAP model dicts.

    Builds one shared OPENCV camera from the first intrinsics elements found,
    converts every ``Photo`` pose from the file's local ENU frame into an
    offset ECEF frame, and fits a global rotation between the two frames from
    the camera centers (Kabsch/SVD) to re-express each photo's rotation.

    Args:
        mvs_path: Path to the MVS XML file.

    Returns:
        Tuple ``(cameras, images, points3D)`` compatible with ``write_model``;
        ``points3D`` is always empty.
    """
    tree = ET.parse(mvs_path)

    # Get the root element of the XML tree.
    root = tree.getroot()
    lat_origin = 23.3813564722  # Latitude of the origin
    lon_origin = 111.6316708889 # Longitude of the origin
    alt_origin = 0.0             # Altitude of the origin

    # Fixed translation subtracted from the ECEF coordinates below.
    # NOTE(review): magic constants — presumably chosen to keep coordinates
    # small near the survey area; confirm against the downstream consumer.
    x_offset = -2159227.0
    y_offset = 5445019.5
    z_offset = 2515560.0

    # WGS84 geodetic (lon, lat, alt) -> ECEF (x, y, z); always_xy means
    # the call order is (longitude, latitude, height).
    transformer = Transformer.from_crs("EPSG:4326", "EPSG:4978", always_xy=True)

    # Override the hard-coded origin with the one stored in the file.
    # NOTE(review): if several SRS elements exist, only the last one takes
    # effect; photo_id is read but never used.
    for SRS in root.findall('.//SRS'):
        photo_id = SRS.find('Id').text
        definition_element_text = SRS.find('Definition').text
        coords = definition_element_text.split(':')[1].split(',')
        lat_origin = float(coords[0])
        lon_origin = float(coords[1])


    # One shared OPENCV camera (fx, fy, cx, cy, k1, k2, p1, p2) for all photos.
    camera_id = 0
    camera_model = CAMERA_MODEL_NAMES["OPENCV"]
    width = int(root.find('.//ImageDimensions/Width').text)
    height = int(root.find('.//ImageDimensions/Height').text)

    params = [1500.0, 1500.0, 960.0, 540.0, 0., 0., 0., 0.]
    params[0] = float(root.find('.//FocalLengthPixels').text)
    params[1] = float(root.find('.//FocalLengthPixels').text)
    params[2] = float(root.find('.//PrincipalPoint/x').text)
    params[3] = float(root.find('.//PrincipalPoint/y').text)
    params[4] = float(root.find('.//Distortion/K1').text)
    params[5] = float(root.find('.//Distortion/K2').text)
    params[6] = float(root.find('.//Distortion/P1').text)
    params[7] = float(root.find('.//Distortion/P2').text)
    # params[8] = float(root.find('.//Distortion/K3').text)
    assert len(params) == camera_model.num_params
    cameras = {}
    # Create the camera object.
    camera = Camera(
        id = camera_id,
        model = "OPENCV",
        width = width,
        height = height,
        params = params
    )
    cameras[camera_id] = camera
    images = {}
    points3D = {}
    # NOTE(review): max_x/max_y/min_x/min_y are initialized but never
    # updated or used — likely leftovers.
    max_x = -1000000.0
    max_y = -1000000.0
    min_x = 1000000.0
    min_y = 1000000.0
    # First pass: collect each camera center in the local ENU frame (src)
    # and in the offset-ECEF frame (tgt) to fit a rotation between them.
    src = np.array([]).reshape(0, 3)
    tgt = np.array([]).reshape(0, 3)
    for photo in root.findall('.//Photo'):
        # Extract the camera center (local ENU coordinates).
        center = photo.find('Pose/Center')
        x = float(center.find('x').text)
        y = float(center.find('y').text)
        z = float(center.find('z').text)

        col = np.array([x, y, z])
        src = np.insert(src, 0, col, axis=0)

        lat_wgs84, lon_wgs84, alt_wgs84 = pm.enu2geodetic(
            x, y, z,
            lat_origin, lon_origin, alt_origin,
            deg=True # Set deg=True if your input origin lat/lon are in degrees
        )
        # NOTE(review): the local ENU z is passed as the ellipsoidal height
        # here instead of alt_wgs84 — confirm this is intentional.
        x, y, z = transformer.transform(lon_wgs84, lat_wgs84, z)
        x = x - x_offset
        y = y - y_offset
        z = z - z_offset
        col = np.array([x, y, z])
        tgt = np.insert(tgt, 0, col, axis=0)

    # Kabsch algorithm: least-squares rotation aligning src to tgt.
    src_centroid = np.mean(src, axis=0)
    tgt_centroid = np.mean(tgt, axis=0)

    # Center both point sets on their centroids.
    src_centered = src - src_centroid
    tgt_centered = tgt - tgt_centroid

    # Rotation from the SVD of the cross-covariance matrix.
    H = src_centered.T @ tgt_centered
    U, S, Vt = np.linalg.svd(H)
    R = Vt.T @ U.T

    # Ensure a proper rotation (right-handed, det = +1), not a reflection.
    if np.linalg.det(R) < 0:
        Vt[-1, :] *= -1
        R = Vt.T @ U.T
    # Invert: R now maps from the target (offset-ECEF) frame back to ENU,
    # which is the direction applied to each photo rotation below.
    R = np.linalg.inv(R)

    # Second pass: build a COLMAP Image per Photo with the converted pose.
    for photo in root.findall('.//Photo'):
        image_id = int(photo.find('Id').text)
        image_path = photo.find('ImagePath').text
        # Keep only the file name from a Windows-style ("\\"-separated) path.
        idx = image_path.rfind("\\") + 1
        image_path = image_path[idx:]
        # Extract the camera center (local ENU coordinates).
        center = photo.find('Pose/Center')
        x = float(center.find('x').text)
        y = float(center.find('y').text)
        z = float(center.find('z').text)
        rotation = photo.find('Pose/Rotation')
        # Row-major 3x3 rotation matrix stored element-by-element in the XML.
        rot_mat = np.array([float(rotation.find('M_00').text), float(rotation.find('M_01').text), float(rotation.find('M_02').text),
            float(rotation.find('M_10').text), float(rotation.find('M_11').text), float(rotation.find('M_12').text),
            float(rotation.find('M_20').text), float(rotation.find('M_21').text), float(rotation.find('M_22').text)])

        lat_wgs84, lon_wgs84, alt_wgs84 = pm.enu2geodetic(
            x, y, z,
            lat_origin, lon_origin, alt_origin,
            deg=True # Set deg=True if your input origin lat/lon are in degrees
        )

        # NOTE(review): same as the first pass — ENU z is used as the height
        # input rather than alt_wgs84.
        x, y, z = transformer.transform(lon_wgs84, lat_wgs84, z)
        x = x - x_offset
        y = y - y_offset
        z = z - z_offset

        rot_mat = rot_mat.reshape(3, 3)
        # Re-express the photo rotation in the offset-ECEF world frame.
        rot_mat = rot_mat @ R
        # COLMAP convention: tvec = -R @ C (world-to-camera translation).
        trans = -rot_mat @ np.array([x, y, z])
        # scipy returns quaternions as (x, y, z, w); COLMAP expects (w, x, y, z).
        quat = Rotation.from_matrix(rot_mat).as_quat()
        quat = quat[[3, 0, 1, 2]]
        # No 2D keypoints or 3D point associations are available in this file.
        xys = np.zeros((0, 2), float)
        point3D_ids = np.full(0, -1, int)
        image = Image(
            id = image_id,
            name = image_path,
            camera_id = camera_id,
            qvec = quat,
            tvec = trans,
            xys = xys,
            point3D_ids = point3D_ids,
        )
        images[image_id] = image




    return cameras, images, points3D


def main(nvm, output):
    """Convert the MVS XML model at ``nvm`` into a COLMAP binary model.

    Args:
        nvm: Path to the input XML model file (must exist).
        output: Directory for the COLMAP ``.bin`` model (created if missing).
    """
    assert nvm.exists(), nvm

    logger.info("Reading the NVM model...")
    cameras, images, points3D = read_mvs_model(nvm)

    logger.info("Writing the COLMAP model...")
    output.mkdir(exist_ok=True, parents=True)
    write_model(cameras, images, points3D, path=str(output), ext=".bin")
    logger.info("Done.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--nvm", required=True, type=Path)
    parser.add_argument("--intrinsics", required=True, type=Path)
    parser.add_argument("--database", required=True, type=Path)
    parser.add_argument("--output", required=True, type=Path)
    parser.add_argument("--skip_points", action="store_true")
    args = parser.parse_args()
    # BUG FIX: main() only accepts (nvm, output); `main(**args.__dict__)`
    # raised TypeError because of the extra --intrinsics/--database/
    # --skip_points entries in the namespace. Pass only what main() takes.
    main(nvm=args.nvm, output=args.output)
