#!/usr/bin/env python3
"""
Pose point cloud generation utilities for the data processing pipeline.

This module provides a function to generate foundation poses from camera data.
"""
import numpy as np
from pathlib import Path
import trimesh
import subprocess
import sys
from typing import Dict, Any
from convert_glb_to_obj import convert_glb_to_obj
from copy import deepcopy
import math
MIMIC=False
if not MIMIC:
    from poseTrackPcd import poseTrack
from scipy.spatial.transform import Rotation
from rgbPcd import RGBPCD
from semPcd import parts2colors
from envs.utils.pcd_trans import xyzrpy_to_transformation_matrix, transformation_matrix_to_xyzrpy
from envs.utils.save_file import save_depth
from pathlib import Path
sys.path.append(str(Path(__file__).parent.parent / "FoundationPose-main"))
import run_demo


def pose_matrix_to_xyz_quaternion(pose_matrix):
    """Convert a 4x4 homogeneous pose matrix to [x, y, z, qx, qy, qz, qw].

    Args:
        pose_matrix: 4x4 homogeneous transformation matrix (rotation in the
            upper-left 3x3, translation in the last column).

    Returns:
        list: [x, y, z, qx, qy, qz, qw] — translation followed by a unit
        quaternion (scalar-last convention).
    """
    # Translation comes straight from the last column.
    tx, ty, tz = pose_matrix[0, 3], pose_matrix[1, 3], pose_matrix[2, 3]

    R = pose_matrix[:3, :3]
    r00, r11, r22 = R[0, 0], R[1, 1], R[2, 2]

    # Component magnitudes; max(0, ...) clamps tiny negative values that
    # floating-point noise can produce under the square root.
    qw = math.sqrt(max(0, 1 + r00 + r11 + r22)) / 2
    qx = math.sqrt(max(0, 1 + r00 - r11 - r22)) / 2
    qy = math.sqrt(max(0, 1 - r00 + r11 - r22)) / 2
    qz = math.sqrt(max(0, 1 - r00 - r11 + r22)) / 2

    # Recover the signs of the vector part from the off-diagonal terms.
    qx = math.copysign(qx, R[2, 1] - R[1, 2])
    qy = math.copysign(qy, R[0, 2] - R[2, 0])
    qz = math.copysign(qz, R[1, 0] - R[0, 1])

    # Re-normalize so the result is a unit quaternion.
    length = math.sqrt(qx * qx + qy * qy + qz * qz + qw * qw)
    if length > 0:
        qx, qy, qz, qw = qx / length, qy / length, qz / length, qw / length

    return [tx, ty, tz, qx, qy, qz, qw]


def glb_to_obj(unifiedData: Dict[str, Any]) -> Dict[str, Any]:
    """For each asset, ensure an OBJ/MTL conversion of its GLB exists and print its bounds.

    Args:
        unifiedData: pipeline dict; reads the 'assets' list of "<folder>/<model>" names.

    Returns:
        The same unifiedData dict (work is done for its side effects: file
        conversion and bounding-box printout).
    """
    objects_root = Path(__file__).parent.parent / "RoboTwin" / "assets" / "objects"
    for asset in unifiedData['assets']:
        parts = asset.split("/")
        visual_dir = objects_root / parts[0] / "visual"
        glb_path = visual_dir / (parts[1] + ".glb")
        obj_path = visual_dir / (parts[1] + ".obj")
        mtl_path = visual_dir / (parts[1] + ".mtl")
        assert glb_path.exists(), f"GLB file not found: {glb_path}"
        # Convert only when either derived file is missing.
        if not (obj_path.exists() and mtl_path.exists()):
            print(f"Converting {glb_path} to {obj_path}")
            convert_glb_to_obj(str(glb_path), str(obj_path), suppress_print=True)
        # Load the converted mesh just to report its axis-aligned bounds.
        bbox = trimesh.load(str(obj_path)).bounds
        print(f"Asset: {asset} Min XYZ: {bbox[0]} Max XYZ: {bbox[1]}")
    return unifiedData
def construct_obj_imgs(unifiedData: Dict[str, Any],embodiment_char: str,seg_source: str) -> Dict[str, Any]:
    """Copy midBack-camera products into 'obj_*' fields and build per-asset masks.

    Args:
        unifiedData: pipeline dict; reads 'cameras' and 'assets', writes the
            'obj_rgb'/'obj_depth'/'obj_intrinsics'/'obj_cam2world_gl'/'obj_masks' keys.
        embodiment_char: embodiment identifier forwarded to parts2colors.
        seg_source: segmentation source forwarded to parts2colors.

    Returns:
        The same unifiedData dict, mutated in place.
    """
    cam = unifiedData['cameras']['midBack_camera']
    # Mirror the camera products under dedicated "obj_*" keys for the pose stage.
    unifiedData['obj_rgb'] = cam['rgb']
    unifiedData['obj_depth'] = cam['depth']
    unifiedData['obj_intrinsics'] = cam['intrinsic_cv']
    unifiedData['obj_cam2world_gl'] = cam['cam2world_gl']

    assets_colors_dict, _ = parts2colors(unifiedData,embodiment_char,seg_source,'indices2assets')
    print("assets_colors_dict",assets_colors_dict)

    # One binary mask per asset, keyed by its short name.
    unifiedData['obj_masks'] = {}
    for asset, colors in assets_colors_dict.items():
        cam_masks = RGBPCD.seg_to_mask_static(unifiedData,colors,single_cam_field='midBack_camera')
        unifiedData['obj_masks'][asset] = cam_masks['midBack_camera']

    # Sanity check: segmentation produced exactly one mask per declared asset.
    short_assets_name = [asset_full_to_txtname(a) for a in unifiedData['assets']]
    assert set(unifiedData['obj_masks'].keys()) == set(short_assets_name), f"obj_masks keys {unifiedData['obj_masks'].keys()} do not match unifiedData['assets'] keys {short_assets_name} from {unifiedData['assets']}"
    return unifiedData
def check_wanted_fields(unifiedData: Dict[str, Any]) -> bool:
    """Return True when every field the pose stage needs is present and non-None.

    Prints one message per missing field so the caller can log why it is
    early-returning.
    """
    required = ('obj_rgb', 'obj_depth', 'obj_intrinsics', 'obj_masks', 'assets')
    complete = True
    for field in required:
        missing = field not in unifiedData or unifiedData[field] is None
        if missing:
            print(f"{field} missing, will early return")
            complete = False
    return complete
def asset_full_to_txtname(asset_fullname: str) -> str:
    """Extract the short asset name: the text after the first "_" fields split,
    truncated at the first "/" (e.g. "001_apple/visual" -> "apple")."""
    second_field = asset_fullname.split("_")[1]
    return second_field.split("/")[0]
def posePcd(unifiedData: Dict[str, Any],embodiment_char: str,seg_source: str) -> Dict[str, Any]:
    """Estimate a pose for every asset and store them in unifiedData.

    Pipeline: convert GLB assets to OBJ, populate the 'obj_*' image fields from
    the midBack camera, then per asset either mimic a pose from 'key_poses'
    (MIMIC mode) or run poseTrack to get a pose matrix, converted to xyz-rpy.

    Args:
        unifiedData: pipeline dict; must carry 'assets' and 'cameras' (and
            'key_poses' in MIMIC mode). Mutated in place.
        embodiment_char: embodiment identifier forwarded to segmentation.
        seg_source: segmentation source forwarded to segmentation.

    Returns:
        unifiedData with 'foundationPoses' added (one 6-DoF pose per asset),
        or unchanged if a required field was missing.
    """
    glb_to_obj(unifiedData)
    construct_obj_imgs(unifiedData,embodiment_char,seg_source)
    if not check_wanted_fields(unifiedData):
        # A required input is missing; leave unifiedData untouched beyond the
        # steps above and let the caller decide how to proceed.
        return unifiedData
    assets = unifiedData['assets']
    rgb = unifiedData['obj_rgb']
    depth = unifiedData['obj_depth']
    intrin_mat = unifiedData['obj_intrinsics']
    extrin_mat = unifiedData['obj_cam2world_gl']
    # TODO whether need to change extrin_mat
    # extrin_mat[:,1:3] = -extrin_mat[:,1:3]
    # Flip the Y and Z axes of the camera frame — presumably an OpenGL-to-CV
    # camera-convention change; TODO(review) confirm against poseTrack's
    # expected extrinsics convention.
    convert_mat = np.array([
        [1,  0,  0, 0],
        [0, -1,  0, 0],
        [0,  0, -1, 0],
        [0,  0,  0, 1]
    ])
    extrin_mat =  extrin_mat @ convert_mat

    obj_masks = unifiedData['obj_masks']
    depth_ori = deepcopy(depth)
    # Scale depth by 1/1000 — presumably millimeters to meters; confirm the
    # camera's depth units before changing.
    depth = depth / 1000.0
    # save_path = "./"
    # save_depth(save_path = save_path, depth_array = depth)
    # save_img(save_path = save_path, img_file = obj_masks)
    
    foundation_poses = []
    for i, asset_fullname in enumerate(assets):
        # asset_fullname = assets[0]
        # assert "_"
        asset_txtname = asset_full_to_txtname(asset_fullname)
        asset_dir = Path(__file__).parent.parent / "RoboTwin" / "assets" / "objects" / asset_fullname.split("/")[0] / "visual"
        obj_path = asset_dir / (asset_fullname.split("/")[1] + ".obj")
        print("obj_path", obj_path, asset_txtname,asset_fullname)
        assert obj_path.exists(), f"OBJ file not found: {obj_path}"
        pose_6d=None
        pose_7d = None
        if MIMIC:
            # Mimic mode: reuse the first key pose, offsetting x by 0.1*i per asset.
            pose_6d = deepcopy(unifiedData['key_poses'][0:6]+[i*0.1,0,0,0,0,0])
        else:
            
            mesh = trimesh.load(str(obj_path))
            # to_origin, extents = trimesh.bounds.oriented_bounds(mesh)
            # Track the asset's pose from the masked RGB-D frame (debug=3 is
            # poseTrack's own verbosity/debug level — semantics defined there).
            pose = poseTrack(rgb, depth, intrin_mat, extrin_mat, obj_masks[asset_txtname], str(obj_path), frame_id=i, debug=3)
            # pose = pose @ np.linalg.inv(to_origin)
            # # TODO check extrin_mat
            # pose_local =  extrin_mat @ pose
            pose_local = pose
            pose_6d = transformation_matrix_to_xyzrpy(pose_local)
            # Quaternion form is computed but currently unused (see the
            # commented-out append below).
            pose_7d = pose_matrix_to_xyz_quaternion(pose_local)
            # print("xyzrpy", pose_6d)
            print("intrinsic_mat", intrin_mat)
            # pose_6d = run_demo.run_demo(mesh_file = str(obj_path), depth = depth, rgb= rgb, mask = obj_masks[asset_txtname], K = intrin_mat)
        foundation_poses.append(pose_6d)
        # foundation_poses.append(pose_7d)
    foundation_poses = np.array(foundation_poses)
    unifiedData['foundationPoses'] = foundation_poses
    return unifiedData
