#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   evaluate_hoi_pose_traj.py
@Time    :   2025/03/18 14:37:38
@Author  :   Sanqi Lu
@Contact :   sanqilu376@163.com
@Desc    :   Evaluate predicted HOI (hand-object interaction) pose trajectories against ground truth: MPJPE, velocity smoothness, and object Chamfer distance.
'''
import sys
sys.path.append(".")
import os

import numpy as np
import shutil
import imageio
import natsort
import json


def batch_cdist(x1, x2, p=2):
    """Batched pairwise Euclidean distances between two point sets.

    Args:
        x1: Array of shape (B, M, D).
        x2: Array of shape (B, N, D).
        p: Distance order; only p == 2 (Euclidean) is implemented.

    Returns:
        Array of shape (B, M, N) where entry [b, i, j] is ||x1[b, i] - x2[b, j]||_2.

    Raises:
        NotImplementedError: If ``p`` is anything other than 2.
    """
    assert x1.shape[0] == x2.shape[0], "批量大小不一致"
    if p == 2:
        # ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, expanded so the whole
        # computation is three vectorized ops instead of an (M, N) Python loop.
        x1_sq = np.sum(x1**2, axis=-1, keepdims=True)    # (B, M, 1)
        x2_sq = np.sum(x2**2, axis=-1)[:, np.newaxis]    # (B, 1, N)
        cross = np.matmul(x1, np.swapaxes(x2, 1, 2))     # (B, M, N)
        # Clamp tiny negative values caused by floating-point cancellation
        # before the sqrt (the old +1e-8 epsilon biased all distances upward
        # and still could not guarantee a non-negative argument).
        dist_sq = np.maximum(x1_sq + x2_sq - 2 * cross, 0.0)
        return np.sqrt(dist_sq)
    # The original implementation silently returned None here; fail loudly.
    raise NotImplementedError(f"batch_cdist only supports p=2, got p={p}")

def Chamfer_Distance(pred, gt):
    """Symmetric Chamfer distance between two batched point clouds.

    Uses squared point-to-nearest-neighbour distances in both directions,
    averaged per cloud and summed, then averaged over the batch.

    Args:
        pred: Array of shape (B, N, 3), predicted point cloud.
        gt: Array of shape (B, M, 3), ground-truth point cloud.

    Returns:
        Scalar mean Chamfer distance over the batch.
    """
    assert pred.shape[0] == gt.shape[0], "Batch size mismatch."

    # Squared distance between every predicted/ground-truth point pair.
    pairwise_sq = np.square(batch_cdist(pred, gt, p=2.0))  # (B, N, M)

    # Nearest-neighbour squared distance in each direction.
    pred_to_gt = pairwise_sq.min(axis=2)  # (B, N)
    gt_to_pred = pairwise_sq.min(axis=1)  # (B, M)

    # Average over points, sum both directions, then average over the batch.
    per_sample = pred_to_gt.mean(axis=1) + gt_to_pred.mean(axis=1)  # (B,)
    return per_sample.mean()
def eval_hoi_pose_traj_cogvideo(calculate_total_gen_sample=None):
    """Evaluate CogVideo-pipeline HOI trajectory predictions against ground truth.

    For each chosen sample, loads the predicted trajectory
    (``pred_hoi_traj_*.npy`` in ``sampled_results_dir``) and the matching
    ground-truth ``.npy`` under ``data_root``, then computes:

    - MPJPE: mean absolute error over the hand-joint trajectories
      (note: an L1 error despite the local variable name ``mse``),
    - Smoothness: mean absolute difference of frame-to-frame velocities,
    - ChamferDistance: sum of tool and target object Chamfer distances.

    Per-sample scores plus batch averages are written to a JSON file next to
    ``sampled_results_dir``.

    Args:
        calculate_total_gen_sample: Optional cap on how many samples to
            evaluate; ``None`` evaluates every entry (after the train-10 cut).

    Side effects:
        Reads from hard-coded absolute paths, prints progress to stdout, and
        writes ``eval_hoi_pose_traj_train10.json``.
    """
    cogvideox_size_hw = [240, 368]  # (H, W); only used by the disabled de-normalization block below

    sampled_results_dir = "/WORK/PUBLIC/liuyebin_work/lingweidang/outputs/for_paper/ablation/auxiliary_motion_diffusion/based_on_ours_53200/ours_w_vid_lora_based_on_ours_53200_240x368/sample_ours_w_mgan_lora_based_on_ours_53200_ckpt600_240x368"

    # data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    data_root = "/WORK/PUBLIC/liuyebin_work/lingweidang/datas/TACO_Data_20250314/full_20k_plus"
    # prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    # img_column_path = "test_small_151_images_416x624.txt"
    # color_video_column_path = "test_small_151_color_videos_416x624.txt"
    # pose_video_column_path = "test_small_151_black_pose_videos_416x624.txt"
    # hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    prompts_column_path = "ablation_train10_test10_prompts.txt" # test_small_151_VLMEnhanced_prompts
    img_column_path = "ablation_train10_test10_images.txt" # test_small_151_images_416x624
    color_video_column_path = "ablation_train10_test10_color_videos.txt"
    pose_video_column_path = "ablation_train10_test10_black_pose_videos.txt"
    hoi_traj_path = "ablation_train10_test10_normalized_hoi_pose_trajs.txt"

    save_path = os.path.join(os.path.dirname(sampled_results_dir), "eval_hoi_pose_traj_train10.json")
    # Each metric maps to [[per-sample dicts]]; the batch average is inserted
    # at index 0 after the loop so the final layout is [average, [entries]].
    eval_results = {
        "MPJPE": [[]],
        "Smoothness": [[]],
        "ChamferDistance": [[]]
    }
    avg_MSE = 0
    avg_Smoothness = 0
    avg_ChamferDistance = 0

    def get_file_path_list(column_path, mode="prompts"):
        # Read one entry per line; "prompts" keeps raw text, any other mode
        # joins the line onto data_root to form an absolute path.
        # NOTE(review): stops at the first empty line instead of skipping it.
        result_list = []
        cnt = 0
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                # if line and cnt < total_gen_sample:
                    cnt+=1
                    if mode == "prompts":
                        result_list.append(line)
                    else:
                        result_list.append(os.path.join(data_root, line))
                else:
                    break
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    # Keep only the first 10 (train) samples for this ablation run.
    only_train_len = 10
    prompts_list = prompts_list[:only_train_len]
    img_path_list = img_path_list[:only_train_len]
    color_video_path_list = color_video_path_list[:only_train_len]
    pose_video_path_list = pose_video_path_list[:only_train_len]
    hoi_pose_traj_path_list = hoi_pose_traj_path_list[:only_train_len]

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)
        # chosen_idx = natsort.natsorted(np.random.choice(np.arange(len(prompts_list)), calculate_total_gen_sample, replace=False))
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        # prompt, img_path, rgb_video_path, pose_video_path = \
        #     prompts_list[curr_chosen_idx], \
        #     img_path_list[curr_chosen_idx], \
        #     color_video_path_list[curr_chosen_idx], \
        #     pose_video_path_list[curr_chosen_idx]

        # The trailing integer of the trajectory filename is the global sample
        # index; GT files are sharded into segment dirs of 2781 samples each
        # (presumably the dataset shard size — TODO confirm).
        data_sort_idx = int(os.path.splitext(hoi_pose_traj_path_list[curr_chosen_idx])[0].split("_")[-1])
        seg_id = data_sort_idx // 2781
        print(f"curr_chosen_idx: {curr_chosen_idx}, seg_id: {seg_id}, data_sort_idx: {data_sort_idx}")
        # /share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus/hoi_pose_resize_norm_n1to1/seg_00/hoi_pose_resize_norm_n1to1_00000.npy
        gt_hoi_path = os.path.join(data_root, f"hoi_pose_resize_norm_n1to1/seg_{seg_id:02d}/hoi_pose_resize_norm_n1to1_{data_sort_idx:05d}.npy")
        pred_hoi_path = os.path.join(sampled_results_dir, f"pred_hoi_traj_{data_sort_idx:05d}_p0.npy")
        assert os.path.exists(gt_hoi_path) and os.path.exists(pred_hoi_path), f"不存在 gt_hoi_path: {gt_hoi_path}, pred_hoi_path: {pred_hoi_path}"
        # print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, gt_hoi_path {gt_hoi_path}")
        gt_hoi = np.load(gt_hoi_path)[None]  # prepend batch dim; assumed [1, T, N, 3] — see shape print below
        pred_hoi = np.load(pred_hoi_path)  # already batched (unlike the GT file) — TODO confirm
        print(f"gt: {gt_hoi.shape}, pred: {pred_hoi.shape}")
        # # De-normalization (disabled): map [-1, 1] coords back to pixel x/y and metric depth
        # expand_origin_min_z = 0.8523401065952239
        # expand_origin_max_z = 2.4990647112847197
        # pred_hoi[:, :, :, 0] = (pred_hoi[:, :, :, 0] + 1) * cogvideox_size_hw[1] / 2 #  + crop_left_top_x
        # pred_hoi[:, :, :, 1] = (pred_hoi[:, :, :, 1] + 1) * cogvideox_size_hw[0] / 2 #  + crop_left_top_y
        # pred_hoi[:, :, :, 2] = (pred_hoi[:, :, :, 2] + 1) * (expand_origin_max_z - expand_origin_min_z) / 2 #  + expand_origin_min_z

        # gt_hoi[:, :, :, 0] = (gt_hoi[:, :, :, 0] + 1) * cogvideox_size_hw[1] / 2 #  + crop_left_top_x
        # gt_hoi[:, :, :, 1] = (gt_hoi[:, :, :, 1] + 1) * cogvideox_size_hw[0] / 2 #  + crop_left_top_y
        # gt_hoi[:, :, :, 2] = (gt_hoi[:, :, :, 2] + 1) * (expand_origin_max_z - expand_origin_min_z) / 2 #  + expand_origin_min_z

        # [b, t, n, c] -> [b, t, 2, n/2, c]: axis 2 splits the points into two
        # groups (presumably left/right or tool/target side — TODO confirm);
        # the first 21 points of each group are hand joints.
        B, T, N, C = pred_hoi.shape
        pred_traj = pred_hoi.reshape(B, T, 2, -1, C)
        gt_traj = gt_hoi.reshape(B, T, 2, -1, C)

        pred_hand_traj = pred_traj[:, :, :, :21, :]
        gt_hand_traj = gt_traj[:, :, :, :21, :]
        pred_obj_traj = pred_traj[:, :, :, 21:, :]
        gt_obj_traj = gt_traj[:, :, :, 21:, :]
        # Compute metrics (the "mse" variable is actually a mean absolute error)
        mse = np.mean(np.abs((pred_hand_traj - gt_hand_traj)))
        velocity = pred_hand_traj[:, 1:] - pred_hand_traj[:, :-1]
        gt_velocity = gt_hand_traj[:, 1:] - gt_hand_traj[:, :-1]
        smoothness = np.mean(np.abs(velocity - gt_velocity))

        # [b t m k c] -> [bt, 2*128, 3]
        # Chamfer distance per frame: group 0 is treated as the tool cloud,
        # group 1 as the target cloud.
        b, t, m, k, c = pred_obj_traj.shape
        pred_tool_traj = pred_obj_traj[:, :, 0, :, :].reshape(b*t, k, c)
        gt_tool_traj = gt_obj_traj[:, :, 0, :, :].reshape(b*t, k, c)
        tool_chamfer_dist = Chamfer_Distance(pred_tool_traj, gt_tool_traj)

        pred_target_traj = pred_obj_traj[:, :, 1, :, :].reshape(b*t, k, c)
        gt_target_traj = gt_obj_traj[:, :, 1, :, :].reshape(b*t, k, c)
        target_chamfer_dist = Chamfer_Distance(pred_target_traj, gt_target_traj)
        chamfer_dist = tool_chamfer_dist + target_chamfer_dist
        # chamfer_dist = 0

        eval_results["MPJPE"][0].append({
            "chosen_idx": int(curr_chosen_idx),
            "pred_hoi_path": pred_hoi_path,
            "score": mse
        })
        eval_results["Smoothness"][0].append({
            "chosen_idx": int(curr_chosen_idx),
            "pred_hoi_path": pred_hoi_path,
            "score": smoothness
        })
        eval_results["ChamferDistance"][0].append({
            "chosen_idx": int(curr_chosen_idx),
            "pred_hoi_path": pred_hoi_path,
            "score": chamfer_dist
        })
        avg_MSE += mse
        avg_Smoothness += smoothness
        avg_ChamferDistance += chamfer_dist

    assert len(eval_results["MPJPE"][0]) == len(eval_results["Smoothness"][0]) == len(eval_results["ChamferDistance"][0])
    print(f'len(eval_results["MPJPE"][0]): {len(eval_results["MPJPE"][0])}')
    avg_MSE /= len(eval_results["MPJPE"][0])
    avg_Smoothness /= len(eval_results["Smoothness"][0])
    avg_ChamferDistance /= len(eval_results["ChamferDistance"][0])
    # Prepend the batch averages so each metric reads [average, [per-sample entries]].
    eval_results["MPJPE"].insert(0, avg_MSE)
    eval_results["ChamferDistance"].insert(0, avg_ChamferDistance)
    eval_results["Smoothness"].insert(0, avg_Smoothness)
    # print(f"eval_results: {eval_results}")
    print(f"avg_MSE: {avg_MSE}")
    print(f"avg_Smoothness: {avg_Smoothness}")
    print(f"avg_ChamferDistance: {avg_ChamferDistance}")
    with open(save_path, 'w') as f:
        json.dump(eval_results, f, indent=4)


def eval_hoi_pose_traj_EMDM_and_GUESS(calculate_total_gen_sample=None):
    """Evaluate EMDM/GUESS baseline HOI trajectory predictions against ground truth.

    Same metrics and JSON output layout as ``eval_hoi_pose_traj_cogvideo``
    (MPJPE on hand joints, velocity smoothness, tool+target Chamfer distance),
    but the baselines save predictions under the GT filename, and the
    predicted ``.npy`` is unbatched, so both GT and prediction get a batch
    dim prepended here.

    Args:
        calculate_total_gen_sample: Optional cap on how many samples to
            evaluate; ``None`` evaluates every entry of the test list.

    Side effects:
        Reads from hard-coded absolute paths, prints progress to stdout, and
        writes ``eval_hoi_pose_traj.json`` next to ``sampled_results_dir``.
    """
    cogvideox_size_hw = [240, 368]  # (H, W); only used by the disabled de-normalization block below
    # sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper/EMDM_pose_results/results"
    # sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/mdm/samples_ckptgoon_2000/npy"
    sampled_results_dir = "/WORK/PUBLIC/liuyebin_work/lingweidang/outputs/for_paper/ablation/auxiliary_motion_diffusion/based_on_ours_53200/ours_w_mgan_based_on_ours_53200_240x368/sample_ours_w_mgan_based_on_ours_53200_ckpt2800_240x368"

    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    img_column_path = "test_small_151_images_416x624.txt"
    color_video_column_path = "test_small_151_color_videos_416x624.txt"
    pose_video_column_path = "test_small_151_black_pose_videos_416x624.txt"
    hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    save_path = os.path.join(os.path.dirname(sampled_results_dir), "eval_hoi_pose_traj.json")
    # Each metric maps to [[per-sample dicts]]; the batch average is inserted
    # at index 0 after the loop so the final layout is [average, [entries]].
    eval_results = {
        "MPJPE": [[]],
        "Smoothness": [[]],
        "ChamferDistance": [[]]
    }
    avg_MSE = 0
    avg_Smoothness = 0
    avg_ChamferDistance = 0

    def get_file_path_list(column_path, mode="prompts"):
        # Read one entry per line; "prompts" keeps raw text, any other mode
        # joins the line onto data_root to form an absolute path.
        # NOTE(review): stops at the first empty line instead of skipping it;
        # duplicated from eval_hoi_pose_traj_cogvideo.
        result_list = []
        cnt = 0
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line:
                # if line and cnt < total_gen_sample:
                    cnt+=1
                    if mode == "prompts":
                        result_list.append(line)
                    else:
                        result_list.append(os.path.join(data_root, line))
                else:
                    break
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)
        # chosen_idx = natsort.natsorted(np.random.choice(np.arange(len(prompts_list)), calculate_total_gen_sample, replace=False))
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        # prompt, img_path, rgb_video_path, pose_video_path = \
        #     prompts_list[curr_chosen_idx], \
        #     img_path_list[curr_chosen_idx], \
        #     color_video_path_list[curr_chosen_idx], \
        #     pose_video_path_list[curr_chosen_idx]

        # The trailing integer of the trajectory filename is the global sample
        # index; GT files are sharded into segment dirs of 2781 samples each
        # (presumably the dataset shard size — TODO confirm).
        data_sort_idx = int(os.path.splitext(hoi_pose_traj_path_list[curr_chosen_idx])[0].split("_")[-1])
        seg_id = data_sort_idx // 2781
        print(f"curr_chosen_idx: {curr_chosen_idx}, seg_id: {seg_id}, data_sort_idx: {data_sort_idx}")
        # /share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus/hoi_pose_resize_norm_n1to1/seg_00/hoi_pose_resize_norm_n1to1_00000.npy
        gt_hoi_path = os.path.join(data_root, f"hoi_pose_resize_norm_n1to1/seg_{seg_id:02d}/hoi_pose_resize_norm_n1to1_{data_sort_idx:05d}.npy")
        # Baselines save their prediction under the same basename as the GT file.
        pred_hoi_path = os.path.join(sampled_results_dir, os.path.basename(gt_hoi_path))
        # 不存在 gt_hoi_path: /share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus/hoi_pose_resize_norm_n1to1/seg_00/hoi_pose_resize_norm_n1to1_00689.npy, 
        # pred_hoi_path: /share/home/wuqingyao_danglingwei/outputs/for_paper/emdm250401/samples_ckpt_goon2000/npy/hoi_pose_resize_norm_n1to1_00689.npy
        assert os.path.exists(gt_hoi_path) and os.path.exists(pred_hoi_path), f"不存在 gt_hoi_path: {gt_hoi_path}, pred_hoi_path: {pred_hoi_path}"
        # print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, gt_hoi_path {gt_hoi_path}")
        gt_hoi = np.load(gt_hoi_path)[None] # [b, 49, 298, 3]
        pred_hoi = np.load(pred_hoi_path)[None] # [b, 49, 42, 3]
        print(f"gt: {gt_hoi.shape}, pred: {pred_hoi.shape}")
        # # De-normalization (disabled): map [-1, 1] coords back to pixel x/y and metric depth
        # expand_origin_min_z = 0.8523401065952239
        # expand_origin_max_z = 2.4990647112847197
        # pred_hoi[:, :, :, 0] = (pred_hoi[:, :, :, 0] + 1) * cogvideox_size_hw[1] / 2 #  + crop_left_top_x
        # pred_hoi[:, :, :, 1] = (pred_hoi[:, :, :, 1] + 1) * cogvideox_size_hw[0] / 2 #  + crop_left_top_y
        # pred_hoi[:, :, :, 2] = (pred_hoi[:, :, :, 2] + 1) * (expand_origin_max_z - expand_origin_min_z) / 2 #  + expand_origin_min_z

        # gt_hoi[:, :, :, 0] = (gt_hoi[:, :, :, 0] + 1) * cogvideox_size_hw[1] / 2 #  + crop_left_top_x
        # gt_hoi[:, :, :, 1] = (gt_hoi[:, :, :, 1] + 1) * cogvideox_size_hw[0] / 2 #  + crop_left_top_y
        # gt_hoi[:, :, :, 2] = (gt_hoi[:, :, :, 2] + 1) * (expand_origin_max_z - expand_origin_min_z) / 2 #  + expand_origin_min_z

        # [b, t, n, c] -> [b, t, 2, n/2, c]: axis 2 splits the points into two
        # groups; the first 21 points of each group are hand joints.
        # NOTE(review): per the shape comments above, a 42-point prediction is
        # 2x21 hand joints only, leaving the 21: object slice empty (k == 0)
        # while the GT keeps 128 object points — verify the reshape lines up.
        B, T, N, C = pred_hoi.shape
        pred_traj = pred_hoi.reshape(B, T, 2, -1, C)
        gt_traj = gt_hoi.reshape(B, T, 2, -1, C)

        pred_hand_traj = pred_traj[:, :, :, :21, :]
        gt_hand_traj = gt_traj[:, :, :, :21, :]
        pred_obj_traj = pred_traj[:, :, :, 21:, :]
        gt_obj_traj = gt_traj[:, :, :, 21:, :]
        # Compute metrics (the "mse" variable is actually a mean absolute error)
        mse = np.mean(np.abs((pred_hand_traj - gt_hand_traj)))
        velocity = pred_hand_traj[:, 1:] - pred_hand_traj[:, :-1]
        gt_velocity = gt_hand_traj[:, 1:] - gt_hand_traj[:, :-1]
        smoothness = np.mean(np.abs(velocity - gt_velocity))

        # [b t m k c] -> [bt, 2*128, 3]
        # Chamfer distance per frame: group 0 is treated as the tool cloud,
        # group 1 as the target cloud.
        b, t, m, k, c = pred_obj_traj.shape
        pred_tool_traj = pred_obj_traj[:, :, 0, :, :].reshape(b*t, k, c)
        gt_tool_traj = gt_obj_traj[:, :, 0, :, :].reshape(b*t, k, c)
        tool_chamfer_dist = Chamfer_Distance(pred_tool_traj, gt_tool_traj)

        pred_target_traj = pred_obj_traj[:, :, 1, :, :].reshape(b*t, k, c)
        gt_target_traj = gt_obj_traj[:, :, 1, :, :].reshape(b*t, k, c)
        target_chamfer_dist = Chamfer_Distance(pred_target_traj, gt_target_traj)
        chamfer_dist = tool_chamfer_dist + target_chamfer_dist
        # chamfer_dist = 0

        eval_results["MPJPE"][0].append({
            "chosen_idx": int(curr_chosen_idx),
            "pred_hoi_path": pred_hoi_path,
            "score": mse
        })
        eval_results["Smoothness"][0].append({
            "chosen_idx": int(curr_chosen_idx),
            "pred_hoi_path": pred_hoi_path,
            "score": smoothness
        })
        eval_results["ChamferDistance"][0].append({
            "chosen_idx": int(curr_chosen_idx),
            "pred_hoi_path": pred_hoi_path,
            "score": chamfer_dist
        })
        avg_MSE += mse
        avg_Smoothness += smoothness
        avg_ChamferDistance += chamfer_dist

    assert len(eval_results["MPJPE"][0]) == len(eval_results["Smoothness"][0]) == len(eval_results["ChamferDistance"][0])
    print(f'len(eval_results["MPJPE"][0]): {len(eval_results["MPJPE"][0])}')
    avg_MSE /= len(eval_results["MPJPE"][0])
    avg_Smoothness /= len(eval_results["Smoothness"][0])
    avg_ChamferDistance /= len(eval_results["ChamferDistance"][0])
    # Prepend the batch averages so each metric reads [average, [per-sample entries]].
    eval_results["MPJPE"].insert(0, avg_MSE)
    eval_results["ChamferDistance"].insert(0, avg_ChamferDistance)
    eval_results["Smoothness"].insert(0, avg_Smoothness)
    # print(f"eval_results: {eval_results}")
    with open(save_path, 'w') as f:
        json.dump(eval_results, f, indent=4)



if __name__ == "__main__":
    # Run the CogVideo-pipeline evaluation by default; the baseline
    # (EMDM/GUESS) evaluation can be toggled on instead.
    eval_hoi_pose_traj_cogvideo(calculate_total_gen_sample=None)
    # eval_hoi_pose_traj_EMDM_and_GUESS(calculate_total_gen_sample=None)