#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   evaluate_hoi_pose_traj.py
@Time    :   2025/03/18 14:37:38
@Author  :   Sanqi Lu
@Contact :   sanqilu376@163.com
@Desc    :   Evaluate generated HOI pose trajectories against ground truth
             by computing an FID over motion-encoder embeddings.
'''
import sys
sys.path.append(".")  # allow running the script from the repo root without installation
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # pin all CUDA work in this script to GPU 0
import numpy as np
import torch
import json
from scipy import linalg
from einops import rearrange

# from custom_evaluate.fid_pose_reconstructor.emdm_motion_encoder import MotionEncoderBiGRUCo, MovementConvEncoder
from custom_evaluate.fid_pose_reconstructor.cvae_encoder.cvae import CVAE

def eval_hoi_pose_traj_cogvideo(calculate_total_gen_sample=None, motion_enc=None, device="cuda:0"):
    """Score CogVideo-generated HOI pose trajectories against ground truth via FID.

    Loads paired ground-truth / predicted trajectory ``.npy`` files from
    hard-coded dataset and result directories, embeds both with ``motion_enc``,
    and prints the Frechet distance between the two embedding distributions.

    Args:
        calculate_total_gen_sample: Maximum number of samples to evaluate;
            ``None`` evaluates every listed sample.
        motion_enc: Encoder exposing ``embed_data`` that maps a ``[b, v*c, t]``
            tensor to a per-timestep embedding ``[b, d, t]``.
        device: Torch device string the trajectory tensors are moved to.
    """
    sampled_results_dir = "/WORK/PUBLIC/liuyebin_work/lingweidang/outputs/for_paper/ablation/auxiliary_motion_diffusion/based_on_ours_53200/ours_w_vid_lora_based_on_ours_53200_240x368/sample_ours_w_mgan_lora_based_on_ours_53200_ckpt600_240x368"
    data_root = "/WORK/PUBLIC/liuyebin_work/lingweidang/datas/TACO_Data_20250314/full_20k_plus"

    # Column files: one entry per line, all resolved relative to data_root.
    prompts_column_path = "ablation_train10_test10_prompts.txt"
    img_column_path = "ablation_train10_test10_images.txt"
    color_video_column_path = "ablation_train10_test10_color_videos.txt"
    pose_video_column_path = "ablation_train10_test10_black_pose_videos.txt"
    hoi_traj_path = "ablation_train10_test10_normalized_hoi_pose_trajs.txt"

    def get_file_path_list(column_path, mode="prompts"):
        """Read one entry per line, stopping at the first blank line.

        ``mode == "prompts"`` keeps the raw line; any other mode joins the
        line onto ``data_root`` to form an absolute path.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # a blank line terminates the list
                result_list.append(line if mode == "prompts" else os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    # The ablation lists hold train+test entries; presumably only the first 10
    # (train) are scored here -- TODO confirm against the list files.
    only_train_len = 10
    prompts_list = prompts_list[:only_train_len]
    img_path_list = img_path_list[:only_train_len]
    color_video_path_list = color_video_path_list[:only_train_len]
    pose_video_path_list = pose_video_path_list[:only_train_len]
    hoi_pose_traj_path_list = hoi_pose_traj_path_list[:only_train_len]

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
    # Evaluate the first min(calculate_total_gen_sample, len) samples in order.
    chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))

    all_gt_embed = []
    all_pred_embed = []
    for idx, curr_chosen_idx in enumerate(chosen_idx):
        # Recover the global sample index from the trajectory file name,
        # e.g. ".../hoi_pose_..._00042.npy" -> 42.
        data_sort_idx = int(os.path.splitext(hoi_pose_traj_path_list[curr_chosen_idx])[0].split("_")[-1])
        seg_id = data_sort_idx // 2781  # samples are sharded into 2781-per-segment directories
        gt_hoi_path = os.path.join(data_root, f"hoi_pose_resize_norm_n1to1/seg_{seg_id:02d}/hoi_pose_resize_norm_n1to1_{data_sort_idx:05d}.npy")
        pred_hoi_path = os.path.join(sampled_results_dir, f"pred_hoi_traj_{data_sort_idx:05d}_p0.npy")
        assert os.path.exists(gt_hoi_path) and os.path.exists(pred_hoi_path), f"不存在 gt_hoi_path: {gt_hoi_path}, pred_hoi_path: {pred_hoi_path}"

        gt_hoi = np.load(gt_hoi_path)[None]  # prepend batch dim -> [1, t, n, c]
        pred_hoi = np.load(pred_hoi_path)    # already batched -> [b, t, n, c]
        print(f"{idx}, gt: {gt_hoi.shape}, pred: {pred_hoi.shape}")

        gt_hoi = torch.from_numpy(gt_hoi).float().to(device)
        pred_hoi = torch.from_numpy(pred_hoi).float().to(device)

        # [b, t, v, c] -> [b, v*c, t] for the encoder, then mean-pool over
        # time: [b, d, t] -> [b, d]. no_grad avoids building autograd graphs.
        with torch.no_grad():
            gt_emb = motion_enc.embed_data(rearrange(gt_hoi, "b t v c -> b (v c) t")).mean(dim=-1)
            pred_emb = motion_enc.embed_data(rearrange(pred_hoi, "b t v c -> b (v c) t")).mean(dim=-1)

        all_gt_embed.append(gt_emb.detach().cpu().numpy())
        all_pred_embed.append(pred_emb.detach().cpu().numpy())

    all_gt_embed = np.concatenate(all_gt_embed, axis=0)
    gt_mu, gt_cov = calculate_activation_statistics(all_gt_embed)

    all_pred_embed = np.concatenate(all_pred_embed, axis=0)
    mu, cov = calculate_activation_statistics(all_pred_embed)
    fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)

    print(f"fid: {fid}")


def eval_hoi_pose_traj_EMDM_and_GUESS(calculate_total_gen_sample=None, motion_enc=None, device="cuda:0"):
    """Score EMDM/GUESS-generated HOI pose trajectories against ground truth via FID.

    Mirrors :func:`eval_hoi_pose_traj_cogvideo` but reads predictions from the
    EMDM result directory, where predicted files share the ground-truth file's
    basename instead of using a ``pred_hoi_traj_*`` naming scheme.

    Args:
        calculate_total_gen_sample: Maximum number of samples to evaluate;
            ``None`` evaluates every listed sample.
        motion_enc: Encoder exposing ``embed_data`` that maps a ``[b, v*c, t]``
            tensor to a per-timestep embedding ``[b, d, t]``.
        device: Torch device string the trajectory tensors are moved to.
    """
    sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper/emdm250401/samples_ckpt_goon2000/WORK/PUBLIC/liuyebin_work/lingweidang/outputs/for_paper/emdm250401/samples_ckpt_goon2000/npy"
    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"

    # Column files: one entry per line, all resolved relative to data_root.
    prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    img_column_path = "test_small_151_images.txt"
    color_video_column_path = "test_small_151_color_videos.txt"
    pose_video_column_path = "test_small_151_black_pose_videos.txt"
    hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    def get_file_path_list(column_path, mode="prompts"):
        """Read one entry per line, stopping at the first blank line.

        ``mode == "prompts"`` keeps the raw line; any other mode joins the
        line onto ``data_root`` to form an absolute path.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # a blank line terminates the list
                result_list.append(line if mode == "prompts" else os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
    # Evaluate the first min(calculate_total_gen_sample, len) samples in order.
    chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))

    all_gt_embed = []
    all_pred_embed = []
    for idx, curr_chosen_idx in enumerate(chosen_idx):
        # Recover the global sample index from the trajectory file name,
        # e.g. ".../hoi_pose_..._00042.npy" -> 42.
        data_sort_idx = int(os.path.splitext(hoi_pose_traj_path_list[curr_chosen_idx])[0].split("_")[-1])
        seg_id = data_sort_idx // 2781  # samples are sharded into 2781-per-segment directories
        gt_hoi_path = os.path.join(data_root, f"hoi_pose_resize_norm_n1to1/seg_{seg_id:02d}/hoi_pose_resize_norm_n1to1_{data_sort_idx:05d}.npy")
        # EMDM predictions reuse the ground-truth file's basename.
        pred_hoi_path = os.path.join(sampled_results_dir, os.path.basename(gt_hoi_path))
        assert os.path.exists(gt_hoi_path) and os.path.exists(pred_hoi_path), f"不存在 gt_hoi_path: {gt_hoi_path}, pred_hoi_path: {pred_hoi_path}"

        gt_hoi = np.load(gt_hoi_path)[None]    # [1, 49, 298, 3]
        pred_hoi = np.load(pred_hoi_path)[None]  # [1, 49, 42, 3]
        print(f"{idx}, gt: {gt_hoi.shape}, pred: {pred_hoi.shape}")

        gt_hoi = torch.from_numpy(gt_hoi).float().to(device)
        pred_hoi = torch.from_numpy(pred_hoi).float().to(device)

        # [b, t, v, c] -> [b, v*c, t] for the encoder, then mean-pool over
        # time: [b, d, t] -> [b, d]. no_grad avoids building autograd graphs.
        with torch.no_grad():
            gt_emb = motion_enc.embed_data(rearrange(gt_hoi, "b t v c -> b (v c) t")).mean(dim=-1)
            pred_emb = motion_enc.embed_data(rearrange(pred_hoi, "b t v c -> b (v c) t")).mean(dim=-1)

        all_gt_embed.append(gt_emb.detach().cpu().numpy())
        all_pred_embed.append(pred_emb.detach().cpu().numpy())

    all_gt_embed = np.concatenate(all_gt_embed, axis=0)
    gt_mu, gt_cov = calculate_activation_statistics(all_gt_embed)

    all_pred_embed = np.concatenate(all_pred_embed, axis=0)
    mu, cov = calculate_activation_statistics(all_pred_embed)
    fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)

    print(f"fid: {fid}")



def calculate_activation_statistics(activations):
    """Compute the Gaussian statistics of a batch of embeddings.

    Params:
    -- activations: num_samples x dim_feat array.
    Returns:
    -- mu:  dim_feat mean vector.
    -- cov: dim_feat x dim_feat covariance matrix (columns as variables).
    """
    return np.mean(activations, axis=0), np.cov(activations, rowvar=False)

def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Numpy implementation of the Frechet Distance.

    For Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) the squared
    distance is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.

    Params:
    -- mu1   : mean of the generated-sample activations.
    -- sigma1: covariance of the generated-sample activations.
    -- mu2   : mean of the reference activations.
    -- sigma2: covariance of the reference activations.
    -- eps   : diagonal jitter applied when the covariance product is singular.
    Returns:
    --   : The Frechet Distance.
    """
    mu1, mu2 = np.atleast_1d(mu1), np.atleast_1d(mu2)
    sigma1, sigma2 = np.atleast_2d(sigma1), np.atleast_2d(sigma2)

    assert mu1.shape == mu2.shape, \
        'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
        'Training and test covariances have different dimensions'

    mean_diff = mu1 - mu2

    # A near-singular product can make sqrtm return non-finite entries;
    # retry with jittered diagonals in that case.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        print(('fid calculation produces singular product; '
               'adding %s to diagonal of cov estimates') % eps)
        jitter = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + jitter).dot(sigma2 + jitter))

    # Numerical error may leave a tiny imaginary component; strip it if small.
    if np.iscomplexobj(covmean):
        if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
            raise ValueError('Imaginary component {}'.format(np.max(np.abs(covmean.imag))))
        covmean = covmean.real

    return (mean_diff.dot(mean_diff)
            + np.trace(sigma1) + np.trace(sigma2)
            - 2 * np.trace(covmean))


if __name__ == "__main__":
    device = "cuda:0"
    # Pretrained CVAE motion-encoder checkpoint used as the FID feature extractor.
    encoder_path = "/WORK/PUBLIC/liuyebin_work/lingweidang/model_zoos/emdm_evaluator/ours_cvae/model_000524000.pth"
    # NOTE(review): torch.load without weights_only unpickles arbitrary objects;
    # only load checkpoints from trusted sources.
    checkpoint = torch.load(encoder_path, map_location=device)

    motion_enc = CVAE()
    motion_enc.to(device)
    # strict=False: presumably the checkpoint stores extra (e.g. decoder) weights
    # this evaluator does not need -- verify via the printed key lists below.
    missing, unexpected = motion_enc.load_state_dict(checkpoint['model'], strict=False)
    motion_enc.eval()
    print('Missing Keys: ', missing)
    print('Unexpected Keys: ', unexpected)

    eval_hoi_pose_traj_cogvideo(calculate_total_gen_sample=None, motion_enc=motion_enc, device=device)
    # eval_hoi_pose_traj_EMDM_and_GUESS(calculate_total_gen_sample=None, motion_enc=motion_enc, device=device)