#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File    :   make_samples.py
@Time    :   2025/03/14 16:01:58
@Author  :   Sanqi Lu
@Contact :   sanqilu376@163.com
@Desc    :   Collect sampled / ground-truth videos and first-frame images into prompt-named files for VBench evaluation.
'''
import os
import sys
sys.path.append(".")
import numpy as np
import shutil
import imageio
import natsort

def make_samples_for_sft_cogvideo(calculate_total_gen_sample=None, gt_flag=False):
    """Stage SFT-CogVideo samples (or ground-truth videos) for VBench.

    Copies each sample's first-frame image and RGB video into a flat
    directory, renaming both to ``{prompt}-{dup_counter}.{png,mp4}`` so the
    evaluator can pair them by prompt.

    Args:
        calculate_total_gen_sample: Upper bound on how many samples to export;
            ``None`` means "all available".
            NOTE(review): when ``gt_flag`` is True and this is ``None``, the
            output dir is literally named ``gt_vbench_data_None`` -- confirm
            that is intended.
        gt_flag: If True, copy the ground-truth RGB videos instead of the
            sampled predictions.
    """
    sampled_results_dir = "/WORK/PUBLIC/liuyebin_work/lingweidang/outputs/for_paper/ablation/auxiliary_motion_diffusion/based_on_ours_53200/ours_w_vid_lora_based_on_ours_53200_240x368/sample_ours_w_mgan_lora_based_on_ours_53200_ckpt600_240x368"
    # GT exports get their own directory so they never mix with predictions.
    data_to_eval_dir = os.path.join(
        os.path.dirname(sampled_results_dir),
        "samples_for_vbench" if not gt_flag else f"gt_vbench_data_{calculate_total_gen_sample}",
    )
    os.makedirs(data_to_eval_dir, exist_ok=True)

    data_root = "/WORK/PUBLIC/liuyebin_work/lingweidang/datas/TACO_Data_20250314/full_20k_plus"
    # Column files for the train10/test10 ablation subset (one entry per line).
    prompts_column_path = "ablation_train10_test10_prompts.txt"
    img_column_path = "ablation_train10_test10_images.txt"
    color_video_column_path = "ablation_train10_test10_color_videos.txt"
    pose_video_column_path = "ablation_train10_test10_black_pose_videos.txt"
    hoi_traj_path = "ablation_train10_test10_normalized_hoi_pose_trajs.txt"

    same_prompts_dict = {}  # prompt -> duplicate counter, de-clashes output filenames

    def get_file_path_list(column_path, mode="prompts"):
        """Read non-empty lines from *column_path*.

        ``mode="prompts"`` returns the raw lines; any other mode joins each
        line onto ``data_root``. A blank line acts as an end-of-data marker.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # blank line terminates the column file
                if mode == "prompts":
                    result_list.append(line)
                else:
                    result_list.append(os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    # Keep only the first 10 entries (the "train10" half of the ablation
    # subset); slice with [only_train_len:] instead to export the held-out half.
    only_train_len = 10
    prompts_list = prompts_list[:only_train_len]
    img_path_list = img_path_list[:only_train_len]
    color_video_path_list = color_video_path_list[:only_train_len]
    pose_video_path_list = pose_video_path_list[:only_train_len]
    hoi_pose_traj_path_list = hoi_pose_traj_path_list[:only_train_len]

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)  # NOTE(review): vestigial -- the selection below is deterministic
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))
        calculate_total_gen_sample = len(chosen_idx)  # clamp so the progress print is accurate

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        prompt = prompts_list[curr_chosen_idx]
        img_path = img_path_list[curr_chosen_idx]
        rgb_video_path = color_video_path_list[curr_chosen_idx]
        pose_video_path = pose_video_path_list[curr_chosen_idx]

        prompt = prompt[:226]  # prompt length cap (226 chars)
        print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, prompt: {prompt}, img_path: {img_path}, rgb_video_path: {rgb_video_path}, pose_video_path: {pose_video_path}")
        # Repeated prompts get a -0, -1, ... suffix so files do not collide.
        same_prompts_dict[prompt] = same_prompts_dict.get(prompt, -1) + 1

        if os.path.exists(img_path):
            shutil.copyfile(img_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.png"))
        else:
            print(f"img_path: {img_path} 不存在")

        if gt_flag:
            sampled_rgb_video_path = rgb_video_path
        else:
            # Recover the zero-padded dataset index from the GT video filename
            # and map it onto the sampler's output naming scheme.
            data_sort_idx = os.path.splitext(os.path.basename(rgb_video_path))[0].split("_")[-1]
            sampled_rgb_video_path = os.path.join(sampled_results_dir, f"pred_color_video_{data_sort_idx}_p0.mp4")
        if os.path.exists(sampled_rgb_video_path):
            shutil.copyfile(sampled_rgb_video_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.mp4"))
        else:
            print(f"sampled_rgb_video_path: {sampled_rgb_video_path} 不存在")

    print("Done!")


def make_test_data_for_animate_anyone(calculate_total_gen_sample=None, gt_flag=False):
    """Stage AnimateAnyone samples (or ground-truth videos) for VBench.

    Copies each sample's first-frame image and RGB video into a flat
    directory, renaming both to ``{prompt}-{dup_counter}.{png,mp4}``.

    Args:
        calculate_total_gen_sample: Upper bound on how many samples to export;
            ``None`` means "all available".
        gt_flag: If True, copy the ground-truth RGB videos instead of the
            sampled predictions.
    """
    img_hw = [416, 624]  # resolution tokens baked into the sample filenames
    sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/animateanyone/animateanyone_samples_416_624"
    data_to_eval_dir = os.path.join("/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/animateanyone", "samples_for_vbench")
    os.makedirs(data_to_eval_dir, exist_ok=True)

    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    # Column files for the 151-sample test split (one entry per line).
    prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    img_column_path = "test_small_151_images_416x624.txt"
    color_video_column_path = "test_small_151_color_videos_416x624.txt"
    pose_video_column_path = "test_small_151_black_pose_videos_416x624.txt"
    hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    same_prompts_dict = {}  # prompt -> duplicate counter, de-clashes output filenames

    def get_file_path_list(column_path, mode="prompts"):
        """Read non-empty lines from *column_path*.

        ``mode="prompts"`` returns the raw lines; any other mode joins each
        line onto ``data_root``. A blank line acts as an end-of-data marker.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # blank line terminates the column file
                if mode == "prompts":
                    result_list.append(line)
                else:
                    result_list.append(os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)  # NOTE(review): vestigial -- the selection below is deterministic
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))
        calculate_total_gen_sample = len(chosen_idx)  # clamp so the progress print is accurate

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        prompt = prompts_list[curr_chosen_idx]
        img_path = img_path_list[curr_chosen_idx]
        rgb_video_path = color_video_path_list[curr_chosen_idx]
        pose_video_path = pose_video_path_list[curr_chosen_idx]

        prompt = prompt[:226]  # prompt length cap (226 chars)
        # Recover the zero-padded dataset index from the first-frame filename.
        origin_data_sort_idx = int(os.path.splitext(os.path.basename(img_path))[0].split("_")[-1])
        print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, prompt: {prompt}, img_path: {img_path}, rgb_video_path: {rgb_video_path}, pose_video_path: {pose_video_path}")
        # Repeated prompts get a -0, -1, ... suffix so files do not collide.
        same_prompts_dict[prompt] = same_prompts_dict.get(prompt, -1) + 1

        if os.path.exists(img_path):
            shutil.copyfile(img_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.png"))
        else:
            print(f"不存在 img_path: {img_path}")

        if gt_flag:
            sampled_rgb_video_path = rgb_video_path
        else:
            # Sampler naming scheme, e.g.:
            # first_frame_240_368_00689_black_pose_video_240_368_00689_240x368_3_1134
            vname = f"first_frame_{img_hw[0]}_{img_hw[1]}_{origin_data_sort_idx:05d}_black_pose_video_{img_hw[0]}_{img_hw[1]}_{origin_data_sort_idx:05d}_{img_hw[0]}x{img_hw[1]}_3_1412"
            sampled_rgb_video_path = os.path.join(sampled_results_dir, f"{vname}.mp4")
        if os.path.exists(sampled_rgb_video_path):
            shutil.copyfile(sampled_rgb_video_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.mp4"))
        else:
            print(f"不存在: rgb_video_path: {sampled_rgb_video_path}")

    print("Done!")


def make_test_data_for_easy_animate(calculate_total_gen_sample=None, gt_flag=False):
    """Stage EasyAnimate samples (or ground-truth videos) for VBench.

    Copies each sample's first-frame image and RGB video into a flat
    directory, renaming both to ``{prompt}-{dup_counter}.{png,mp4}``.

    Args:
        calculate_total_gen_sample: Upper bound on how many samples to export;
            ``None`` means "all available".
        gt_flag: If True, copy the ground-truth RGB videos instead of the
            sampled predictions.
    """
    img_hw = [416, 624]  # resolution tokens baked into the sample filenames
    sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/easyanimate/easyanimate_samples_test_small_151_416x624"
    data_to_eval_dir = os.path.join(os.path.dirname(sampled_results_dir), "samples_for_vbench")
    os.makedirs(data_to_eval_dir, exist_ok=True)

    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    # Column files for the 151-sample test split (one entry per line).
    prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    img_column_path = "test_small_151_images_416x624.txt"
    color_video_column_path = "test_small_151_color_videos_416x624.txt"
    pose_video_column_path = "test_small_151_black_pose_videos_416x624.txt"
    hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    same_prompts_dict = {}  # prompt -> duplicate counter, de-clashes output filenames

    def get_file_path_list(column_path, mode="prompts"):
        """Read non-empty lines from *column_path*.

        ``mode="prompts"`` returns the raw lines; any other mode joins each
        line onto ``data_root``. A blank line acts as an end-of-data marker.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # blank line terminates the column file
                if mode == "prompts":
                    result_list.append(line)
                else:
                    result_list.append(os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)  # NOTE(review): vestigial -- the selection below is deterministic
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))
        calculate_total_gen_sample = len(chosen_idx)  # clamp so the progress print is accurate

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        prompt = prompts_list[curr_chosen_idx]
        img_path = img_path_list[curr_chosen_idx]
        rgb_video_path = color_video_path_list[curr_chosen_idx]
        pose_video_path = pose_video_path_list[curr_chosen_idx]

        prompt = prompt[:226]  # prompt length cap (226 chars)
        # Recover the zero-padded dataset index from the first-frame filename.
        origin_data_sort_idx = int(os.path.splitext(os.path.basename(img_path))[0].split("_")[-1])
        print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, prompt: {prompt}, img_path: {img_path}, rgb_video_path: {rgb_video_path}, pose_video_path: {pose_video_path}")
        # Repeated prompts get a -0, -1, ... suffix so files do not collide.
        same_prompts_dict[prompt] = same_prompts_dict.get(prompt, -1) + 1

        if os.path.exists(img_path):
            shutil.copyfile(img_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.png"))
        else:
            print(f"不存在 img_path: {img_path}")

        if gt_flag:
            sampled_rgb_video_path = rgb_video_path
        else:
            # EasyAnimate names samples after the first-frame image.
            sampled_rgb_video_path = os.path.join(sampled_results_dir, f"first_frame_{img_hw[0]}_{img_hw[1]}_{origin_data_sort_idx:05d}.mp4")
        if os.path.exists(sampled_rgb_video_path):
            shutil.copyfile(sampled_rgb_video_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.mp4"))
        else:
            print(f"不存在: rgb_video_path: {sampled_rgb_video_path}")

    print("Done!")


def make_test_data_for_cogvideo(calculate_total_gen_sample=None, gt_flag=False):
    """Stage CogVideo samples (or ground-truth videos) for VBench.

    Copies each sample's first-frame image and RGB video into a flat
    directory, renaming both to ``{prompt}-{dup_counter}.{png,mp4}``.

    Args:
        calculate_total_gen_sample: Upper bound on how many samples to export;
            ``None`` means "all available".
        gt_flag: If True, copy the ground-truth RGB videos instead of the
            sampled predictions.
    """
    sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/cogvideo/sample_test151_ckpt_3000_resize_416x624"
    data_to_eval_dir = os.path.join(os.path.dirname(sampled_results_dir), "samples_for_vbench")
    os.makedirs(data_to_eval_dir, exist_ok=True)

    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    # Column files for the 151-sample test split (one entry per line).
    prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    img_column_path = "test_small_151_images_416x624.txt"
    color_video_column_path = "test_small_151_color_videos_416x624.txt"
    pose_video_column_path = "test_small_151_black_pose_videos_416x624.txt"
    hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    same_prompts_dict = {}  # prompt -> duplicate counter, de-clashes output filenames

    def get_file_path_list(column_path, mode="prompts"):
        """Read non-empty lines from *column_path*.

        ``mode="prompts"`` returns the raw lines; any other mode joins each
        line onto ``data_root``. A blank line acts as an end-of-data marker.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # blank line terminates the column file
                if mode == "prompts":
                    result_list.append(line)
                else:
                    result_list.append(os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)  # NOTE(review): vestigial -- the selection below is deterministic
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))
        calculate_total_gen_sample = len(chosen_idx)  # clamp so the progress print is accurate

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        prompt = prompts_list[curr_chosen_idx]
        img_path = img_path_list[curr_chosen_idx]
        rgb_video_path = color_video_path_list[curr_chosen_idx]
        pose_video_path = pose_video_path_list[curr_chosen_idx]

        prompt = prompt[:226]  # prompt length cap (226 chars)
        print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, prompt: {prompt}, img_path: {img_path}, rgb_video_path: {rgb_video_path}, pose_video_path: {pose_video_path}")
        # Repeated prompts get a -0, -1, ... suffix so files do not collide.
        same_prompts_dict[prompt] = same_prompts_dict.get(prompt, -1) + 1

        if os.path.exists(img_path):
            shutil.copyfile(img_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.png"))
        else:
            print(f"不存在 img_path: {img_path}")

        if gt_flag:
            sampled_rgb_video_path = rgb_video_path
        else:
            # NOTE(review): unlike the sibling functions, the sample file is
            # keyed on the *position* in the test list (curr_chosen_idx), not
            # on the dataset index parsed from img_path -- confirm the sampler
            # wrote its outputs that way (e.g. pred_color_video_00000.mp4).
            vname = f"pred_color_video_{curr_chosen_idx:05d}"
            sampled_rgb_video_path = os.path.join(sampled_results_dir, f"{vname}.mp4")
        if os.path.exists(sampled_rgb_video_path):
            shutil.copyfile(sampled_rgb_video_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.mp4"))
        else:
            print(f"不存在: rgb_video_path: {sampled_rgb_video_path}")

    print("Done!")


def make_test_data_for_hunyuan13b_and_wan21(calculate_total_gen_sample=None, gt_flag=False):
    """Stage HunyuanVideo-13B / Wan2.1 samples (or GT videos) for VBench.

    Copies each sample's first-frame image and RGB video into a flat
    directory, renaming both to ``{prompt}-{dup_counter}.{png,mp4}``.
    Switch ``sampled_results_dir`` below to choose between the two models.

    Args:
        calculate_total_gen_sample: Upper bound on how many samples to export;
            ``None`` means "all available".
        gt_flag: If True, copy the ground-truth RGB videos instead of the
            sampled predictions.
    """
    img_hw = [416, 624]  # resolution tokens baked into the sample filenames
    # sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/hunyuan13b/hunyuani2v13B_samples_test_151_416_624"
    sampled_results_dir = "/share/home/wuqingyao_danglingwei/outputs/for_paper_20250409/416x624/wan21/wan21_samples_test151_416_624/wan21_samples_test151_416_624"
    data_to_eval_dir = os.path.join(os.path.dirname(sampled_results_dir), "samples_for_vbench")
    os.makedirs(data_to_eval_dir, exist_ok=True)

    data_root = "/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus"
    # Column files for the 151-sample test split (one entry per line).
    prompts_column_path = "test_small_151_VLMEnhanced_prompts.txt"
    img_column_path = "test_small_151_images_416x624.txt"
    color_video_column_path = "test_small_151_color_videos_416x624.txt"
    pose_video_column_path = "test_small_151_black_pose_videos_416x624.txt"
    hoi_traj_path = "test_small_151_normalized_hoi_pose_trajs.txt"

    same_prompts_dict = {}  # prompt -> duplicate counter, de-clashes output filenames

    def get_file_path_list(column_path, mode="prompts"):
        """Read non-empty lines from *column_path*.

        ``mode="prompts"`` returns the raw lines; any other mode joins each
        line onto ``data_root``. A blank line acts as an end-of-data marker.
        """
        result_list = []
        with open(column_path, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if not line:
                    break  # blank line terminates the column file
                if mode == "prompts":
                    result_list.append(line)
                else:
                    result_list.append(os.path.join(data_root, line))
        return result_list

    prompts_list = get_file_path_list(os.path.join(data_root, prompts_column_path), mode="prompts")
    img_path_list = get_file_path_list(os.path.join(data_root, img_column_path), mode="others")
    color_video_path_list = get_file_path_list(os.path.join(data_root, color_video_column_path), mode="others")
    pose_video_path_list = get_file_path_list(os.path.join(data_root, pose_video_column_path), mode="others")
    hoi_pose_traj_path_list = get_file_path_list(os.path.join(data_root, hoi_traj_path), mode="others")

    print(f"len(rgb_video_path_list): {len(color_video_path_list)}")
    assert len(color_video_path_list) == len(pose_video_path_list) == len(img_path_list) == len(prompts_list) == len(hoi_pose_traj_path_list)

    if calculate_total_gen_sample is None:
        calculate_total_gen_sample = len(prompts_list)
        chosen_idx = np.arange(0, calculate_total_gen_sample)
    else:
        np.random.seed(42)  # NOTE(review): vestigial -- the selection below is deterministic
        chosen_idx = np.arange(0, min(calculate_total_gen_sample, len(prompts_list)))
        calculate_total_gen_sample = len(chosen_idx)  # clamp so the progress print is accurate

    for idx, curr_chosen_idx in enumerate(chosen_idx):
        prompt = prompts_list[curr_chosen_idx]
        img_path = img_path_list[curr_chosen_idx]
        rgb_video_path = color_video_path_list[curr_chosen_idx]
        pose_video_path = pose_video_path_list[curr_chosen_idx]

        prompt = prompt[:226]  # prompt length cap (226 chars)
        # Recover the zero-padded dataset index from the first-frame filename.
        origin_data_sort_idx = int(os.path.splitext(os.path.basename(img_path))[0].split("_")[-1])
        print(f"采样生成: idx: {idx}/{calculate_total_gen_sample}, prompt: {prompt}, img_path: {img_path}, rgb_video_path: {rgb_video_path}, pose_video_path: {pose_video_path}")
        # Repeated prompts get a -0, -1, ... suffix so files do not collide.
        same_prompts_dict[prompt] = same_prompts_dict.get(prompt, -1) + 1

        if os.path.exists(img_path):
            shutil.copyfile(img_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.png"))
        else:
            print(f"不存在 img_path: {img_path}")

        if gt_flag:
            sampled_rgb_video_path = rgb_video_path
        else:
            # Sampler naming scheme, e.g. pred_color_videos_416x624_00689.mp4
            sampled_rgb_video_path = os.path.join(sampled_results_dir, f"pred_color_videos_{img_hw[0]}x{img_hw[1]}_{origin_data_sort_idx:05d}.mp4")
        if os.path.exists(sampled_rgb_video_path):
            shutil.copyfile(sampled_rgb_video_path, os.path.join(data_to_eval_dir, f"{prompt}-{same_prompts_dict[prompt]}.mp4"))
        else:
            print(f"不存在: rgb_video_path: {sampled_rgb_video_path}")

    print("Done!")


if __name__ == "__main__":
    # list_test_data_sorted_idx(calculate_total_gen_sample=None, gt_flag=True)
    # make_test_data_for_animate_anyone(calculate_total_gen_sample=None, gt_flag=False)
    # make_test_data_for_easy_animate(calculate_total_gen_sample=None, gt_flag=False)
    # make_test_data_for_cogvideo(calculate_total_gen_sample=None, gt_flag=False)
    make_samples_for_sft_cogvideo(calculate_total_gen_sample=None, gt_flag=False)
    # make_test_data_for_hunyuan13b_and_wan21(calculate_total_gen_sample=None, gt_flag=False)
    pass