import sys
sys.path.append(".")
import os
import pandas as pd
import math
import natsort
import json

def prepare_train_colorvideo_and_posevideo_data_meta_json(
    save_dir="/home/fit/liuyebin/WORK/lingweidang/datas/TACO_Data_20250314/preprocessed_for_easy_animate",
    data_root="/home/fit/liuyebin/WORK/lingweidang/datas/TACO_Data_20250314/full_20k_plus",
    mode="ablation_train10_test10",
):
    """Build an EasyAnimate-style data-meta JSON pairing each color video with its prompt.

    Reads three parallel, line-aligned listing files under ``data_root``
    (``{mode}_color_videos.txt``, ``{mode}_black_pose_videos.txt`` and
    ``{mode}_prompts.txt``), verifies they have the same number of lines, and
    writes one ``{"file_path", "text", "type"}`` record per color video to
    ``{save_dir}/{mode}_only_color_video.json``.

    Args:
        save_dir: Directory the output JSON is written into (created if missing).
        data_root: Directory containing the ``{mode}_*.txt`` listing files.
        mode: Dataset split prefix, e.g. "ablation_train10_test10",
            "ablation_train_50", "base_2500_train",
            "base_2500_train_VLMEnhanced_prompts", "test_small_151".

    Returns:
        The path of the JSON file that was written.
    """
    color_video_column_path = f"{mode}_color_videos.txt"
    pose_video_column_path = f"{mode}_black_pose_videos.txt"
    prompts_column_path = f"{mode}_prompts.txt"

    with open(f"{data_root}/{color_video_column_path}", "r") as f:
        all_color_video_paths = f.readlines()

    with open(f"{data_root}/{pose_video_column_path}", "r") as f:
        all_pose_video_paths = f.readlines()

    with open(f"{data_root}/{prompts_column_path}", "r") as f:
        all_prompts = f.readlines()

    # The three listing files are parallel arrays; a length mismatch means the
    # data preprocessing upstream is broken.
    assert len(all_color_video_paths) == len(all_pose_video_paths) == len(all_prompts)

    # Build the JSON records. Only color videos are emitted in this variant;
    # the pose-video list is read purely for the alignment check above.
    prepare_data_meta_json = []
    for index, (vp, prompt) in enumerate(zip(all_color_video_paths, all_prompts)):
        vp = vp.strip()
        prompt = prompt.strip()
        print(f"index: {index}, vp: {vp}, prompt: {prompt}")

        data = {
            'file_path': vp, 
            'text': prompt, 
            'type': "video"
        }
        prepare_data_meta_json.append(data)

    # Write the metadata JSON.
    save_path = os.path.join(save_dir, f"{mode}_only_color_video.json")

    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    with open(save_path, 'w', encoding="utf-8") as pf:
        json.dump(prepare_data_meta_json, pf, indent=4)

    print(f"Done! 总共包含 {len(prepare_data_meta_json)} 条数据")
    return save_path

def prepare_train_repeat_100(
    save_dir="/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus",
    data_root="/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus",
    data_sort_idx_list=None,
    out_name="repeat100_from_train_25000_416x624.json",
):
    """Select a fixed subset of training samples and write their metadata as JSON.

    Reads three parallel, line-aligned listing files under ``data_root``
    (``train_prompts.txt``, ``train_images_416x624.txt``,
    ``train_color_videos_416x624.txt``), picks the rows named by
    ``data_sort_idx_list``, and writes one record per selected row — with keys
    ``data_sort_idx``, ``first_frame_416x624_path``,
    ``color_video_416x624_path`` and ``prompt`` — to ``{save_dir}/{out_name}``.

    Args:
        save_dir: Directory the output JSON is written into (created if missing).
        data_root: Directory containing the ``train_*.txt`` listing files.
        data_sort_idx_list: Row indices to select. Defaults to the hand-picked
            list of 100 training samples used for the repeat-100 experiment.
        out_name: File name of the output JSON.

    Returns:
        The path of the JSON file that was written.
    """
    if data_sort_idx_list is None:
        # Hand-picked 100 sample indices from the ~25k-row training listing.
        data_sort_idx_list = [36, 592, 672, 1070, 1377, 1468, 1632, 1991, 2662, 2976, 3524, 4111, 4893, 5069, 5125, 5501, 5653, 5708, 5730, 5857, 5871, 5934, 6657, 6864, 7365, 7702, 7782, 7908, 8097, 8105, 8268, 8352, 8695, 9019, 9103, 9633, 9973, 10021, 10154, 10400, 10472, 10724, 10795, 10959, 11488, 12339, 12394, 12553, 12761, 13043, 13445, 13628, 14082, 14135, 14337, 14517, 14867, 15306, 15351, 15469, 15554, 15713, 15747, 15857, 16096, 16214, 16223, 16487, 16852, 17716, 17903, 18925, 18997, 19696, 19719, 20114, 20258, 20384, 20490, 20617, 20656, 20700, 21503, 21785, 21941, 22374, 22628, 22714, 22748, 22757, 22846, 22865, 23221, 23413, 23494, 23692, 23936, 24257, 24279, 24828]

    os.makedirs(save_dir, exist_ok=True)

    caption_column = "train_prompts.txt"
    image_column = "train_images_416x624.txt"
    color_video_column = "train_color_videos_416x624.txt"

    with open(f"{data_root}/{color_video_column}", "r") as f:
        all_color_video_paths = f.readlines()

    with open(f"{data_root}/{caption_column}", "r") as f:
        all_prompts = f.readlines()

    with open(f"{data_root}/{image_column}", "r") as f:
        all_first_frame_paths = f.readlines()

    # All three listing files are indexed by the same row numbers, so they
    # must be the same length (first-frame list included).
    assert len(all_color_video_paths) == len(all_prompts) == len(all_first_frame_paths)

    used_all_color_video_paths = [all_color_video_paths[item_idx] for item_idx in data_sort_idx_list]
    used_all_prompts = [all_prompts[item_idx] for item_idx in data_sort_idx_list]
    used_all_first_frame_paths = [all_first_frame_paths[item_idx] for item_idx in data_sort_idx_list]
    assert len(used_all_color_video_paths) == len(used_all_prompts) == len(used_all_first_frame_paths) == len(data_sort_idx_list)

    # Build the JSON records for the selected rows.
    prepare_data_meta_json = []
    for index, (ip, vp, prompt) in enumerate(zip(used_all_first_frame_paths, used_all_color_video_paths, used_all_prompts)):
        ip = ip.strip()
        vp = vp.strip()
        prompt = prompt.strip()
        print(f"index: {index}, vp: {vp}, prompt: {prompt}")

        data = {
            'data_sort_idx': data_sort_idx_list[index],
            'first_frame_416x624_path': ip,
            'color_video_416x624_path': vp, 
            'prompt': prompt, 
        }
        prepare_data_meta_json.append(data)

    # Write the metadata JSON.
    save_path = os.path.join(save_dir, out_name)
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    with open(save_path, 'w', encoding="utf-8") as pf:
        json.dump(prepare_data_meta_json, pf, indent=4)

    print(f"Done! 总共包含 {len(prepare_data_meta_json)} 条数据")
    return save_path



if __name__ == "__main__":
    # Default entry point: build the color-video-only metadata JSON.
    prepare_train_colorvideo_and_posevideo_data_meta_json()
    # prepare_train_repeat_100()
    

