# -*- coding: utf-8 -*-
"""
# @FileName:     taco_dataset_v5.py
# @AuthorName:   Sanqi Lu (Lingwei Dang)
# @Institution:  SCUT, Guangzhou, China
# @EmailAddress: lenvondang@163.com
# @CreateTime:   2024/12/12 13:47
"""


import os
# os.chdir("/share/home/wuqingyao_danglingwei/codes/mdm_pose")
import sys

sys.path.append(".")
# import debugpy; debugpy.connect(('172.31.11.130', 5678))

import json
import pickle

import natsort
import numpy
from torch.utils.data import DataLoader, Dataset
import numpy as np
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from PIL import Image
import imageio

try:
    # Newer torchvision exposes resampling filters as an enum.
    from torchvision.transforms import InterpolationMode
except ImportError:
    # Older torchvision: fall back to the PIL resampling constant.
    BICUBIC = Image.BICUBIC
else:
    BICUBIC = InterpolationMode.BICUBIC

def _convert_image_to_rgb(image):
    """Return *image* converted to 3-channel RGB mode (transform-pipeline step)."""
    rgb_image = image.convert("RGB")
    return rgb_image

class TACO_Dataset_HOI(Dataset):
    """TACO hand-object-interaction (HOI) pose-trajectory dataset.

    Three parallel index files under ``data_root`` describe the samples:
    one prompt, one image path, and one normalized HOI pose-trajectory
    (.npy) path per line (line i of each file describes sample i).
    ``__getitem__`` currently returns only the pose trajectory; prompt and
    image loading exist but are commented out.

    NOTE(review): the original comment said this variant "introduces HOI
    region image cropping", but no cropping is visible in this code —
    confirm against the rest of the project.
    """

    def __init__(
            self,
            data_root="/share/home/wuqingyao_danglingwei/datas/TACO_Data_20250314/full_20k_plus",
            prompts_column_path="base_2500_train_VLMEnhanced_prompts.txt",
            img_column_path="base_2500_train_images.txt",
            hoi_traj_path="base_2500_train_normalized_hoi_pose_trajs.txt",
            debug=False
    ):
        """
        Args:
            data_root (str): root directory holding the index files and data.
            prompts_column_path (str): index file with one prompt per line.
            img_column_path (str): index file with one image path per line,
                relative to ``data_root``.
            hoi_traj_path (str): index file with one trajectory .npy path per
                line, relative to ``data_root``.
            debug (bool): if True, keep only the first 16 samples.

        Raises:
            ValueError: if the three index files have different line counts.
        """
        self.debug = debug
        self.data_root = data_root

        self.prompts = self._read_lines(os.path.join(data_root, prompts_column_path))
        self.img_paths = [
            os.path.join(data_root, rel)
            for rel in self._read_lines(os.path.join(data_root, img_column_path))
        ]
        self.hoi_traj_paths = [
            os.path.join(data_root, rel)
            for rel in self._read_lines(os.path.join(data_root, hoi_traj_path))
        ]

        # Explicit check instead of `assert`: asserts are stripped under -O.
        if not (len(self.prompts) == len(self.img_paths) == len(self.hoi_traj_paths)):
            raise ValueError(
                f"index files disagree in length: prompts={len(self.prompts)}, "
                f"images={len(self.img_paths)}, trajs={len(self.hoi_traj_paths)}"
            )

        if self.debug:
            # Small fixed subset for fast iteration while debugging.
            self.prompts = self.prompts[:16]
            self.img_paths = self.img_paths[:16]
            self.hoi_traj_paths = self.hoi_traj_paths[:16]

        # CLIP-style preprocessing; mean/std are the OpenAI CLIP constants.
        self.img_transforms = Compose([
            Resize([224, 224], interpolation=BICUBIC),
            # CenterCrop(image_size),
            _convert_image_to_rgb,
            ToTensor(),
            Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)),
        ])

    @staticmethod
    def _read_lines(path):
        """Read a UTF-8 text file and return a list of its stripped lines."""
        with open(path, "r", encoding="utf-8") as f:
            return [line.strip() for line in f]

    def __getitem__(self, index):
        """Return the normalized HOI pose trajectory (np.ndarray) for ``index``."""
        hoi_traj_path = self.hoi_traj_paths[index]
        # img_path = self.img_paths[index]
        # prompt = self.prompts[index]

        pose_traj = np.load(hoi_traj_path)
        # # Open and preprocess the video frame (currently disabled):
        # pilimg = Image.open(img_path).convert("RGB")
        # transformed_image = self.img_transforms(pilimg)  # [3, 224, 224]

        return pose_traj  # , prompt, transformed_image

    def __len__(self):
        """Number of samples (index files are validated to be equal length)."""
        return len(self.prompts)


if __name__ == '__main__':
    # Smoke test: build the dataset and report the shape of one batch.
    ds = TACO_Dataset_HOI()
    print(f"ds: {len(ds)}")
    dl = DataLoader(dataset=ds, batch_size=2, num_workers=2, shuffle=False)
    print(f"dl: {len(dl)}")
    # BUGFIX: __getitem__ returns only the pose trajectory (prompt/image
    # loading is commented out), so each batch is a single tensor — the old
    # 3-way unpacking `(pose_traj, prompts, transformed_image)` raised
    # ValueError at runtime.
    for pose_traj in dl:
        # Original note claims layout [b, 2*17, 3, t] — TODO confirm vs data.
        print(f"pose_traj: {pose_traj.shape}, {pose_traj.dtype}")
        break

