import json
import random
import itertools
import os
import random

import numpy as np
import cv2
import torch
import torchvision.transforms as transforms
from decord import VideoReader
from torchvision.utils import save_image
from PIL import Image
from torch.utils.data import Dataset
from transformers import CLIPImageProcessor
from tqdm import tqdm

from utils import _55_to_01,create_logger

# Module-level logger shared by all helpers in this file (built via utils.create_logger).
logger = create_logger('dataset')

def resize_image(input_image:np.ndarray|Image.Image, resolution=512)->np.ndarray:
    """Rescale an image so its shorter side is about ``resolution`` pixels.

    The scale factor is computed from the shorter side, then each dimension
    is rounded to the nearest multiple of 64. PIL images are converted to
    numpy arrays first. 768x768 inputs are returned untouched (treated as
    already-prepared samples). Upscaling uses Lanczos interpolation,
    downscaling uses area interpolation.
    """
    if isinstance(input_image, Image.Image):
        input_image = np.array(input_image)
    height, width, _ = input_image.shape
    if height == 768 and width == 768:
        # Pre-sized sample — skip resizing entirely.
        return input_image
    scale = float(resolution) / min(float(height), float(width))
    target_h = int(np.round(float(height) * scale / 64.0)) * 64
    target_w = int(np.round(float(width) * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (target_w, target_h), interpolation=interp)

def get_data_pair(root_path,batch_size=1):
    """Collect (generated, reference) image/pose path pairs for every video.

    Expects the layout ``root_path/frames/<video>/*.{jpg,png}`` and
    ``root_path/poses/<video>/*.{jpg,png}``. Only filenames present in BOTH
    folders are used, because low-quality poses may have been deleted from
    the pose folder.

    Args:
        root_path: dataset root containing ``frames`` and ``poses`` dirs.
        batch_size: unused here; kept for interface compatibility (batching
            happens later in ``PoseDataset.__getitem__``).

    Returns:
        A list with one entry per video (object). Each entry is the list of
        all ordered 2-permutations of that video's usable frames, as pairs
        ``((gen_img_path, gen_pose_path), (ref_img_path, ref_pose_path))``.
    """
    frames_dir = os.path.join(root_path, 'frames')
    poses_dir = os.path.join(root_path, 'poses')
    image_exts = ('.jpg', '.png')

    res = []
    # sorted() so the pool order (and thus dataset indexing) is deterministic
    # across runs; os.listdir order is filesystem-dependent.
    for video in sorted(os.listdir(poses_dir)):
        frames_video_path = os.path.join(frames_dir, video)
        poses_video_path = os.path.join(poses_dir, video)

        # Both modality folders must exist for this video.
        if not (os.path.isdir(frames_video_path) and os.path.isdir(poses_video_path)):
            continue

        frames_pngs = {f for f in os.listdir(frames_video_path) if f.endswith(image_exts)}
        poses_pngs = {f for f in os.listdir(poses_video_path) if f.endswith(image_exts)}
        # Intersect: a frame is usable only if its pose survived filtering.
        common_pngs = frames_pngs & poses_pngs

        # Need at least two frames to form a (gen, ref) pair.
        # TODO: for single-image videos, pair against a blank reference instead.
        if len(common_pngs) < 2:
            continue

        # All ordered pairs; guaranteed non-empty since len(common_pngs) >= 2.
        pair_permutations = list(itertools.permutations(common_pngs, 2))
        logger.info(f"{video}的组合数：{len(pair_permutations)}")

        obj_pairs = [
            (
                (os.path.join(frames_video_path, gen_name),
                 os.path.join(poses_video_path, gen_name)),
                (os.path.join(frames_video_path, ref_name),
                 os.path.join(poses_video_path, ref_name)),
            )
            for gen_name, ref_name in pair_permutations
        ]
        res.append(obj_pairs)

    return res

def get_obj_list(root_path):
    """Return the names of objects (video folders) that have at least one pose file.

    NOTE(review): this reads ``root_path/pose`` while ``get_data_pair`` reads
    ``root_path/poses`` — confirm which directory name the dataset actually uses.

    Args:
        root_path: dataset root containing the ``pose`` directory.

    Returns:
        List of subfolder names under ``pose`` whose folders are non-empty.
    """
    poses_dir = root_path + '/pose'

    # Keep only objects that actually contain data.
    return [name for name in os.listdir(poses_dir) if os.listdir(f"{poses_dir}/{name}")]




class PoseDataset(Dataset):
    """Dataset of (generated, reference) frame pairs with their pose maps.

    Each item corresponds to one object (video) and contains ``batch_size``
    pairs sampled from that object's pair pool. With probability
    ``pose_only_prob`` an item provides only the generation pose; every
    other tensor in the item is zero-filled.
    """

    def __init__(
        self,
        root_path="data/output",
        batch_size=1,
        pose_only_prob=0
    ):
        super().__init__()
        self.pose_only_prob = pose_only_prob # probability of the pose-only (blank-conditioning) branch
        self.batch_size = batch_size
        logger.info(f"batch_size：{batch_size}")
        logger.info(f"Pose-only (blank) probability: {self.pose_only_prob * 100}%")
        #self.frames_dir = root_path+'/frames'
        #self.poses_dir = root_path+'/pose'

        # One entry per object (video); each entry lists all (gen, ref) path pairs.
        self.pair_pool = get_data_pair(root_path,batch_size)
        logger.info(f"对象数：{len(self.pair_pool)}")

        # CLIP image preprocessing, loaded from the local "configs" directory.
        self.feature_extractor = CLIPImageProcessor.from_pretrained("configs", subfolder="feature_extractor")
        # Maps uint8 HxWxC images to float CHW tensors normalized to [-1, 1].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])

    def _load_image(self, path):
        """Load an image file as an RGB numpy array of shape (H, W, 3)."""
        img = Image.open(path).convert('RGB')
        return np.array(img)

    def __getitem__(self, index):
        """Return a dict of stacked tensors for ``batch_size`` pairs of object ``index``.

        Keys: ``gen_img``, ``gen_img_pose``, ``ref_img``, ``ref_img_pose``
        (each (B, 3, H, W), normalized to [-1, 1]) and ``ref_clip_img``
        (CLIP pixel values). With probability ``pose_only_prob`` only
        ``gen_img_pose`` holds real data and the rest are zero tensors.
        """
        obj_pairs = self.pair_pool[index]
        if len(obj_pairs) < self.batch_size: # not enough distinct pairs to draw from
            # Sample with replacement instead of raising.
            sel_pair = random.choices(obj_pairs, k=self.batch_size)
        else:
            sel_pair = random.sample(obj_pairs, self.batch_size)

        gen_img_list = []
        gen_img_pose_list = []
        ref_img_list = []
        ref_img_pose_list = []
        ref_clip_img_list = []

        # =========================================================================================
        #   With probability pose_only_prob, emit a "pose-only" item: only gen_img_pose is
        #   real; all other tensors are blank (all-zero).
        # =========================================================================================
        if random.random() < self.pose_only_prob:
            # --- pose-only branch: provide gen_pose only, blank out everything else ---
            # logger.info("Entering pose-only (blank) mode for this item.")
            for bi in sel_pair:
                # 1. Load and process gen_img_pose normally.
                gen_img_pose = self._load_image(bi[0][1])
                gen_img_pose = resize_image(gen_img_pose, 512)
                gen_img_pose = self.transform(gen_img_pose) # Shape: (3, H, W)
                gen_img_pose_list.append(gen_img_pose)

                # 2. Take the processed pose's spatial size.
                _, H, W = gen_img_pose.shape

                # 3. All-zero tensors of the same shape stand in as "blank" inputs.
                #    NOTE(review): the same tensor object is appended to all three
                #    lists; torch.stack copies, but confirm nothing mutates it in place.
                blank_image_tensor = torch.zeros(3, H, W, dtype=gen_img_pose.dtype)
                gen_img_list.append(blank_image_tensor)
                ref_img_list.append(blank_image_tensor)
                ref_img_pose_list.append(blank_image_tensor)

                # 4. A zero tensor shaped like the CLIP input stands in for the CLIP features.
                #    Hardcoded 224x224 — assumes feature_extractor outputs that size; TODO confirm.
                clip_size = (224,224)
                #print(clip_size)
                blank_clip_tensor = torch.zeros(
                    (3, clip_size[0], clip_size[1]),
                    dtype=torch.float32 # CLIP pixel_values are typically float32
                )
                ref_clip_img_list.append(blank_clip_tensor)

        else:
            # --- otherwise (probability 1 - pose_only_prob): full loading path ---
            for bi in sel_pair:
                # Load and process gen_img and gen_img_pose.
                gen_img = self._load_image(bi[0][0])
                gen_img_pose = self._load_image(bi[0][1])
                gen_img = resize_image(gen_img, 512)
                gen_img_pose = resize_image(gen_img_pose, 512)
                gen_img = self.transform(gen_img)
                gen_img_pose = self.transform(gen_img_pose)
                gen_img_list.append(gen_img)
                gen_img_pose_list.append(gen_img_pose)

                # Load and process ref_img and ref_img_pose.
                ref_img = self._load_image(bi[1][0])
                ref_img_pose = self._load_image(bi[1][1])
                ref_img = resize_image(ref_img, 512)
                ref_img_pose = resize_image(ref_img_pose, 512)
                ref_img = self.transform(ref_img)
                ref_img_pose = self.transform(ref_img_pose)
                ref_img_list.append(ref_img)
                ref_img_pose_list.append(ref_img_pose)

                # CLIP preprocessing of the reference image (full-resolution file, not the resized copy).
                ref_clip_img = self.feature_extractor(
                    images=Image.open(bi[1][0]).convert('RGB'), return_tensors='pt'
                ).pixel_values[0]
                ref_clip_img_list.append(ref_clip_img)

        # NOTE(review): stacking assumes all pairs in the batch resize to the same
        # H x W; mixed aspect ratios within one object would make torch.stack fail.
        return {
            "gen_img": torch.stack(gen_img_list),
            "gen_img_pose":torch.stack(gen_img_pose_list),
            "ref_img": torch.stack(ref_img_list),
            "ref_img_pose": torch.stack(ref_img_pose_list),
            "ref_clip_img": torch.stack(ref_clip_img_list),
        }


    def __len__(self):
        """Number of objects (videos) in the pool — one dataset item per object."""
        return len(self.pair_pool)


if __name__ == "__main__":
    # Smoke test: build the dataset, pull the first batch, and dump it to disk
    # for visual inspection.
    train_dataset = PoseDataset(batch_size=1)

    # Batching is handled inside the Dataset itself, so the DataLoader keeps
    # batch_size=1 and collate_fn just unwraps the single-element list.
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=4,
        collate_fn=lambda x: x[0],
    )

    for step, batch in enumerate(train_dataloader):
        if step > 0:  # only inspect the first batch
            break
        print(step)
        # Image tensors are normalized to [-1, 1]; _55_to_01 maps them back to [0, 1].
        save_image(
            torch.cat(
                [
                    _55_to_01(batch["gen_img"]),
                    _55_to_01(batch["gen_img_pose"]),
                    _55_to_01(batch["ref_img"]),
                    _55_to_01(batch["ref_img_pose"]),
                ],
                dim=3,
            ),
            f"outputs/dataset-{step}.png",
        )
        # CLIP pixel values get the same [-1, 1] -> [0, 1] mapping, clamped.
        save_image((batch["ref_clip_img"][0] / 2 + 0.5).clamp(0, 1), f"outputs/dataset_ref_clip_img-{step}.png")




