import itertools
import json
import os
import random

import cv2
import numpy as np
import torch
import torchvision.transforms as transforms
from decord import VideoReader
from PIL import Image
from torch.utils.data import Dataset
from torchvision.utils import save_image
from tqdm import tqdm
from transformers import CLIPImageProcessor

def resize_image(input_image: np.ndarray | Image.Image, resolution=512) -> np.ndarray:
    """Resize so the shorter side is ~``resolution``, snapped to multiples of 64.

    The scale factor is computed from the shorter edge, applied to both
    dimensions, and each result is rounded to the nearest multiple of 64
    (as required by typical diffusion/UNet backbones).

    Args:
        input_image: HxWx3 image as a numpy array or a PIL image.
        resolution: target length in pixels for the shorter side.

    Returns:
        The resized image as a numpy array.
    """
    if isinstance(input_image, Image.Image):
        input_image = np.array(input_image)
    height, width, _ = input_image.shape
    scale = float(resolution) / min(float(height), float(width))
    new_h = int(np.round(height * scale / 64.0)) * 64
    new_w = int(np.round(width * scale / 64.0)) * 64
    # Lanczos preserves detail when enlarging; area averaging avoids
    # aliasing when shrinking.
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (new_w, new_h), interpolation=interp)

def get_data_path(root_path, batch_size=1):
    """Build batches of (generated, reference) image/pose path pairs.

    Expects ``root_path`` to contain ``frames/<video>/*.jpg`` and
    ``poses/<video>/*.jpg``. Only filenames present in BOTH folders are
    used (poses of poor quality may have been deleted). Every batch is
    drawn from a single video, i.e. all samples show the same person.

    Args:
        root_path: directory containing the ``frames`` and ``poses`` folders.
        batch_size: number of pairs per batch.

    Returns:
        batch_size=1: [ [((gen,p),(ref,p))], ..., [((gen,p),(ref,p))], ...]
        batch_size=2: [ [((gen,p),(ref,p)),((gen,p),(ref,p))], ...]
        where each inner 2-tuple is (image_path, pose_path).
    """
    frames_dir = os.path.join(root_path, 'frames')
    poses_dir = os.path.join(root_path, 'poses')

    res = []
    # Each video contributes several batches; the batch count depends on
    # how many ordered frame pairs it yields.
    for video in os.listdir(frames_dir):
        frames_video_path = os.path.join(frames_dir, video)
        poses_video_path = os.path.join(poses_dir, video)
        # Skip entries that do not exist as a directory on both sides.
        if not (os.path.isdir(frames_video_path) and os.path.isdir(poses_video_path)):
            continue

        frames_jpgs = set(f for f in os.listdir(frames_video_path) if f.endswith('.jpg'))
        poses_jpgs = set(f for f in os.listdir(poses_video_path) if f.endswith('.jpg'))
        # Intersection: keep only frames that still have a matching pose.
        common_jpgs = list(frames_jpgs & poses_jpgs)
        # All ordered (gen, ref) pairs of distinct frames.
        pair_permutations = list(itertools.permutations(common_jpgs, 2))
        print("组合数：", len(pair_permutations))

        if len(pair_permutations) < batch_size:  # not enough pairs for one batch
            continue
        # Largest multiple of batch_size that fits, so every batch is full.
        sample_size = (len(pair_permutations) // batch_size) * batch_size

        selected_pairs = random.sample(pair_permutations, sample_size)
        print("选择组合数：", len(selected_pairs))
        print(f"在 {video} 中随机选择了 {sample_size} 组数据，来自{len(pair_permutations)}组数据\n")

        # Chunk the selection into batches of exactly batch_size pairs.
        groups = [selected_pairs[i:i + batch_size] for i in range(0, len(selected_pairs), batch_size)]
        for batch in groups:
            _item = []
            for gen_name, ref_name in batch:
                gen_path = os.path.join(frames_video_path, gen_name)
                gen_pose_path = os.path.join(poses_video_path, gen_name)
                ref_path = os.path.join(frames_video_path, ref_name)
                ref_pose_path = os.path.join(poses_video_path, ref_name)

                _item.append(((gen_path, gen_pose_path), (ref_path, ref_pose_path)))
            res.append(_item)  # one batch: [((gen,p),(ref,p)), ...]

    return res

class PoseDataset(Dataset):
    """Batches of (generated frame, pose) / (reference frame, pose) images.

    Each dataset item is a *whole batch* of ``batch_size`` pairs, all taken
    from the same video (same person). Use the DataLoader with
    ``batch_size=1`` and a collate_fn that unwraps the singleton list.
    """

    def __init__(
        self,
        root_path="data/output",
        batch_size=1,
        resolution=512,
    ):
        """
        Args:
            root_path: directory containing ``frames`` and ``poses`` folders.
            batch_size: number of pairs bundled into one dataset item.
            resolution: shorter-side target passed to ``resize_image``.
        """
        super().__init__()

        self.batch_size = batch_size
        self.resolution = resolution
        self.data_pairs = get_data_path(root_path, batch_size)
        print("batch数量：", len(self.data_pairs))
        # CLIP preprocessing (resize/crop/normalize) for the reference image.
        self.feature_extractor = CLIPImageProcessor.from_pretrained("configs", subfolder="feature_extractor")
        # Maps uint8 HWC images to float CHW tensors in [-1, 1].
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])

    def _load_image(self, path):
        """Read an image file as an RGB numpy array of shape (H, W, 3)."""
        img = Image.open(path).convert('RGB')
        return np.array(img)

    def _prep(self, path):
        """Load, resize, and normalize one image into a (3, h, w) tensor."""
        return self.transform(resize_image(self._load_image(path), self.resolution))

    def __getitem__(self, index):
        """Return one pre-built batch as stacked (batch_size, 3, h, w) tensors."""
        data_pair = self.data_pairs[index]  # one batch: [((gen,p),(ref,p)), ...]
        gen_img_list = []
        gen_img_pose_list = []
        ref_img_list = []
        ref_img_pose_list = []
        ref_clip_img_list = []
        for (gen_path, gen_pose_path), (ref_path, ref_pose_path) in data_pair[:self.batch_size]:
            gen_img_list.append(self._prep(gen_path))
            gen_img_pose_list.append(self._prep(gen_pose_path))
            ref_img_list.append(self._prep(ref_path))
            ref_img_pose_list.append(self._prep(ref_pose_path))

            # CLIP features use the processor's own preprocessing, not
            # self.transform, so start from the raw reference image.
            ref_clip_img = self.feature_extractor(
                images=Image.open(ref_path).convert('RGB'), return_tensors='pt'
            ).pixel_values[0]
            ref_clip_img_list.append(ref_clip_img)

        return {
            "gen_img": torch.stack(gen_img_list),
            "gen_img_pose": torch.stack(gen_img_pose_list),
            "ref_img": torch.stack(ref_img_list),
            "ref_img_pose": torch.stack(ref_img_pose_list),
            "ref_clip_img": torch.stack(ref_clip_img_list),
        }

    def __len__(self):
        return len(self.data_pairs)


if __name__ == "__main__":
    train_dataset = PoseDataset(batch_size=1)
    # Batching is handled inside the dataset itself, so the DataLoader uses
    # batch_size=1 and collate_fn just unwraps the singleton list.
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset, batch_size=1, shuffle=True, num_workers=4,
        collate_fn=lambda x: x[0],
    )

    # Smoke test: dump the first batch's images to disk, then stop.
    for step, batch in enumerate(train_dataloader):
        if step > 0:
            break
        print(step)
        # Undo the [-1, 1] normalization before saving.
        save_image((batch["gen_img"][0] / 2 + 0.5).clamp(0, 1), f"outputs/gen_img-{step}.png")
        save_image((batch["gen_img_pose"][0] / 2 + 0.5).clamp(0, 1), f"outputs/gen_img_pose-{step}.png")
        save_image((batch["ref_img"][0] / 2 + 0.5).clamp(0, 1), f"outputs/ref_img-{step}.png")
        save_image((batch["ref_img_pose"][0] / 2 + 0.5).clamp(0, 1), f"outputs/ref_img_pose-{step}.png")
        # NOTE(review): ref_clip_img was normalized with CLIP's per-channel
        # mean/std, so /2 + 0.5 is only an approximate de-normalization here.
        save_image((batch["ref_clip_img"][0] / 2 + 0.5).clamp(0, 1), f"outputs/ref_clip_img-{step}.png")
     




