# -*- coding: utf-8 -*-
"""
完整的数据构造与 DataLoader 管道（HF datasets + parquet）
修复点：
1) set_transform 的函数（preprocess）同时支持“单条样本”和“批次样本”
2) test_transform 先转 tensor 再做 [-1,1] 归一化
3) train/test 变换仅处理“单张 PIL.Image”（批处理在 preprocess 中循环展开）
4) collate_fn 按键名堆叠
"""

import argparse
import os
import random
from functools import partial
from typing import Tuple, Dict, Any, List
from torch.utils.data import Dataset
from PIL.ImageOps import exif_transpose

import torch
from torch.utils.data import DataLoader, default_collate
from datasets import load_dataset
from torchvision import transforms

import torchvision.transforms as T
import torchvision.transforms.functional as TF
import kornia.color as K


from transformers import (
    CLIPTextModelWithProjection,
    CLIPTokenizer,
)


def rgb_to_grayscale(rgb_image: torch.Tensor) -> torch.Tensor:
    """Convert an RGB tensor to single-channel grayscale.

    Accepts [C,H,W] or [B,C,H,W]; values may be in [-1,1] or [0,1] — the
    output is in the same range as the input.

    Uses the ITU-R BT.601 luma weights (0.299, 0.587, 0.114), which are the
    defaults of kornia.color.rgb_to_grayscale. Because these weights sum to 1,
    the previous shift into [0,1] and back ((x+1)/2 before, *2-1 after) was a
    mathematical no-op, so it is dropped together with the kornia call; the
    weighted channel sum below produces identical values.

    Returns:
        [1,H,W] for a 3-D input, [B,1,H,W] for a 4-D input.
    Raises:
        ValueError: if the channel dimension is not 3.
    """
    if not isinstance(rgb_image, torch.Tensor):
        # as_tensor avoids an extra copy when the input is array-like
        rgb_image = torch.as_tensor(rgb_image)

    if rgb_image.shape[-3] != 3:
        raise ValueError(f"expected 3 channels, got shape {tuple(rgb_image.shape)}")

    # Weighted sum over the channel axis; keepdim keeps a channel dim of 1,
    # so [C,H,W] -> [1,H,W] and [B,C,H,W] -> [B,1,H,W] without reshaping.
    weights = torch.tensor([0.299, 0.587, 0.114], device=rgb_image.device).view(-1, 1, 1)
    return (rgb_image * weights).sum(dim=-3, keepdim=True)

#########################################################


@torch.no_grad()
def tokenize_prompt(tokenizer, prompt, text_encoder_architecture='open_clip'):
    """Tokenize `prompt` for the given text-encoder architecture.

    Supported architectures:
      * 'CLIP' / 'open_clip' — single tokenizer, padded/truncated to 77 tokens;
        returns the input_ids tensor directly.
      * 'CLIP_T5_base' — `tokenizer` is a pair [clip_tok, t5_tok]; returns a
        two-element list of input_ids (CLIP at 77 tokens, T5 at 512 tokens).

    Raises:
        ValueError: for any other architecture string.
    """
    def _ids(tok, max_len):
        # Common encoding call shared by all supported architectures.
        return tok(
            prompt,
            truncation=True,
            padding="max_length",
            max_length=max_len,
            return_tensors="pt",
        ).input_ids

    if text_encoder_architecture in ('CLIP', 'open_clip'):
        return _ids(tokenizer, 77)

    if text_encoder_architecture == 'CLIP_T5_base':
        # Two tokenizers: index 0 is CLIP, index 1 is T5.
        return [_ids(tokenizer[0], 77), _ids(tokenizer[1], 512)]

    raise ValueError(f"Unknown text_encoder_architecture: {text_encoder_architecture}")

def process_image(image, image_seg, size, Norm=False, hps_score = 6.0): 
    """Resize, randomly crop (one shared window), and scale an image /
    segmentation pair into [-1, 1] tensors.

    Args:
        image: PIL.Image, the RGB training image.
        image_seg: PIL.Image, the paired conditioning/segmentation image.
        size: target square resolution (int).
        Norm: if True, additionally apply Normalize(0.5, 0.5) to `image`.
            NOTE(review): the tensor is already in [-1, 1] at that point, so
            this maps it to [-3, 1] — confirm that is intended before enabling.
        hps_score: scalar appended to the micro-conditioning vector.

    Returns:
        dict with keys "image" [3,size,size], "image_gray" [1,size,size],
        "micro_conds" ([orig_w, orig_h, crop_top, crop_left, hps_score]),
        and "image_seg" [3,size,size].
    """
    image = exif_transpose(image)
    image_seg = exif_transpose(image_seg)

    if image.mode != "RGB":
        image = image.convert("RGB")
    if image_seg.mode != "RGB":
        image_seg = image_seg.convert("RGB")

    # Original dimensions are recorded before resizing for micro-conditioning.
    orig_height = image.height
    orig_width = image.width

    resize = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)
    image = resize(image)
    image_seg = resize(image_seg)

    # Draw ONE crop window from the RGB image and reuse it for the seg map so
    # the two stay spatially aligned.
    c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(size, size))

    def _crop_to_signed_tensor(img):
        # Crop -> [0,1] tensor -> [-1,1].
        img = transforms.functional.crop(img, c_top, c_left, size, size)
        return transforms.ToTensor()(img) * 2.0 - 1.0

    image = _crop_to_signed_tensor(image)
    image_seg = _crop_to_signed_tensor(image_seg)

    image_gray = rgb_to_grayscale(image)

    if Norm:
        image = transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True)(image)

    micro_conds = torch.tensor(
        [orig_width, orig_height, c_top, c_left, hps_score],
    )

    return {"image": image, "image_gray": image_gray, "micro_conds": micro_conds, "image_seg": image_seg}

def get_dataloader_COCO(
    tokenizer,
    args,
    split: str = "train",
) -> DataLoader:
    """Build a PyTorch DataLoader over a parquet-backed HF dataset.

    Expects the dataset directory (`args.instance_data_dir`, e.g.
    '../parquets_father_dir/') to provide at least the columns named by
    `args.image_key`, 'conditioning_image', and `args.prompt_key`.

    Bug fix: `split` was previously ignored — the data was always loaded with
    split="train" and always shuffled. `split` is now forwarded to
    `load_dataset`, and shuffling is enabled only for the training split.
    The default call (`split="train"`) behaves exactly as before.
    """
    dataset = COCODataset(
        hf_dataset=load_dataset(args.instance_data_dir, split=split),
        tokenizer=tokenizer,
        image_key=args.image_key,
        image_seg_key='conditioning_image',
        prompt_key=args.prompt_key,
        use_seg=args.use_seg,
        prompt_prefix=args.prompt_prefix,
        size=args.resolution,
        text_encoder_architecture=args.text_encoder_architecture,
    )

    dataloader = DataLoader(
        dataset,
        batch_size=args.train_batch_size,
        shuffle=(split == "train"),  # deterministic order for eval/test splits
        num_workers=args.dataloader_num_workers,
        collate_fn=default_collate,
        pin_memory=True,
    )

    print(f"[Info] {split} dataset loaded with {len(dataset)} samples.")
    return dataloader

class COCODataset(Dataset):
    """Map-style dataset pairing RGB images, conditioning/segmentation images,
    and tokenized prompts drawn from a HuggingFace dataset."""

    def __init__(
        self,
        hf_dataset,
        tokenizer,
        image_key,
        image_seg_key,
        prompt_key,
        use_seg,
        prompt_prefix=None,
        size=512,
        text_encoder_architecture='CLIP',
    ):
        # Plain attribute storage; all heavy work happens lazily in __getitem__.
        self.hf_dataset = hf_dataset
        self.tokenizer = tokenizer
        self.image_key = image_key
        self.image_seg_key = image_seg_key
        self.prompt_key = prompt_key
        self.use_seg = use_seg
        self.prompt_prefix = prompt_prefix
        self.size = size
        self.text_encoder_architecture = text_encoder_architecture

    def __len__(self):
        return len(self.hf_dataset)

    def __getitem__(self, index):
        sample = self.hf_dataset[index]

        # Image pair -> dict of tensors (image / image_gray / micro_conds / image_seg).
        out = process_image(sample[self.image_key], sample[self.image_seg_key], self.size)

        prompt = sample[self.prompt_key]
        if self.prompt_prefix is not None:
            prompt = self.prompt_prefix + prompt

        ids = tokenize_prompt(self.tokenizer, prompt, self.text_encoder_architecture)
        if isinstance(self.tokenizer, list):
            # Dual-tokenizer setup: keep the first row of each tokenizer's ids.
            out["prompt_input_ids"] = [ids[0][0], ids[1][0]]
        else:
            out["prompt_input_ids"] = ids[0]

        return out


# =========================
# 简单自测
# =========================
if __name__ == "__main__":
    def parse_args():
        parser = argparse.ArgumentParser()

        parser.add_argument(
            "--text_encoder_architecture",
            type=str,
            default="open_clip",
            required=False,
            help="The architecture of the text encoder. One of ['CLIP', 'open_clip', 'flan-t5-base','Qwen2-0.5B','gemini-2b',long_CLIP_T5_base','CLIP_T5_base']",
        )
        parser.add_argument("--use_seg", action="store_true", help="Whether to use segmentation image.")
        parser.add_argument(
            "--instance_data_dir",
            type=str,
            default="/home/101/u101004/user_gbh/data/",
            required=False,
            help="A folder containing the training data of instance images.",
        )

        parser.add_argument(
            "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
        )

        parser.add_argument(
            "--resolution",
            type=int,
            default=512,
            help=(
                "The resolution for input images, all the images in the train/validation dataset will be resized to this"
                " resolution"
            ),
        )
        parser.add_argument("--image_key", default="image", type=str, required=False)
        parser.add_argument("--prompt_key", default="text", type=str, required=False)

        parser.add_argument("--prompt_prefix", type=str, required=False, default=None)

        parser.add_argument(
            "--dataloader_num_workers",
            type=int,
            default=0,
            help=(
                "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
            ),
        )

        args = parser.parse_args()


        return args
    args = parse_args()

    tokenizer = CLIPTokenizer.from_pretrained(
        "/home/101/u101004/.cache/huggingface/hub/models--MeissonFlow--Meissonic/snapshots/7ee5068bcebaaf6165e5d77c2969aa3b38e17b38/", subfolder="tokenizer", variant=None
    )
    print(f"args.use_seg{args.use_seg}")

    dl = get_dataloader_COCO(
        tokenizer = tokenizer,
        args = args,
        split = "train",
    )

    for step, batch in enumerate(dl):
        print(f"batch.keys():{batch.keys()}")
        x = batch["image"]            # [B,3,H,W]
        print(f"x.shape:{x.shape}")
        x_gray = batch["image_gray"]
        print(f"x_gray.shape:{x_gray.shape}")
        micro_conds = batch["micro_conds"]
        print(f"micro_conds.shape:{micro_conds.shape}")
        prompt_input_ids = batch["prompt_input_ids"]
        print(f"prompt_input_ids.shape:{prompt_input_ids.shape}")
        print(f"prompt_input_ids:{prompt_input_ids[0]}")

        print(f"step={step} | original={tuple(x.shape)} | gray={tuple(x_gray.shape)}")
        if "seg_img" in batch:
            s = batch["seg_img"]
            print(f"           seg     ={tuple(s.shape)}")

        break
