import torch
from torch.utils.data import Dataset, ConcatDataset, IterableDataset
from pathlib import Path
import json
import numpy as np
from loguru import logger
import lmdb
import pickle
from reflow.data.utils import get_image_transforms
from glob import glob
from reflow.data.utils import LMDB_ndarray
import random
from tqdm.auto import tqdm
import os
from pathlib import Path


def tokenize_caption_old(caption, tokenizer=None):
    """Tokenize ``caption``; without a tokenizer, pass the raw caption through.

    Args:
        caption: the caption string to tokenize.
        tokenizer: optional HF-style tokenizer. When ``None``, the caption is
            returned untokenized under the ``"caption"`` key.

    Returns:
        ``{"caption": caption}`` when no tokenizer is given, otherwise the
        tokenizer output dict with each tensor's batch dimension squeezed away.
    """
    # identity check, not equality — "tokenizer == None" may invoke __eq__
    if tokenizer is None:
        return {
            "caption": caption,
        }

    tokens_pt = tokenizer(
        caption,
        padding="max_length",
        max_length=tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    # Drop the leading batch dimension added by return_tensors="pt".
    for k, v in tokens_pt.items():
        tokens_pt[k] = v.squeeze()
    return tokens_pt


def get_reflow_dataset(
    data_root,
    tokenizer=None,
    src_type="npy",
    train=False,
    random_flip=False,
    p_uncond=0.0,
):
    """Build a (noise, latent, caption) dataset rooted at ``data_root``.

    If ``data_root`` directly contains ``index.json`` a single dataset is
    returned; otherwise every ``part*`` sub-directory is loaded and the parts
    are merged with ``ConcatDataset``.

    Args:
        data_root: directory holding either ``index.json`` or ``part*`` dirs.
        tokenizer: optional tokenizer forwarded to the dataset class.
        src_type: storage backend, ``"npy"`` or ``"lmdb"``.
        train: forwarded to ``get_image_transforms``.
        random_flip: forwarded to ``get_image_transforms``.
        p_uncond: probability of dropping the caption (classifier-free guidance).

    Raises:
        FileNotFoundError: when no usable parts are found under ``data_root``.
    """
    assert src_type in ["npy", "lmdb"]
    ds_clsx = {
        "npy": DataPairsWithText,
        "lmdb": DataPairsWithTextLMDB,
    }
    image_transforms = get_image_transforms(train=train, random_flip=random_flip)
    ds_cls = ds_clsx[src_type]
    data_root = Path(data_root)
    if (data_root / "index.json").exists():
        ds = ds_cls(
            data_root=str(data_root),
            image_transforms=image_transforms,
            tokenizer=tokenizer,
            p_uncond=p_uncond,
        )
    else:
        # No top-level index: merge all "part*" sub-directories into one dataset.
        proot_list = sorted(glob(str(data_root / "part*")))
        # Keep directories only, in case stray files (e.g. txt) match the glob.
        proot_list = [proot for proot in proot_list if Path(proot).is_dir()]
        logger.info(f"find {len(proot_list)} different parts : {proot_list}")
        ds_list = []
        for proot in proot_list:
            try:
                ds_list.append(
                    ds_cls(
                        data_root=str(proot),
                        image_transforms=image_transforms,
                        tokenizer=tokenizer,
                        p_uncond=p_uncond,
                    )
                )
            except Exception as e:
                # Narrowed from a bare "except:": keep the best-effort loading
                # of parts, but surface the actual failure reason in the log.
                logger.warning(f"{proot} no {src_type}: {e}")
        if not ds_list:
            # Fail with a clear message instead of ConcatDataset's opaque
            # assertion on an empty iterable.
            raise FileNotFoundError(
                f"no usable {src_type} parts found under {data_root}"
            )
        ds = ConcatDataset(ds_list)
    logger.info(f"{len(ds)} items in total")
    return ds


class DataPairsWithTextLMDB(Dataset):
    """(noise, latent) pairs with captions, backed by a single LMDB store.

    Expects ``data_root`` to contain ``index.json`` plus a ``content``
    directory with an ``lmdb`` store and a ``captions.txt`` (one caption
    per line, line i belonging to LMDB entry i).
    """

    def __init__(self, data_root, image_transforms=None, tokenizer=None, p_uncond=0.0):
        data_root = Path(data_root)
        # Context managers so the handles are closed deterministically
        # (the previous bare open() calls leaked them).
        with open(str(data_root / "index.json"), "r") as f:
            self.index_info = json.load(f)
        logger.info(f"dataset basic info:\n{self.index_info}")

        content_dir = data_root / "content"
        self.lmdb_dir = content_dir / "lmdb"
        # Read-only, lock-free open: safe for concurrent DataLoader workers.
        self.env = lmdb.open(
            str(self.lmdb_dir),
            subdir=self.lmdb_dir.is_dir(),
            readonly=True,
            lock=False,
            readahead=False,
            meminit=False,
        )
        with self.env.begin() as txn:
            # Metadata entries written when the LMDB store was created.
            self.length = pickle.loads(txn.get(b"__len__"))
            self.keys = pickle.loads(txn.get(b"__keys__"))

        with open(str(content_dir / "captions.txt"), "r") as f:
            self.all_caps = f.read().splitlines()
        self.image_transforms = image_transforms
        self.tokenizer = tokenizer
        self.p_uncond = p_uncond

    def __getitem__(self, index):
        env = self.env
        with env.begin() as txn:
            byteflow = txn.get(self.keys[index])
        lmdb_array = pickle.loads(byteflow)
        array = lmdb_array.resume_array().copy().astype(np.float32)

        example = {}
        example["noise"], example["latent"] = array  # 2 * (d, h, w) np.ndarray
        if self.image_transforms:
            example["noise"] = self.image_transforms(example["noise"])
            example["latent"] = self.image_transforms(example["latent"])
        caption = self.all_caps[index]
        # Classifier-free guidance: drop the caption with probability p_uncond.
        if random.random() < self.p_uncond:
            caption = ""
        tokens_pt = tokenize_caption_old(caption, self.tokenizer)
        example = {**example, **tokens_pt}
        return example

    def __len__(self):
        return self.length


class DataPairsWithText(Dataset):
    """(noise, latent) pairs with captions, stored as per-index ``.npy`` files.

    Expects ``data_root`` to contain ``index.json`` plus a ``content``
    directory with an ``images/{i}.npy`` file per sample and a
    ``captions.txt`` (one caption per line).
    """

    def __init__(
        self, data_root, image_transforms=None, tokenizer=None, p_uncond=0.0
    ) -> None:
        super().__init__()
        data_root = Path(data_root)
        # Context managers so the handles are closed deterministically
        # (the previous bare open() calls leaked them).
        with open(str(data_root / "index.json"), "r") as f:
            self.index_info = json.load(f)
        logger.info(f"dataset basic info:\n{self.index_info}")

        content_dir = data_root / "content"
        self.image_dir = content_dir / "images"
        with open(str(content_dir / "captions.txt"), "r") as f:
            self.all_caps = f.read().splitlines()
        self.nums = len(self.all_caps)

        self.image_transforms = image_transforms
        self.tokenizer = tokenizer
        self.p_uncond = p_uncond

    def __len__(self):
        return self.nums

    def __getitem__(self, i):
        pair = np.load(str(self.image_dir / f"{i}.npy")).astype(np.float32)
        example = {}
        example["noise"], example["latent"] = pair  # 2 * (d, h, w) np.ndarray
        if self.image_transforms:
            example["noise"] = self.image_transforms(example["noise"])
            example["latent"] = self.image_transforms(example["latent"])
        caption = self.all_caps[i]
        # Classifier-free guidance: drop the caption with probability p_uncond.
        if random.random() < self.p_uncond:
            caption = ""
        tokens_pt = tokenize_caption_old(caption, self.tokenizer)
        example = {**example, **tokens_pt}
        return example

    def add_transforms_(self, image_transforms=None, tokenizer=None):
        """Attach transforms/tokenizer after construction (in place, hence the trailing underscore)."""
        if image_transforms:
            self.image_transforms = image_transforms
        if tokenizer:
            self.tokenizer = tokenizer


# # ! deprecated
# def collate_fn(examples, tokenizer):
#     noise = torch.stack([example["noise"] for example in examples])
#     noise = noise.to(memory_format=torch.contiguous_format).float()
#     latent = torch.stack([example["latent"] for example in examples])
#     latent = latent.to(memory_format=torch.contiguous_format).float()

#     input_ids = [example["input_ids"] for example in examples]
#     padded_tokens = tokenizer.pad(
#         {"input_ids": input_ids}, padding=True, return_tensors="pt")

#     return {
#         "noise": noise,
#         "latent": latent,
#         "input_ids": padded_tokens.input_ids,
#         "attention_mask": padded_tokens.attention_mask,
#     }


class ListDataset(IterableDataset):
    """Infinite iterable dataset yielding uniformly sampled items from a list.

    Each (device, worker) pair derives its own deterministic seed, so samples
    differ across workers while runs stay reproducible.
    """

    def __init__(
        self,
        data_list,
        random_seed=0,
        device_index=0,
    ):
        super().__init__()
        self.data_list = data_list

        self.base_random_seed = random_seed
        self.device_index = device_index

    def random_seed(self, worker_id=0, num_workers=1):
        """Derive a per-worker seed from the base seed, device index and worker id."""
        return self.base_random_seed + self.device_index * num_workers + worker_id

    def random_loop(self, random_seed):
        # Use a private Random instance instead of random.seed(): seeding the
        # global RNG mutates interpreter-wide state and would silently reseed
        # unrelated sampling (e.g. the p_uncond draws in other datasets).
        # random.Random(seed) yields the exact same sequence as the seeded
        # global generator, so per-worker output is unchanged.
        rng = random.Random(random_seed)
        while True:
            rnd_idx = rng.randint(0, len(self.data_list) - 1)
            prompt = self.data_list[rnd_idx]
            example = {"prompt": prompt}
            yield example

    def __iter__(self):
        # Determine the current worker context (if any) to pick the seed.
        worker_info = torch.utils.data.get_worker_info()
        if worker_info is not None:
            random_seed = self.random_seed(worker_info.id, worker_info.num_workers)
        else:
            random_seed = self.random_seed()
        return self.random_loop(random_seed)


class PromptsDataset(ListDataset):
    """Infinite prompt sampler whose prompt list is read from a caption file.

    Currently only ``.txt`` files (one prompt per line) are supported.

    Raises:
        NotImplementedError: for any suffix other than ``.txt``.
    """

    def __init__(
        self,
        caption_path,
        random_seed=0,
        device_index=0,
    ):
        caption_path = Path(caption_path)

        if caption_path.suffix == ".txt":
            # read_text() closes the handle, unlike the previous bare open().
            prompts = caption_path.read_text().splitlines()
        else:
            raise NotImplementedError(f"not supported suffix {caption_path.suffix}")

        super().__init__(prompts, random_seed, device_index)


# NOTE: the COCO data here is assumed to have its long edge <= 640px
import torchvision
import torchvision.transforms as T

def tokenize_caption(caption: str, tokenizer):
    """Tokenize ``caption`` to fixed-length tensors, tolerating dirty inputs.

    NOTE: the raw data occasionally contains buggy entries — ``None`` becomes
    the empty caption, and a list is joined into one string with non-str
    elements dropped, so the tokenizer always receives a ``str``.

    Returns:
        The tokenizer output with each tensor's batch dimension squeezed away.
    """
    # identity check, not equality — "caption == None" may invoke __eq__
    if caption is None:
        caption = ""
    elif isinstance(caption, list):
        caption = " ".join([w for w in caption if isinstance(w, str)])
    tokenized = tokenizer(
        caption,
        max_length=tokenizer.model_max_length,
        padding="max_length",
        truncation=True,
        return_tensors="pt",
    )
    for k, v in tokenized.items():
        tokenized[k] = v.squeeze()
    return tokenized

class CocoCaption(Dataset):
    """MS-COCO image/caption dataset wrapper around torchvision's CocoCaptions.

    Args:
        data_root: COCO root holding ``{phase}{version}`` images and
            ``annotations/captions_{phase}{version}.json``.
        version: "2014" or "2017".
        phase: "train" or "val".
        image_transforms: transform applied to each PIL image; defaults to
            a bare ``ToTensor()``.
        tokenizer: tokenizer passed to ``tokenize_caption``.
        use_hf_key_format: if True, return HF-style keys
            (``pixel_values`` / ``input_ids`` / ``attention_mask``).
        p_uncond: probability of replacing the caption with "" (CFG dropout).
    """

    def __init__(self, data_root, version="2014", phase="train", image_transforms=None, tokenizer=None, use_hf_key_format=False, p_uncond=0.1):
        image_dir = os.path.join(data_root, f"{phase}{version}")
        json_file_path = os.path.join(data_root, "annotations", f"captions_{phase}{version}.json")

        # identity check, not equality — "image_transforms == None" may invoke __eq__
        if image_transforms is None:
            image_transforms = T.Compose(
                [
                    T.ToTensor(),
                ]
            )
        self._coco_ds = torchvision.datasets.CocoCaptions(
            root=image_dir, annFile=json_file_path, transform=image_transforms
        )
        self.tokenizer = tokenizer
        self.use_hf_key_format = use_hf_key_format
        self.p_uncond = p_uncond

    def __len__(self):
        return len(self._coco_ds)

    def __getitem__(self, i):
        image, captions = self._coco_ds[i]

        # Classifier-free guidance: with probability p_uncond use an empty
        # caption; otherwise pick one of the reference captions at random.
        if random.random() < self.p_uncond:
            caption = ""
        else:
            caption = captions[random.randint(0, len(captions) - 1)]
            # TODO: COCO captions may need further cleaning (dirty entries exist).
            caption = caption.rstrip(" \n")  # strip trailing spaces / newlines
        tokenized = tokenize_caption(caption, self.tokenizer)
        if self.use_hf_key_format:
            example = {
                'pixel_values': image,
                'input_ids': tokenized.input_ids,
                'attention_mask': tokenized.attention_mask,
            }
        else:
            example = {
                'image': image,
                'token': tokenized,
                'caption': caption,
            }
        return example


if __name__ == "__main__":
    # Intentionally a no-op: this module is used as a library; no CLI here.
    ...
