# Minimal pipeline: randomly load images from the COCO dataset, encode each image into a VAE latent, sample random noise, store each (noise, latent) pair as an .npy file with its caption; finally pack everything into an lmdb database.

import sys
sys.path.append('.')
from pathlib import Path
import os
import multiprocessing as mp
import json
from tqdm.auto import tqdm
import numpy as np
import time
from argparse import Namespace, ArgumentParser
from loguru import logger
import torch
import torchvision.transforms as T
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from diffusers import AutoencoderKL


from reflow.data.utils import LMDB_ndarray, data2lmdb
from reflow.utils import set_seed, devide_to_groups, cycle, scale_image

class CocoDataset(Dataset):
    """COCO-2014 captions dataset yielding {'caption': str, 'image': PIL/tensor} items.

    One item per *annotation* (an image with multiple captions appears once
    per caption, with its path repeated).
    """

    def __init__(self, data_root, split, image_transform=None) -> None:
        super().__init__()
        assert split in ['train', 'val']
        self.data_root = data_root
        self.split = split
        self.image_transform = image_transform

        json_path = f'{data_root}/annotations/captions_{split}2014.json'
        with open(json_path, 'r') as fp:
            json_info = json.load(fp)

        caps, paths = [], []
        for ann in tqdm(json_info['annotations']):
            # ! some COCO captions contain stray newlines / padding spaces; normalize them
            caps.append(ann['caption'].replace('\n', '').strip(' '))
            paths.append(f"{data_root}/{split}2014/COCO_{split}2014_{ann['image_id']:012}.jpg")
        self.image_id_to_caps = caps
        self.image_id_to_paths = paths

    def __len__(self):
        return len(self.image_id_to_caps)

    def __getitem__(self, i):
        caption = self.image_id_to_caps[i]
        img = Image.open(self.image_id_to_paths[i]).convert('RGB')
        if self.image_transform:
            img = self.image_transform(img)
        return {
            'caption': caption,
            'image': img,
        }

    
def get_transforms(resolution):
    """Return the image preprocessing pipeline: resize -> center-crop -> tensor -> scale.

    ToTensor yields values in [0, 1]; scale_image presumably remaps them to
    [-1, 1] for the VAE — confirm against reflow.utils.
    """
    return T.Compose([
        T.Resize(resolution, InterpolationMode.BILINEAR),
        T.CenterCrop(resolution),
        T.ToTensor(),
        scale_image,
    ])



@torch.no_grad()
def main(seq, idx_se, device, caption_path:Path, args, save_dir:Path, image_dir: Path,  ):
    """Worker process: encode random COCO images into VAE latents paired with noise.

    For each assigned global index i, writes image_dir/{i}.npy containing a
    float16 array of shape (2, 4, 64, 64): [random noise, scaled VAE latent].
    The matching captions are written, one per line, to this worker's private
    caption_path; the parent process merges all partial files afterwards.

    Args:
        seq: sample indices assigned to this worker (only its length is used).
        idx_se: (start, end) global index range for this worker.
        device: CUDA device string the VAE is moved to.
        caption_path: per-process caption text file (recreated from scratch).
        args: parsed CLI namespace (uses split, resolution, bs, dl_workers).
        save_dir: run directory holding the shared log file.
        image_dir: destination directory for the .npy samples.
    """
    # Load the frozen fp16 VAE used to encode images into latents.
    dtype=torch.float16
    vae=AutoencoderKL.from_pretrained(
        'checkpoints/AltDiffusion', 
        torch_dtype=dtype,
        subfolder='vae')
    vae.requires_grad_(False).eval().to(device)

    logger.add(str(save_dir / 'run.log'))

    coco_ds = CocoDataset(
        "data/coco2014",
        split=args.split,
        image_transform=get_transforms(args.resolution),
    )
    # NOTE shuffle=True so each spawned process draws a different random stream
    dl=DataLoader(coco_ds, batch_size=args.bs, shuffle=True,num_workers=args.dl_workers)
    dl=cycle(dl)

    i_start, i_end = idx_se
    idx = 0
    bs=args.bs
    time_start = time.time()
    # 'w' truncates any stale file from a previous run; the context manager
    # guarantees the handle is closed even if encoding fails mid-loop
    # (the original remove + open('a') leaked the handle on exceptions).
    with caption_path.open('w') as caption_file:
        while idx < len(seq):
            # shrink the final batch so exactly len(seq) samples are emitted
            if idx+bs >= len(seq):
                bs = len(seq)-idx
            s, e = idx+i_start, idx+bs+i_start
            batch = next(dl)
            batch = {k:v[:bs] for k,v in batch.items()}
            prompt, images = batch['caption'], batch['image']

            # random Gaussian noise paired with each latent
            rnd_noise = torch.randn((bs,4,64,64))
            # encode to the latent distribution and draw a stochastic sample
            posterior = vae.encode(images.to(vae.device, dtype=vae.dtype)).latent_dist
            latent_var = posterior.sample() 
            # latent_var = posterior.mode()
            latent_var = latent_var * 0.18215  # SD/AltDiffusion latent scaling factor

            rnd_noise = rnd_noise.to(dtype=vae.dtype).cpu()
            latent_var = latent_var.to(dtype=vae.dtype).cpu()
            # stack to (bs, 2, 4, 64, 64): [noise, latent] per sample
            imgs2save = torch.stack([rnd_noise, latent_var], dim=0).transpose(0, 1).numpy()

            for i, img2save in enumerate(imgs2save, start=s):
                np.save(str(image_dir / f'{i}.npy'), img2save)

            # one caption per line; read back later with .read().splitlines()
            caption_file.write('\n'.join(prompt)+'\n')

            idx += bs

            logger.info(
                f'{device}: [{idx}/{len(seq)}] ; time elapased {time.time()-time_start:.3f}')

def save_config(args, save_path):
    """Serialize args to a JSON file, omitting 'save_dir', without mutating args.

    The original popped from vars(args) directly, which is the namespace's
    live __dict__ — that silently deleted args.save_dir from the caller's
    object. Copy first, and close the output file deterministically.
    """
    config2save = dict(vars(args))
    config2save.pop('save_dir', None)
    with open(save_path, 'w') as f:
        json.dump(config2save, f)
    
def prepare_args():
    """Build the run configuration: hard-coded defaults overridable via CLI flags.

    Pre-set namespace attributes survive parse_args (argparse only applies a
    default when the attribute is absent from the namespace). '--devices'
    accepts a comma-separated string and is normalized to a list of ints.
    """
    args = Namespace(
        save_dir='data/test_data_gen',
        seed=23,
        devices=[0],
        split='',
        num_samples=10,
    )

    parser = ArgumentParser()
    parser.add_argument("--save_dir", type=str)
    parser.add_argument("--devices", type=str)
    parser.add_argument("--seed", type=int)
    parser.add_argument("--num_samples", type=int)
    parser.add_argument("--split", type=str)
    parser.add_argument("--resolution", type=int, default=512)
    parser.add_argument("--dl_workers", type=int, default=0)
    parser.add_argument("--bs", type=int, default=1)
    parser.parse_args(namespace=args)

    if isinstance(args.devices, str):
        args.devices = [int(d) for d in args.devices.rstrip(',').split(',')]

    return args


if __name__ == "__main__":
    # 'spawn' is required: each worker creates its own CUDA context, which
    # is unsafe under the default 'fork' start method.
    mp.set_start_method('spawn')
    
    args = prepare_args()
    
    # # ! debug
    # ############################
    # args.devices=[0]
    # args.save_dir='data/test_data_gen'
    # args.split='val'
    # args.num_samples = 10
    # args.bs=5
    # args.dl_workers=4
    # ############################

    set_seed(args.seed)
    save_dir = Path(args.save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    
    content_dir = save_dir / 'content'
    content_dir.mkdir(parents=True, exist_ok=True)
    
    # per-sample .npy files land here before being packed into lmdb
    image_dir = content_dir / 'images'
    image_dir.mkdir(parents=True, exist_ok=True)
        
    assert args.num_samples

    logger.add(str(save_dir / f'run.log'))
    logger.info(f'total samples {args.num_samples}')
    
    device_list = [f'cuda:{i}' for i in args.devices]
    num_workers = len(device_list)
    logger.info(f'use devices {args.devices} ; {num_workers} in total')

    # snapshot the run configuration (minus save_dir) next to the outputs
    save_config(args, save_path= str(save_dir / 'index.json'))
    # split the sample indices into one contiguous group per device;
    # groups_se holds each group's (start, end) global index range
    groups, groups_se = devide_to_groups(list(range(args.num_samples)), num_workers)
    
    workers = []
    main_func_args = (
        args,
        save_dir,
        image_dir,
    )
    
    # one worker process per CUDA device; each writes its own caption file
    for i in range(num_workers):
        p = mp.Process(target=main, args=(
            groups[i], groups_se[i], device_list[i], content_dir / f'captions_part{i}.txt') + main_func_args)
        p.start()
        workers.append(p)
        logger.info(f'process in {device_list[i]} started')
        
    for p in workers:
        p.join()
        
    logger.info(f'done')
    
    # pack the per-sample .npy files into a single lmdb database
    logger.info(f'converting npy data to lmdb')
    data2lmdb(str(image_dir))
    logger.info(f'lmdb construction completion')
    
    # concatenate the per-worker caption files in worker order (matching the
    # global sample index order) into one file, deleting the partial files
    logger.info(f'merging all prompts to one file')
    all_caps = []
    for i in range(num_workers):
        with (content_dir / f'captions_part{i}.txt').open('r') as f:
            all_caps.extend(f.read().splitlines())
        os.remove(content_dir / f'captions_part{i}.txt')
    caption_path = content_dir / 'captions.txt'
    if caption_path.exists():
        os.remove(str(caption_path))
    with caption_path.open('w') as f:
        f.write('\n'.join(all_caps))
    logger.info(f'merging prompts done')
    