# Takes captions.txt as input
# Each caption is used to:
# - find the corresponding source image and encode it into a latent via the VAE
# - retrieve the corresponding noise from a given reflow dataset

# Multi-process
# Only the VAE is loaded


import sys
sys.path.append('.')
from pathlib import Path
import os
import multiprocessing as mp
import json
from tqdm.auto import tqdm
import numpy as np
import random
import time
from argparse import Namespace, ArgumentParser
from loguru import logger
import torch
from glob import glob
from collections import defaultdict
import torchvision.transforms as T
from torch.utils.data import Dataset, DataLoader
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
import random
from diffusers import AutoencoderKL


from reflow.data.utils import LMDB_ndarray, data2lmdb
from reflow.data.dataset import get_reflow_dataset
from reflow.utils import set_seed, devide_to_groups, scale_image



def get_transforms(resolution):
    """Build the image preprocessing pipeline: resize, center-crop, tensorize, rescale."""
    return T.Compose([
        T.Resize(resolution, InterpolationMode.BILINEAR),
        T.CenterCrop(resolution),
        T.ToTensor(),  # yields floats in [0, 1]
        scale_image,   # presumably maps [0, 1] -> [-1, 1] for the VAE -- confirm in reflow.utils
    ])

def get_rnd_noise(prompt, retrive_noise=False, cap_to_noise_ids=None, reflow_ds=None):
    """Return one noise tensor per caption in `prompt`.

    When `retrive_noise` is False, draws fresh Gaussian noise of shape
    (len(prompt), 4, 64, 64). Otherwise, for each caption, picks one of its
    matching noise entries from `reflow_ds` uniformly at random, falling back
    to fresh Gaussian noise for captions with no match.

    Args:
        prompt: sequence of caption strings.
        retrive_noise: whether to retrieve noise from the reflow dataset.
        cap_to_noise_ids: mapping caption -> list of indices into `reflow_ds`
            (required when `retrive_noise` is True).
        reflow_ds: dataset where `reflow_ds[i]['noise']` is a (4, 64, 64) tensor.

    Returns:
        torch.Tensor of shape (len(prompt), 4, 64, 64).
    """
    if not retrive_noise:
        return torch.randn(len(prompt), 4, 64, 64)

    all_noise = []
    for p in prompt:
        noise_ids = cap_to_noise_ids[p]
        if noise_ids:
            # Pick one matching reflow entry uniformly at random.
            rnd_noise = reflow_ds[random.choice(noise_ids)]['noise']
        else:
            # No retrieved match for this caption: fall back to fresh noise.
            rnd_noise = torch.randn(4, 64, 64)
        all_noise.append(rnd_noise)
    return torch.stack(all_noise, dim=0)
    
def get_latent_var(batch, vae, scaling_factor=0.18215):
    """Encode an image batch with the VAE and return scaled latents.

    Args:
        batch: preprocessed image tensor batch; moved to the VAE's
            device/dtype here before encoding.
        vae: an AutoencoderKL-style model exposing .encode/.device/.dtype.
        scaling_factor: latent scaling constant; 0.18215 is the
            Stable-Diffusion-family default.

    Returns:
        Sampled latent tensor, scaled by `scaling_factor`.
    """
    posterior = vae.encode(batch.to(vae.device, dtype=vae.dtype)).latent_dist
    # Sample stochastically; use posterior.mode() instead for deterministic latents.
    latent_var = posterior.sample()
    return latent_var * scaling_factor
    

class CocoImgDS(Dataset):
    """Dataset yielding one preprocessed COCO image per caption.

    Images are looked up lazily in __getitem__ via the caption -> path map.
    """

    def __init__(self, caps, cap_to_img_path, image_transform=None) -> None:
        super().__init__()
        self.caps = caps
        self.cap_to_img_path = cap_to_img_path
        self.image_transform = image_transform

    def __len__(self):
        return len(self.caps)

    def __getitem__(self, index):
        caption = self.caps[index]
        image = Image.open(self.cap_to_img_path[caption]).convert('RGB')
        return self.image_transform(image) if self.image_transform else image

@torch.no_grad()
def main(caps, idx_se, device, args, save_dir:Path, image_dir: Path, cap_to_img_path:dict=None, cap_to_noise_ids:dict=None):
    """Worker process: encode images to VAE latents, pair each with a noise
    tensor, and save one (noise, latent) array per caption as '{global_index}.npy'.

    Args:
        caps: the slice of captions assigned to this worker.
        idx_se: (start, end) global indices of this worker's slice; start offsets
            the local batch indices into global file names. NOTE(review): the end
            value is unpacked but never used.
        device: CUDA device string for this worker, e.g. 'cuda:0'.
        args: parsed run configuration (bs, resolution, retrive_noise, ...).
        save_dir: run output directory; this worker appends to its run.log.
        image_dir: directory receiving the per-sample .npy arrays.
        cap_to_img_path: caption -> source image path.
        cap_to_noise_ids: caption -> indices into the reflow dataset
            (only used when args.retrive_noise is set).
    """
    # Load only the VAE (fp16), frozen and in eval mode -- no full pipeline needed.
    dtype=torch.float16
    vae=AutoencoderKL.from_pretrained(
        'checkpoints/AltDiffusion', 
        torch_dtype=dtype,
        subfolder='vae')
    vae.requires_grad_(False).eval().to(device)
    
    # Each worker adds its own sink; loguru serializes concurrent writes.
    logger.add(str(save_dir / 'run.log'))
    
    if args.retrive_noise:
        reflow_ds = get_reflow_dataset(
            args.reflow_ds_path,
            src_type='lmdb',
        )
    else:
        reflow_ds=None
        
    # shuffle=False keeps DataLoader batches aligned with the caption slices
    # consumed in the loop below -- both advance in the same fixed order.
    ds = CocoImgDS(
        caps, cap_to_img_path, get_transforms(args.resolution)
    )
    dl=DataLoader(ds, batch_size=args.bs, shuffle=False, num_workers=args.dl_workers)
    dl_iter=iter(dl)
    
    i_start, i_end = idx_se
    idx = 0
    bs=args.bs
    time_start = time.time()
    while idx < len(caps):
        # Shrink the final batch so it does not run past the end of caps.
        if idx+bs >= len(caps):
            bs = len(caps)-idx
        prompt = caps[idx:idx+bs]
        # Global sample indices for this batch (used as output file names).
        s, e = idx+i_start, idx+bs+i_start
        
        # Random noise for this batch -- possibly retrieved from the reflow dataset.
        rnd_noise = get_rnd_noise(prompt, retrive_noise=args.retrive_noise, cap_to_noise_ids=cap_to_noise_ids, reflow_ds=reflow_ds)
        
        # Latent variables: the DataLoader yields the preprocessed images for
        # exactly these prompts; encode them with the VAE.
        batch = next(dl_iter)
        latent_var = get_latent_var(batch, vae,)
        
        # Stack to (batch, 2, 4, 64, 64): index 0 = noise, index 1 = latent,
        # then save one array per sample under its global index.
        rnd_noise = rnd_noise.to(dtype=vae.dtype).cpu()
        latent_var = latent_var.to(dtype=vae.dtype).cpu()
        imgs2save = torch.stack([rnd_noise, latent_var], dim=0).transpose(0, 1).numpy()
        for i, img2save in enumerate(imgs2save, start=s):
            np.save(str(image_dir / f'{i}.npy'), img2save)
            
        idx += bs

        logger.info(
            f'{device}: [{idx}/{len(caps)}] ; time elapased {time.time()-time_start:.3f}')
        
def save_config(args, save_path):
    """Serialize the run configuration to JSON, omitting the 'save_dir' entry.

    Works on a copy of the namespace dict: vars(args) returns the live
    __dict__, so the original pop('save_dir') deleted the attribute from
    the caller's Namespace as a side effect.

    Args:
        args: argparse Namespace of run options.
        save_path: destination path for the JSON file.
    """
    config2save = dict(vars(args))  # copy -- do not mutate the caller's args
    config2save.pop('save_dir', None)
    with open(save_path, 'w') as f:
        json.dump(config2save, f)
    
def prepare_args():
    """Build the run configuration from defaults overlaid with CLI flags.

    Defaults are pre-set on the Namespace before parsing. With
    parse_args(namespace=args), argparse only applies its own defaults for
    attributes the namespace does not already have, so a pre-set attribute
    survives unless its flag is given on the command line.

    Returns:
        argparse.Namespace with fields: caption_path, save_dir, seed,
        devices (list[int]), split, retrive_noise, reflow_ds_path,
        resolution, dl_workers, bs.
    """
    args = Namespace()
    args.caption_path = ''
    args.save_dir = 'data/test_data_gen'
    args.seed = 23
    args.devices = [0]
    args.split=''

    # receive args from cmd
    parser = ArgumentParser()
    parser.add_argument(
        "--caption_path",
        type=str,
    )
    parser.add_argument(
        "--save_dir",
        type=str,
    )
    parser.add_argument(
        "--devices",
        type=str,
    )
    parser.add_argument(
        "--seed",
        type=int,
    )
    parser.add_argument(
        "--split",
        type=str,
    )
    parser.add_argument(
        "--retrive_noise",
        action="store_true",
    )
    parser.add_argument(
        "--reflow_ds_path",
        type=str,
    )
    parser.add_argument(
        "--resolution",
        type=int,
        default=512,
    )
    parser.add_argument(
        "--dl_workers",
        type=int,
        default=0,
    )
    parser.add_argument(
        "--bs",
        type=int,
        default=1,
    )
    parser.parse_args(namespace=args)
    
    # A CLI-supplied --devices arrives as a comma-separated string (e.g. "0,1,");
    # normalize it to a list of ints. The default is already a list, hence the check.
    if isinstance(args.devices, str):
        args.devices = [int(i) for i in (args.devices).rstrip(',').split(',')]
    if args.retrive_noise:
        # Noise retrieval needs a reflow dataset to retrieve from.
        assert args.reflow_ds_path
    return args



if __name__ == "__main__":
    # 'spawn' so each worker process can safely initialize its own CUDA context.
    mp.set_start_method('spawn')
    
    args = prepare_args()
    
    # # ! debug
    # ############################
    # args.devices=[0,1]
    # args.caption_path='tmp/coco2014_train_random1000.txt'
    # args.save_dir='data/test_data_gen'
    # args.split='train'
    # args.retrive_noise=True
    # args.reflow_ds_path='data/coco2014_reflow/alt_gen_train5M'
    # args.bs=50
    # args.dl_workers=4
    # ############################

    set_seed(args.seed)
    save_dir = Path(args.save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    
    content_dir = save_dir / 'content'
    content_dir.mkdir(parents=True, exist_ok=True)
    
    image_dir = content_dir / 'images'
    image_dir.mkdir(parents=True, exist_ok=True)
    
    # Keep a copy of the input captions next to the generated data.
    os.system(f"cp {args.caption_path} {str(content_dir / 'captions.txt')}")
    all_caps = open(args.caption_path, 'r').read().splitlines()
    args.num_samples = len(all_caps)
    logger.add(str(save_dir / f'run.log'))
    logger.info(f'total samples {args.num_samples}')
    
    device_list = [f'cuda:{i}' for i in args.devices]
    num_workers = len(device_list)
    logger.info(f'use devices {args.devices} ; {num_workers} in total')
    
    # Load the COCO annotation json and build the caption -> image-path
    # (and, optionally, caption -> noise-ids) mappings.
    assert args.split in ['train', 'val']
    json_path = f'data/coco2014/annotations/captions_{args.split}2014.json'
    json_info = json.load(open(json_path, 'r'))
    
    # format like : data/coco2014/val2014/COCO_val2014_000000000042.jpg
    cap_to_img_path = {}
    for a in tqdm(json_info['annotations']):
        cap = a['caption']
        # ! some COCO captions are malformed and need normalizing
        cap = cap.replace('\n', '').strip(' ')
        cap_to_img_path[cap] = f"data/coco2014/{args.split}2014/COCO_{args.split}2014_{a['image_id']:012}.jpg"
        
    if args.retrive_noise:
        # Collect every caption file under the reflow dataset dir and concatenate
        # them in sorted order, so caption index == noise index in the reflow dataset.
        reflow_ds_cap_paths = sorted(glob(f'{args.reflow_ds_path}/**/*.txt', recursive=True))
        reflow_ds_caps = []
        for cap_path in reflow_ds_cap_paths:
            reflow_ds_caps.extend(open(cap_path).read().splitlines())
        cap_to_noise_ids = defaultdict(list)
        for idx, cap in tqdm(enumerate(reflow_ds_caps)):
            cap_to_noise_ids[cap].append(idx)
    else:
        cap_to_noise_ids=None
        
    
    save_config(args, save_path= str(save_dir / 'index.json'))
    # Split the captions into contiguous per-worker slices with their global
    # (start, end) index ranges.
    groups, groups_se = devide_to_groups(all_caps, num_workers)
    
    workers = []
    main_func_args = (
        args,
        save_dir,
        image_dir,
        cap_to_img_path, 
        cap_to_noise_ids,
    )
    
    # One worker process per device, each handling its own caption slice.
    for i in range(num_workers):
        p = mp.Process(target=main, args=(
            groups[i], groups_se[i], device_list[i],) + main_func_args)
        p.start()
        workers.append(p)
        logger.info(f'process in {device_list[i]} started')
        
    for p in workers:
        p.join()
        
    logger.info(f'done')
    
    # Pack the per-sample .npy files into a single LMDB for faster reads.
    logger.info(f'converting npy data to lmdb')
    data2lmdb(str(image_dir))
    logger.info(f'lmdb construction completion')