"""
需要产生符合 reflow 训练的数据形式: <rnd noise , latent var , prompt>
支持使用一般的 diffusion pipeline 和 scheduler 

如果使用 reflow 模型进行采样需要准备：
- 用 reflow ckpt 替换 diffusion checkpoints 里面的 unet/diffusion_pytorch_model.bin
- 使用 EulerDummyScheduler

从指定路径加载准备好的 captions , 根据这些 captions 来采样

不能直接使用 diffusers 的 pipeline 的 __call__ 函数, 因为其只能输出最终的图片，不能输出隐变量，需要自己重新定义一下 __call__ 函数

可以修改 import 来使用 oneflow 框架加速采样: 需要 oneflow as torch , oneflow 版本的 diffusers , 在 pipeline __call__ 中加入 compile 部分的代码 ; 
! 注意静态图不支持可变的 batch size (所以需要提前计算好)

超参数：
- captions.txt 文件路径
- 数据保存的路径 save_dir
- num_samples
- devices
- 
"""
import sys
sys.path.append('.')
from pathlib import Path
import os
import multiprocessing as mp
import json
import numpy as np
import time
from argparse import ArgumentParser
from typing import Callable, List, Optional, Union
from loguru import logger
import torch

from reflow.data.utils import LMDB_ndarray, data2lmdb

from reflow.utils import set_seed, nothing, _PIPELINES, _SCHEDULERS, devide_to_groups

# Toggle for the oneflow-accelerated sampling path (static-graph unet compile).
use_oneflow=False
# use_oneflow=False # ! the oneflow diffusers currently has bugs; better to keep this always False
# if use_oneflow:
#     import oneflow as torch
#     from diffusers import OneFlowAltDiffusionPipeline as AltDiffusionPipeline
#     from diffusers import OneFlowDPMSolverMultistepScheduler as DPMSolverMultistepScheduler
#     from diffusers.pipelines.alt_diffusion.pipeline_alt_diffusion_oneflow import UNetGraph
# else:
#     import torch
#     from diffusers import AltDiffusionPipeline
#     from diffusers import DPMSolverMultistepScheduler
    
# from reflow.schedulers.scheduling_euler_dummy import EulerDummyScheduler


@torch.no_grad()
def pipe_call(
    pipeline,
    prompt: Union[str, List[str]],
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    num_images_per_prompt: Optional[int] = 1,
    eta: float = 0.0,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
    callback_steps: Optional[int] = 1,
    stop_step: int = -1,
):
    """Custom denoising loop that returns the final *latents* instead of images.

    Unlike the stock diffusers pipeline ``__call__``, the result is never decoded
    by the VAE, so the latent tensor can be paired with the initial noise to form
    reflow training samples.

    Args:
        pipeline: a diffusers text-to-image pipeline (e.g. AltDiffusionPipeline).
        prompt: one prompt or a batch of prompts.
        latents: optional pre-drawn initial noise; if None the pipeline samples it.
        stop_step: break out of the loop after this many denoising steps
            (-1 = run all ``num_inference_steps``); an early stop yields an
            intermediate latent that cannot be decoded into a sane image.
        (The remaining arguments mirror the diffusers pipeline signature;
        ``output_type``, ``return_dict`` and ``callback`` are accepted for
        interface compatibility but unused here.)

    Returns:
        torch.FloatTensor: the latent tensor after the (possibly truncated) loop.
    """
    compile_unet = use_oneflow

    # 0. Default height and width to unet
    height = height or pipeline.unet.config.sample_size * pipeline.vae_scale_factor
    width = width or pipeline.unet.config.sample_size * pipeline.vae_scale_factor

    # 1. Check inputs. Raise error if not correct
    pipeline.check_inputs(prompt, height, width, callback_steps)

    # 2. Define call parameters
    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = pipeline._execution_device
    # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 3. Encode input prompt
    text_embeddings = pipeline._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
    )

    # 4. Prepare timesteps
    pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = pipeline.scheduler.timesteps

    # 5. Prepare latent variables
    num_channels_latents = pipeline.unet.in_channels
    latents = pipeline.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        text_embeddings.dtype,
        device,
        generator,
        latents,
    )

    # Compile the unet as an oneflow static graph (only when use_oneflow is on).
    # NOTE(review): `UNetGraph` comes from the commented-out oneflow imports at the
    # top of this file, so this branch raises NameError unless they are restored.
    if compile_unet:
        cache_key = (height, width, num_images_per_prompt)
        unet_graph = pipeline.graph_compile_cache.get_graph(
            UNetGraph, cache_key, pipeline.unet)
        if unet_graph.is_compiled is False:
            latent_model_input = torch.cat(
                [latents] * 2) if do_classifier_free_guidance else latents
            _, t = list(enumerate(pipeline.scheduler.timesteps))[0]
            unet_graph.compile(latent_model_input, t, text_embeddings)

    # 6. Prepare extra step kwargs (eta/generator for schedulers that accept them).
    extra_step_kwargs = pipeline.prepare_extra_step_kwargs(generator, eta)

    # 7. Denoising loop, optionally truncated at `stop_step`.
    if stop_step == -1:
        stop_step = num_inference_steps
    for i, t in enumerate(timesteps):
        if i == stop_step:
            break

        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipeline.scheduler.scale_model_input(
            latent_model_input, t)

        # predict the noise residual
        if compile_unet:
            torch._oneflow_internal.profiler.RangePush(
                f"denoise-{i}-unet-graph")
            noise_pred = unet_graph(latent_model_input, t, text_embeddings)
            torch._oneflow_internal.profiler.RangePop()
        else:
            noise_pred = pipeline.unet(
                latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # perform classifier-free guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * \
                (noise_pred_text - noise_pred_uncond)

        # compute the previous noisy sample x_t -> x_t-1
        latents = pipeline.scheduler.step(
            noise_pred, t, latents, **extra_step_kwargs).prev_sample

    return latents


def save_config(args, save_path):
    """Serialize the parsed CLI arguments to a JSON file at `save_path`.

    The `save_dir` entry is omitted from the dump (the config lives inside that
    directory already). The namespace is copied first: ``vars(args)`` returns the
    live ``__dict__``, so popping from it directly would mutate the caller's
    `args` object.
    """
    config2save = dict(vars(args))
    config2save.pop('save_dir', None)
    with open(save_path, 'w') as f:
        json.dump(config2save, f)


def main(caps, idx_se, device, args, save_dir: Path, image_dir: Path, ):
    """Worker entry point: sample <noise, latent> pairs for one caption slice.

    Args:
        caps: the captions this worker is responsible for.
        idx_se: (start, end) global indices of this slice; `start` offsets the
            output filenames so each worker writes a disjoint range.
        device: device string for this worker (e.g. 'cuda:0' or 'cpu').
        args: parsed CLI arguments (see prepare_args).
        save_dir: run directory, used for the per-worker log sink.
        image_dir: directory where `<index>.npy` files are written; each file
            holds a (2, C, H, W) array stacking [initial noise, final latent].
    """
    set_seed(args.seed)

    pipeline_cls = _PIPELINES[args.pipeline]
    scheduler_cls = _SCHEDULERS[args.scheduler]

    dtype = torch.float16
    pipeline = pipeline_cls.from_pretrained(
        args.pipeline_ckpt,
        torch_dtype=dtype,
        safety_checker=None,
        requires_safety_checker=False,
    )
    if args.use_xformers:
        try:
            pipeline.enable_xformers_memory_efficient_attention()
        except Exception as e:
            logger.warning(
                f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
            )
    # Optionally swap in a (reflow) unet checkpoint.
    if not nothing(args.ckpt_path):
        pipeline.unet.load_state_dict(torch.load(args.ckpt_path, map_location='cpu'), strict=True)
        logger.info(
            f'use ckpt <<{args.ckpt_path}>>')
    else:
        logger.warning(f'not specify pretrained score model path')
    pipeline = pipeline.to(device)
    if args.scheduler == 'euler_dummy':  # NOTE only working for reflow model with target=z1-z0
        pipeline.scheduler = scheduler_cls()
    else:
        pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)

    logger.add(str(save_dir / 'run.log'))

    bs = args.bs
    if args.res is not None:
        height = width = args.res
    else:
        height = width = pipeline.unet.config.sample_size * pipeline.vae_scale_factor
    latent_shape = (bs, pipeline.unet.in_channels, height // pipeline.vae_scale_factor, width // pipeline.vae_scale_factor)

    i_start, _ = idx_se  # only the start offset is needed for file naming
    idx = 0
    time_start = time.time()
    while idx < len(caps):
        # Shrink the final batch so we never slice past the caption list.
        if idx + bs >= len(caps):
            bs = len(caps) - idx
        prompt = caps[idx:idx + bs]
        s = idx + i_start  # global index of the first sample in this batch
        # Draw noise matching the (possibly shrunk) batch size; using the full
        # `latent_shape` here would make the provided-latents batch disagree
        # with the prompt count on the last partial batch.
        rnd_noise = torch.randn(size=(bs,) + latent_shape[1:], dtype=dtype)
        latent_var = pipe_call(
            pipeline, prompt, latents=rnd_noise, num_inference_steps=args.infer_steps,
            guidance_scale=args.guidance_scale, stop_step=args.stop_step).to(dtype=rnd_noise.dtype)
        # Per sample: (2, C, H, W) = [initial noise, resulting latent].
        imgs2save = torch.stack(
            [rnd_noise, latent_var.cpu()], dim=0).transpose(0, 1).numpy()
        for i, img2save in enumerate(imgs2save, start=s):
            np.save(str(image_dir / f'{i}.npy'), img2save)
        idx += bs

        logger.info(
            f'{device}: [{idx}/{len(caps)}] ; time elapsed {time.time()-time_start:.3f}')


def prepare_args():
    """Parse and normalize command-line arguments.

    Normalization performed after parsing:
    - `devices` becomes either ['cpu'] or a list of GPU indices. Note argparse
      applies `type=str` only to command-line values, so the default stays the
      int -1 (meaning CPU).
    - `part`, if given, must be positive and is added to `seed` so that shards
      of a larger job draw different noise.
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--caption_path",
        type=str,
    )
    parser.add_argument(
        "--save_dir",
        type=str,
    )
    parser.add_argument(
        "--devices",
        type=str,
        default=-1,
    )
    parser.add_argument(
        "--infer_steps",
        type=int,
        default=10,
    )
    parser.add_argument(
        "--stop_ratio",
        type=float,
        default=1.0,
        help="value in [0, 1] ; if infer_steps is 10 and stop_ratio is 0.2 , inference will stop at 2 step (even if it should infer 10 steps), resulting an intermediate sample (cannot be decoded into sane image)"
    )
    parser.add_argument(
        "--guidance_scale",
        type=float,
        default=7.5,
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=23,
    )
    parser.add_argument(
        "--num_noise_per_prompt",
        type=int,
        default=1,
        help="generate multiple noises and latents per prompt. useful when you specify a small set of prompts and want to generate many noises"
    )
    parser.add_argument(
        "--pipeline",
        type=str,
        default="alt_diffusion",
    )
    parser.add_argument(
        "--scheduler",
        type=str,
    )
    parser.add_argument(
        "--pipeline_ckpt",
        type=str,
    )
    parser.add_argument(
        "--ckpt_path",
        type=str,
        default=None,
    )
    parser.add_argument(
        "--part",
        type=int,
        default=None,
    )
    parser.add_argument(
        "--bs",
        type=int,
        default=10,
    )
    parser.add_argument(
        "--res",
        type=int,
        default=None,
    )
    parser.add_argument(
        "--use_xformers",
        action='store_true',
        default=False,
    )
    args = parser.parse_args()

    if isinstance(args.devices, str):
        devices_str = args.devices.rstrip(',')
        # Accept the CPU sentinel on the command line too; previously
        # `--devices -1` produced [-1] and later the bogus device 'cuda:-1'.
        if devices_str in ('-1', 'cpu'):
            args.devices = ['cpu']
        else:
            args.devices = [int(i) for i in devices_str.split(',')]
    elif args.devices == -1:  # the int default was left untouched by argparse
        args.devices = ['cpu']

    if args.part is not None:
        assert args.part > 0
        args.seed += args.part

    return args


if __name__ == "__main__":
    mp.set_start_method('spawn')

    args = prepare_args()
    
    # # ! debug
    # #############################
    # args.caption_path = 'tmp/1prompt.txt'
    # args.save_dir = 'data/coco2014_reflow/1prompt_20samples'
    # args.infer_steps = 25
    # args.stop_ratio = 0.2
    # args.num_noise_per_prompt = 20
    # args.scheduler = "dpm_solver_multi"
    # args.pipeline_ckpt = "checkpoints/AltDiffusion"
    # args.bs = 1
    # # args.x = ...
    # #############################

    set_seed(args.seed)
    save_dir = Path(args.save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)
    
    content_dir = save_dir / 'content'
    content_dir.mkdir(parents=True, exist_ok=True)
    
    image_dir = content_dir / 'images'
    image_dir.mkdir(parents=True, exist_ok=True)
    
    # os.system(f"cp {args.caption_path} {str(content_dir / 'captions.txt')}")
    all_caps = open(args.caption_path, 'r').read().splitlines()
    all_caps = all_caps * args.num_noise_per_prompt
    with (content_dir / 'captions.txt').open('w') as caption_file:
        caption_file.write('\n'.join(all_caps))
    
    args.num_samples = len(all_caps)
    logger.add(str(save_dir / f'run.log'))
    logger.info(f'total samples {args.num_samples}')
    
    if isinstance(args.devices[0], int):
        device_list = [f'cuda:{i}' for i in args.devices]
    else:
        device_list = args.devices
    num_workers = len(device_list)
    logger.info(f'use devices {args.devices} ; {num_workers} in total')
    
    assert args.pipeline in _PIPELINES , f'available pipelines: {list(_PIPELINES.keys())}'
    assert args.scheduler in _SCHEDULERS, f'available schedulers: {list(_SCHEDULERS.keys())}'
    
    assert 0.0 <= args.stop_ratio <= 1.0 , 'stop_ratio must be in [0,1]'
    args.stop_step = int(args.stop_ratio * args.infer_steps)
    logger.info(f'inference process will stop in [{args.stop_step}/{args.infer_steps}]')
    
    
    save_config(args, save_path= str(save_dir / 'index.json'))
    
    groups, groups_se = devide_to_groups(all_caps, num_workers)
    if use_oneflow:
        assert sum([len(g)%args.bs for g in groups])==0, "static graph do not support variable batch size!"
    workers = []
    main_func_args = (
        args,
        save_dir,
        image_dir,
    )
    for i in range(num_workers):
        p = mp.Process(target=main, args=(
            groups[i], groups_se[i], device_list[i],) + main_func_args)
        p.start()
        workers.append(p)
        logger.info(f'process in {device_list[i]} started')
    
    for p in workers:
        p.join()
        
    logger.info(f'done')
    
    logger.info(f'converting npy data to lmdb')
    data2lmdb(str(image_dir))
    logger.info(f'lmdb construction completion')