"""
- 用保存的 score_model ckpt 替换 AltDiffusion unet 中的 diffusion_pytorch_model.bin 文件
- 新创建 EulerDummyScheduler , 实现用于 reflow 模型的最简单 euler 采样方法
- 如此采样可使用和 AltDiffusionPipeline 兼容的方法

使用用户提供的少量 prompts 进行采样的脚本
支持的功能：
- 指定产生文件的根目录
- 选择不同的 inference step
- 选择不同的 guidance scale

超参数：
- 随机种子 ; 
- 目标文件夹位置 ; 
- device ;
- score model ckpt 的路径 ; (在代码中进行文件移动和替换)
- 推理步数 (list); 
- guidance scale (list);
- num_images_per_prompt

图片保存的格式：
根目录
- captions.txt
- caption1
- - s1
- - - g1
- - - - 1.png
- - - - 2.png
"""
# Module-level switch: when True, load model/pipeline classes from the local
# diffusers-controlnet fork instead of the installed diffusers package.
use_controlnet=False
import sys
sys.path.append('.')
if use_controlnet:
    sys.path.insert(0, "src/diffusers-controlnet") # make sure model and pipeline load from the diffusers-controlnet fork

from pathlib import Path
import torch
from loguru import logger
import json
import os
from argparse import Namespace
from itertools import product
from torchvision.utils import make_grid
from torchvision.io import read_image, write_png

from reflow.utils import set_seed, nothing, _PIPELINES, _SCHEDULERS
if use_controlnet:
    # Register the ControlNet pipelines so they can be selected via args.pipeline.
    from diffusers import AltDiffusionControlNetPipeline, StableDiffusionControlNetPipeline
    _PIPELINES['stable_diffusion_controlnet']=StableDiffusionControlNetPipeline
    _PIPELINES['alt_diffusion_controlnet']=AltDiffusionControlNetPipeline


def prepare_args():
    """Assemble the hard-coded sampling configuration.

    Returns:
        argparse.Namespace with all sampling hyper-parameters. The
        ``control_scale`` attribute is present only when both the module-level
        ``use_controlnet`` switch and ``load_controlnet`` are enabled.
    """
    cfg = {
        'prompts': 'tmp/tmp.txt',
        'save_dir': 'samples/free_sample/tmp',
        'seed': 894483,
        'infer_steps': [25],          # NOTE : list
        'guidance_scale': [1, 1.5],   # NOTE : list
        'res': 512,
        'device': 0,
        'num_images_per_prompt': 1,
        'same_latent_per_prompt': True,
        'pipeline': 'stable_diffusion',
        'scheduler': 'euler_dummy',
        'pipeline_ckpt': 'checkpoints/SD-1-4',
        # NOTE only when you want to use reflow model and euler_dummy_scheduler
        'ckpt_path': "logs/online/sdv1-4_laion_1MPrompts_guidance7.5/checkpoints/score_model_s40000.pth",
        'load_lora': False,
        # NOTE dir containing "pytorch_lora_weights.bin"
        'lora_weights_path': "logs/2reflow_lora/checkpoints/lora_weights_s50000",
        'load_controlnet': False,
        'controlnet_path': 'logs/2reflow_controlnet/checkpoints/controlnet_s156250.pth',
        'control_scale': 1.0,
    }
    # Keep control_scale only when ControlNet is actually in use; downstream
    # code checks hasattr(args, 'control_scale') to decide whether to pass it.
    if not (use_controlnet and cfg['load_controlnet']):
        del cfg['control_scale']
    return Namespace(**cfg)


def save_config(args, save_path):
    """Serialize the run configuration to JSON, omitting the 'save_dir' entry.

    Args:
        args: argparse.Namespace holding the run configuration.
        save_path: destination path for the JSON file.

    The namespace dict is copied before popping: ``vars(args)`` returns the
    live ``__dict__``, so popping from it directly would silently delete the
    ``save_dir`` attribute from *args* itself (the original bug). The file is
    written via a context manager so the handle is closed and flushed.
    """
    config2save = dict(vars(args))
    config2save.pop('save_dir', None)  # tolerate a namespace without save_dir
    with open(save_path, 'w') as f:
        json.dump(config2save, f)

def make_sg_grid(root_dir, all_s, all_g, num_prompt):
    """For every prompt folder under *root_dir*, stitch the first image of each
    (inference-step, guidance-scale) combination into a single grid PNG.

    Args:
        root_dir: root directory holding prompt1..promptN subfolders.
        all_s: inference step values (one grid row per value).
        all_g: guidance scale values (one grid column per value).
        num_prompt: number of prompt folders to process.
    """
    step_labels = [str(s) for s in all_s]
    guid_labels = [str(g) for g in all_g]
    for idx in range(1, num_prompt + 1):
        prompt_dir = os.path.join(root_dir, f'prompt{idx}')
        # Only the first sample (1.png) of each setting goes into the grid.
        tiles = [
            read_image(os.path.join(prompt_dir, f's{s}', f'g{g}', '1.png'))
            for s, g in product(step_labels, guid_labels)
        ]
        grid = make_grid(tiles, nrow=len(guid_labels))
        out_name = f'grid_s({",".join(step_labels)})_g({",".join(guid_labels)}).png'
        write_png(grid, os.path.join(prompt_dir, out_name))

if __name__ == "__main__":
    args = prepare_args()
    set_seed(args.seed)
    
    # 用户自定义 prompts
    prompts = args.prompts
    if isinstance(prompts, str):
        # 指向了一个存储 prompts 的 txt 文件
        prompts = open(prompts, 'r').read().splitlines()
    else: 
        assert isinstance(prompts, list)
    
    # 创建需要的文件夹
    save_dir = Path(args.save_dir) 
    save_dir.mkdir(parents=True, exist_ok=True)
    
    logger.add(str(save_dir / f'run.log'))
    with (save_dir / 'captions.txt').open('w') as f:
        f.write('\n'.join(prompts))
        
    logger.info(f'use random seed {args.seed}')
    
    assert args.pipeline in _PIPELINES , f'available pipelines: {list(_PIPELINES.keys())}'
    assert args.scheduler in _SCHEDULERS, f'available schedulers: {list(_SCHEDULERS.keys())}'

    logger.info(f'inference steps: {args.infer_steps}')
    logger.info(f'guidance scales: {args.guidance_scale}')

    if args.device == -1:
        device = 'cpu'
    else:
        device = f'cuda:{args.device}'
    logger.info(f'process running on {device}')
    
    logger.info(f'same latent per prompt: {args.same_latent_per_prompt}')
    
    save_config(args, str(save_dir / 'index.json'))

    pipeline_cls = _PIPELINES[args.pipeline]
    scheduler_cls = _SCHEDULERS[args.scheduler]

    dtype=torch.float16
    if args.device==-1:
        dtype=torch.float32
    pipe = pipeline_cls.from_pretrained(
        args.pipeline_ckpt,
        torch_dtype=dtype,
        safety_checker=None,
        requires_safety_checker=False,
    )
    
    # 替换 ckpt (optional)
    if not nothing(args.ckpt_path):
        pipe.unet.load_state_dict(torch.load(args.ckpt_path, map_location='cpu'), strict=True)
        logger.info(
            f'use ckpt <<{args.ckpt_path}>>')
    else:
        logger.warning(f'not specify pretrained score model path')
        
    pipe = pipe.to(device)
    if args.scheduler=='euler_dummy': # NOTE only working for reflow model with target=z1-z0
        pipe.scheduler = scheduler_cls()
    else:
        pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
        
    if args.load_lora:
        pipe.unet.load_attn_procs(args.lora_weights_path)
        
    if args.load_controlnet and use_controlnet:
        pipe.controlnet.load_state_dict(torch.load(args.controlnet_path, map_location='cpu'), strict=True)
    
    if args.res is not None:
        height = width = args.res
    else:
        height = width = pipe.unet.config.sample_size * pipe.vae_scale_factor
    latent_shape=(1*args.num_images_per_prompt,pipe.unet.in_channels,height//pipe.vae_scale_factor, width//pipe.vae_scale_factor)
    
    extra_kwargs={}
    if hasattr(args, 'control_scale'):
        extra_kwargs['controlnet_conditioning_scale']=args.control_scale
    
    round_cnt = 0
    for prompt_idx, prompt in enumerate(prompts, start=1):
        rnd_latent=None 
        if args.same_latent_per_prompt:
            rnd_latent=torch.randn(latent_shape, device=pipe.device, dtype=dtype)
        for s in args.infer_steps:
            for g in args.guidance_scale:
                images = pipe(
                    prompt=prompt, 
                    height=height,
                    width=width,
                    num_inference_steps=s,
                    guidance_scale=g, 
                    num_images_per_prompt=args.num_images_per_prompt,
                    # disable_pbar=False,
                    latents=rnd_latent,
                    **extra_kwargs, 
                    ).images
                for img_idx, img in enumerate(images, start=1):
                    tgt_dir = save_dir / f'prompt{prompt_idx}' / f's{s}' / f'g{g}'
                    tgt_dir.mkdir(parents=True, exist_ok=True)
                    img.save(tgt_dir / f'{img_idx}.png')
                round_cnt+=1
                logger.info(f'prompt {prompt_idx} step {s} guidance {g} done [{round_cnt}/{len(args.infer_steps)*len(args.guidance_scale)*len(prompts)}]')
                
    logger.info(f'done')
    
    logger.info(f'making grid')
    make_sg_grid(str(save_dir), args.infer_steps, args.guidance_scale, len(prompts))
    logger.info(f'done')