# Simple inference script: takes prompts as input and writes images as output.
# Uses the reflow pipeline and scheduler.

# Inference supports two loading modes: the default SD-1-5, or a llama model with new LoRA weights.
# - llama mode loads the llama model, loads the LoRA weights via peft, and also loads the
#   saved UNet (which contains an encoder_hid_proj layer);
# - when loading, specify the load dir; load both the adapter and the UNet.
# Input is a fixed set of prompts.
# Sampling parameters: sampler, steps, cfg; llama-lora mode can additionally set the LoRA weight.
# Saved images: each prompt produces 4 images, stored as prompt_{x}.jpg plus prompt_grid.jpg;
# the output folder is named after the model id.

import numpy as np
import torch
import torch.utils.checkpoint
# import tomesd
from argparse import ArgumentParser
import os

from accelerate.utils import set_seed
from diffusers import AutoencoderKL, UNet2DConditionModel
from src.diffusers_overwrite import ReflowPipeline, ReflowEulerScheduler, ReflowHighOrderScheduler

from torchvision.transforms.functional import pil_to_tensor, to_pil_image
from tqdm.auto import tqdm
import matplotlib.pyplot as plt
from collections import Counter
from PIL import Image
import math
from pathlib import Path

from transformers import CLIPTextModel, CLIPTokenizer

# Command-line interface: checkpoint locations, output resolution, save folder.
parser = ArgumentParser()
for flag, options in (
    ('--load_dir', dict(type=str, default=None, help='load dir')),
    ('--diffusion_name_or_path', dict(type=str, default="checkpoints/SD-1-5")),
    ('--resolution', dict(type=int, default=512)),
    ('--save_suffix', dict(type=str, default=None)),
):
    parser.add_argument(flag, **options)
args = parser.parse_args()

def make_image_grid(imgs, rows, cols):
    """Paste ``imgs`` into a single ``rows`` x ``cols`` grid image.

    Image ``i`` is placed at row ``i // cols``, column ``i % cols``. All
    images are assumed to have the same size as the first one.

    Args:
        imgs: list of PIL images; must contain exactly ``rows * cols`` entries.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new RGB ``PIL.Image.Image`` of size ``(cols * w, rows * h)``.

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.
    """
    # Explicit check instead of `assert`, which is stripped under `python -O`.
    if len(imgs) != rows * cols:
        raise ValueError(
            f"expected {rows * cols} images for a {rows}x{cols} grid, got {len(imgs)}"
        )

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid

if __name__ == "__main__":
    # * config
    # =================================================================
    model_dtype = torch.float16
    device = "cuda:0"
    use_xformers = True
    scheduler_cls = ReflowHighOrderScheduler
    scheduler_config = dict(
        t_min=0.,
        t_max=1.,
        t_eps=1e-3,  # keep integration slightly away from the exact endpoints
        use_scipy_ode_solver=True,
        ode_method="RK45",
        ode_kwargs=dict(
            rtol=1e-3,
            atol=1e-6,
        ),
    )

    deterministic_sampling = True  # reseed before each prompt so runs are reproducible
    seed = 0
    num_inference_steps = 50
    guidance_scale = 1
    num_images_per_prompt = 4

    prompts = [
        'A young man smiles and holds a small teddy bear.',
        'An older man watches a kite fly from across a body of water.',
        "hyperdetailed robotic skeleton head with blue human eyes, symetry, golden ratio, intricate, detailed,",
        "The face and ear of a teddy bear",
        "A double decker bus going down the street.",
        "a meat sandwich that is on french bread.",
        'A kitchen with an oven, stove, cabinets and knives',
        'A man holding a frisbee in a parking lot near water.',
        "A black dragon with red demonic eyes",
    ]
    # =================================================================

    res = args.resolution
    height, width = (res, res)

    # Output folder: examples/<save_suffix>. Fall back to the loaded model's
    # directory name (the "model id") when --save_suffix is omitted; the
    # previous code crashed with `Path / None` in that case.
    save_suffix = args.save_suffix
    if save_suffix is None:
        save_suffix = Path(args.load_dir).name if args.load_dir else "default"
    save_dir = Path("examples") / save_suffix
    save_dir.mkdir(parents=True, exist_ok=True)

    # Default SD-1-5 mode: when --load_dir is not given, take the UNet from the
    # base diffusion checkpoint instead of calling from_pretrained(None).
    unet_dir = args.load_dir if args.load_dir is not None else args.diffusion_name_or_path
    unet = UNet2DConditionModel.from_pretrained(unet_dir, subfolder='unet', torch_dtype=model_dtype)
    tokenizer = CLIPTokenizer.from_pretrained(args.diffusion_name_or_path, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(args.diffusion_name_or_path, subfolder="text_encoder", torch_dtype=model_dtype)
    # NOTE(review): the VAE path is hard-coded and independent of --diffusion_name_or_path.
    vae = AutoencoderKL.from_pretrained("checkpoints/sd-vae-ft-mse", torch_dtype=model_dtype)
    scheduler = scheduler_cls.from_config(scheduler_config)

    pipeline = ReflowPipeline(
        vae=vae,
        text_encoder=text_encoder,
        tokenizer=tokenizer,
        unet=unet,
        scheduler=scheduler,
        safety_checker=None,
        feature_extractor=None,
        requires_safety_checker=False,
    )

    pipeline.to(device)
    if use_xformers:
        pipeline.enable_xformers_memory_efficient_attention()

    for prompt in prompts:

        print(f"{prompt}")

        if deterministic_sampling:
            # Fresh CPU generator per prompt: every prompt samples from the same
            # seed, independent of its position in the list.
            generator = torch.Generator()
            generator.manual_seed(seed)
        else:
            generator = None

        samples = pipeline(
            prompt=prompt,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            generator=generator,
            num_images_per_prompt=num_images_per_prompt,
        ).images

        # Save the individual sample images as <prompt>_<i>.jpg.
        # NOTE(review): the raw prompt is used as a file name — fine for the
        # prompts above, but would break on prompts containing '/'.
        for i, sample in enumerate(samples):
            sample: Image.Image
            sample.save(str(save_dir / f"{prompt}_{i}.jpg"))

        # Save an image grid using the most square exact factorization
        # nrow x ncol == len(samples). The previous sqrt-based arithmetic
        # produced nrow * ncol != n for counts like 5 or 7, which would have
        # tripped make_image_grid's length check; for the configured n=4 this
        # yields the same 2x2 layout.
        n = len(samples)
        nrow = int(math.sqrt(n))
        while n % nrow:
            nrow -= 1  # largest divisor of n that is <= sqrt(n)
        nrow = max(nrow, n // nrow)
        ncol = n // nrow
        sample_grid = make_image_grid(samples, nrow, ncol)
        sample_grid.save(str(save_dir / f"{prompt}_grid.jpg"))