"""功能有限，慎用。
将正常 diffusion model 的采样过程的前面一部分换成 reflow model 的采样，以此来检验 reflow model 对 diffusion model 路径拟合的能力。
指定 inference steps, diffusion model 的采样步数。指定 stop time t_s , [0,t_s) 是 reflow model 的采样过程，[t_s, 1] 是 diffusion model 的采样过程。diffusion model 的采样 start step 根据 int(stop_time * inference_steps) 得出。


"""
# %%import
from reflow.utils import _PIPELINES, _SCHEDULERS
from copy import deepcopy
import torch
from tqdm import tqdm, trange
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torchvision.utils import make_grid
from typing import Callable, List, Optional, Union
from reflow.utils import decode_latents, nothing
from reflow.utils import set_seed
import math

# %% Configuration
device = 'cuda:0'
# Registry keys into reflow.utils._PIPELINES / _SCHEDULERS used when loading below.
diffusers_pipeline = 'alt_diffusion'
diffusers_scheduler = 'dpm_solver_multi'
# Local checkpoint paths: the pretrained diffusers pipeline and the reflow unet weights.
diffusers_pipeline_ckpt = 'checkpoints/AltDiffusion'
reflow_ckpt_path = "logs/tmp/random10k_stop0.2/checkpoints/score_model_s3125.pth"

# %% Define the diffusion pipeline's sampling procedure

@torch.no_grad()
def inference_latent(
    pipeline,
    prompt: Union[str, List[str]],
    height: Optional[int] = None,
    width: Optional[int] = None,
    num_inference_steps: int = 50,
    guidance_scale: float = 7.5,
    negative_prompt: Optional[Union[str, List[str]]] = None,
    num_images_per_prompt: Optional[int] = 1,
    eta: float = 0.0,
    generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
    latents: Optional[torch.FloatTensor] = None,
    output_type: Optional[str] = "pil",
    return_dict: bool = True,
    callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
    callback_steps: Optional[int] = 1,
    stop_step: int = -1,
    start_step: int = -1,
):
    """Run the diffusion pipeline's denoising loop over a sub-range of steps.

    Mirrors a standard diffusers text-to-image call but stops before VAE
    decoding: it returns raw latents so the caller can hand them to another
    model (e.g. the reflow model) or resume denoising later via
    ``start_step`` / ``stop_step``.

    Args:
        pipeline: a loaded diffusers text-to-image pipeline.
        prompt: single prompt or list of prompts; determines the batch size.
        height, width: output resolution; default to the unet's sample size
            scaled by the VAE factor.
        num_inference_steps: total scheduler steps of a full trajectory.
        guidance_scale: classifier-free guidance weight; values > 1 enable
            guidance (eq. (2) of https://arxiv.org/pdf/2205.11487.pdf).
        negative_prompt, num_images_per_prompt, eta, generator: forwarded to
            the usual pipeline helpers.
        latents: optional initial latents; when given they are used as-is, so
            the caller can resume from a partially denoised state.
        output_type, return_dict, callback: accepted for signature
            compatibility with the diffusers call; unused here.
        callback_steps: forwarded to ``pipeline.check_inputs`` only.
        stop_step: scheduler step index at which to stop (exclusive);
            -1 means run all ``num_inference_steps`` steps.
        start_step: scheduler step index at which to start (inclusive);
            -1 (or 0) starts from the beginning.

    Returns:
        dict with keys:
            'noise': the initial latents before any denoising step,
            'latent': the latents after the executed denoising steps,
            'text_embeddings': the conditional half of the prompt embeddings.
    """
    # 0. Default height and width to unet
    height = height or pipeline.unet.config.sample_size * pipeline.vae_scale_factor
    width = width or pipeline.unet.config.sample_size * pipeline.vae_scale_factor

    # 1. Check inputs. Raise error if not correct
    pipeline.check_inputs(prompt, height, width, callback_steps)

    # 2. Define call parameters
    batch_size = 1 if isinstance(prompt, str) else len(prompt)
    device = pipeline._execution_device
    # `guidance_scale` is defined analogously to the guidance weight `w` of
    # equation (2) of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf .
    # `guidance_scale = 1` corresponds to doing no classifier free guidance.
    do_classifier_free_guidance = guidance_scale > 1.0

    # 3. Encode input prompt
    text_embeddings = pipeline._encode_prompt(
        prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
    )

    # 4. Prepare timesteps
    pipeline.scheduler.set_timesteps(num_inference_steps, device=device)
    timesteps = pipeline.scheduler.timesteps

    # 5. Prepare latent variables
    num_channels_latents = pipeline.unet.in_channels
    latents = pipeline.prepare_latents(
        batch_size * num_images_per_prompt,
        num_channels_latents,
        height,
        width,
        text_embeddings.dtype,
        device,
        generator,
        latents,
    )
    # Keep a copy of the starting point so callers can pair noise -> latent.
    rnd_noise = latents.detach().clone()

    # 6. Prepare extra step kwargs (eta / generator for ancestral schedulers).
    extra_step_kwargs = pipeline.prepare_extra_step_kwargs(generator, eta)

    # 7. Denoising loop over the step range [start_step, stop_step)
    if stop_step == -1:
        stop_step = num_inference_steps
    for i, t in enumerate(timesteps):
        if i < start_step:
            continue
        if i == stop_step:
            break

        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat(
            [latents] * 2) if do_classifier_free_guidance else latents
        latent_model_input = pipeline.scheduler.scale_model_input(
            latent_model_input, t)

        noise_pred = pipeline.unet(
            latent_model_input, t, encoder_hidden_states=text_embeddings).sample

        # perform guidance
        if do_classifier_free_guidance:
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
            noise_pred = noise_pred_uncond + guidance_scale * \
                (noise_pred_text - noise_pred_uncond)

        # compute the previous noisy sample x_t -> x_t-1
        latents = pipeline.scheduler.step(
            noise_pred, t, latents, **extra_step_kwargs).prev_sample

    example = {
        'noise': rnd_noise.detach(),
        'latent': latents.detach(),
        # chunk(2)[1] keeps only the conditional embeddings (the first half is
        # the unconditional/negative ones).
        # NOTE(review): this assumes classifier-free guidance was enabled; with
        # guidance_scale <= 1 `_encode_prompt` returns only conditional
        # embeddings and chunk(2) would split the batch instead — confirm
        # callers never pass guidance_scale <= 1.
        'text_embeddings': text_embeddings.chunk(2)[1].detach(),
    }
    return example


# %% Load the diffusion pipeline
pipeline_cls = _PIPELINES[diffusers_pipeline]
scheduler_cls = _SCHEDULERS[diffusers_scheduler]

# Safety checker disabled: this is an offline research/evaluation script.
pipeline = pipeline_cls.from_pretrained(
    diffusers_pipeline_ckpt,
    # torch_dtype=weight_dtype,
    safety_checker=None,
    requires_safety_checker=False,
)
# Swap in the configured scheduler, reusing the pipeline's scheduler config.
pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
pipeline = pipeline.to(device)


# %% Load the reflow model
# The reflow model shares the unet architecture, so start from a copy of the
# pipeline's unet and overwrite its weights from the checkpoint (if one is set).
reflow_model = deepcopy(pipeline.unet)
if not nothing(reflow_ckpt_path):
    reflow_model.load_state_dict(torch.load(
        reflow_ckpt_path, map_location='cpu'), strict=True)
reflow_model.eval().requires_grad_(False).to(device)
print(f'ckpt from <<{reflow_ckpt_path}>>')

#%% Sampling config
# Total diffusion scheduler steps for a full trajectory.
inference_steps = 25
guidance_scale = 7.5
# [0, stop_time) is handled by the reflow model, [stop_time, 1] by the
# diffusion model; the diffusion start step is int(stop_time * inference_steps).
stop_time = 0.2
stop_step = int(inference_steps * stop_time)

diffusion_sample_start_step = stop_step

# Number of Euler steps used to integrate the reflow ODE below.
reflow_sample_steps = 1
num_samples = reflow_sample_steps

# Fix the RNG seed for reproducibility; -1 disables seeding.
random_seed=891861
if random_seed!=-1:
    set_seed(random_seed)

# When persistent_noise is True and `noise` already holds a tensor, the same
# starting noise is reused across reruns of the cells below.
persistent_noise = True
noise=None

# When True, replace the reflow stage with a diffusion run for comparison.
switch_reflow_to_diffusion = False

# %% Load prompts
prompts = open('tmp/tmp.txt', 'r').read().splitlines()
# prompts = [
#     'A kitchen with an oven, stove, cabinets and knives',
#     'A young man smiles and holds a small teddy bear.',
#     'An older man watches a kite fly from across a body of water.',
#     'A man holding a frisbee in a parking lot near water.',
#     "A black dragon with red demonic eyes",
#     "hyperdetailed robotic skeleton head with blue human eyes, symetry, golden ratio, intricate, detailed,",
# ]
# Draw fresh Gaussian noise unless a persistent noise tensor is already set.
# NOTE(review): the 4x64x64 latent shape is hard-coded — confirm it matches
# the pipeline's latent resolution (64x64 corresponds to 512x512 output only
# for a vae_scale_factor of 8).
if not (persistent_noise and isinstance(noise, torch.Tensor)):
    noise = torch.randn(len(prompts), 4,64,64, device=device, dtype=torch.float)

# %% Run the diffusion model for the full trajectory to obtain reference data
example = inference_latent(
    pipeline,
    prompt=prompts,
    latents=noise, 
    num_inference_steps=inference_steps,
    guidance_scale=guidance_scale,
)
# noise = example['noise']
# Reference latents of the pure-diffusion run, used for visual comparison.
latent = example['latent']
# Conditional text embeddings, reused as conditioning for the reflow model.
condition = {
    'encoder_hidden_states': example['text_embeddings']
}


# %% Sample with the reflow model — or switch to the diffusion model as a
# comparison baseline.

if not switch_reflow_to_diffusion:
    # Integrate the reflow ODE with explicit Euler steps from t=Ts to t=Te.
    Ts, Te = 1e-3, 1
    timesteps = torch.arange(start=Ts, end=Te, step=(Te-Ts)/num_samples)
    # Append the terminal time so the last Euler step lands exactly on t=1.
    timesteps = torch.tensor(timesteps.tolist() + [1.0], device=device)

    with torch.no_grad():
        sample = noise.detach().clone()
        for i in trange(len(timesteps)-1):
            t = timesteps[i]
            t_n = timesteps[i+1]
            # Map continuous time in (0, 1] onto the unet's discrete 0..999
            # timestep scale. NOTE(review): assumed convention — confirm it
            # matches how the reflow model was trained.
            vec_t = (999*t)
            pred = reflow_model(sample, timestep=vec_t, **condition).sample
            dt = t_n - t
            # Euler update: x <- x + v(x, t) * dt
            sample = sample + pred * dt
else:
    # Baseline: run only the first `num_samples` steps of a diffusion
    # trajectory with int(num_samples / stop_time) total steps, i.e. the same
    # fraction of the trajectory the reflow stage is meant to cover.
    sample_example = inference_latent(
        pipeline,
        prompt=prompts,
        latents=noise,
        num_inference_steps=int(num_samples/stop_time),
        guidance_scale=guidance_scale,
        stop_step=num_samples,
    )
    sample = sample_example['latent']

# %% Finish the remaining sampling with the diffusion model
# Resume the diffusion trajectory at `diffusion_sample_start_step`, starting
# from the latents produced by the stage above.
sample_example = inference_latent(
    pipeline,
    prompt=prompts,
    latents=sample,
    num_inference_steps=inference_steps,
    guidance_scale=guidance_scale,
    start_step=diffusion_sample_start_step,
)
sample_latent = sample_example['latent']
# %% Decode the latents and display the image grids


def _latents_to_pil_grid(latents_batch, grid_cols):
    """Decode a batch of latents through the VAE into one PIL grid image."""
    images = decode_latents(pipeline.vae, latents_batch)
    grid = make_grid(images, nrow=grid_cols, )
    # Clamp before the uint8 cast: values outside [0, 1] would otherwise wrap
    # around during integer conversion and corrupt the image.
    grid = grid.clamp(0., 1.).mul(255.).to(dtype=torch.uint8).permute(1, 2, 0)
    # .cpu() is a no-op for CPU tensors but required when the grid lives on
    # CUDA, since numpy() cannot read GPU memory.
    return Image.fromarray(grid.cpu().numpy())


# Arrange the images in a near-square grid (guard against empty prompt lists).
nrow = max(1, int(math.sqrt(len(prompts))))
nrow = max(nrow, len(prompts) // nrow)
with torch.no_grad():
    # Top: reference (pure diffusion); bottom: reflow + diffusion hybrid.
    image = _latents_to_pil_grid(latent, nrow)
    sample_image = _latents_to_pil_grid(sample_latent, nrow)

plt.figure(figsize=(20, 20))
plt.subplot(2, 1, 1)
plt.imshow(image)
plt.subplot(2, 1, 2)
plt.imshow(sample_image)
for p in prompts:
    print(p)
plt.show()
# %%