"""功能有限，慎用。
制造一个分段采样的过程
将 [0,1] 的 t 区间分成若干段；假设分成 n 段，需要提供 n 个 t 起点，最后一个点是 1.0
假设分成3段，t 起点分别为 0.0, 0.2, 0.8 ; 三段区间为 [0,0.2], [0.2,0.8], [0.8,1.0]
每一段区间，指定用什么模型进行采样
- 如果用 diffusion model , 就用加载的 pipeline ckpt 进行该区间内的采样
- 如果用 reflow model , 需要指定 reflow model ckpt , reflow 采样步数。函数会加载对应的 reflow model 然后得到采样结果。

还需要 diffusion model 进行一次全程采样得到 ground truth 进行对比。可以指定1段区间为 [0,1], 全程用 diffusion model 进行采样
"""
# %%import
from reflow.utils import _PIPELINES, _SCHEDULERS
from copy import deepcopy
import torch
from tqdm import tqdm, trange
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from torchvision.utils import make_grid
from typing import Callable, List, Optional, Union
from reflow.utils import decode_latents, nothing
from reflow.utils import set_seed
from diffusers.utils import randn_tensor
import math
from diffusers import UNet2DConditionModel
from loguru import logger

# %%config
device = 'cuda:0'
# Registry keys resolved through reflow.utils (_PIPELINES / _SCHEDULERS).
diffusers_pipeline = 'stable_diffusion'
diffusers_scheduler = 'dpm_solver_multi'
# Base Stable Diffusion checkpoint directory.
diffusers_pipeline_ckpt = 'checkpoints/SD-1-4'
# Optional UNet state-dict override; the second assignment deliberately wins —
# reset to None (or comment the next line out) to keep the base weights.
ckpt_path = None
ckpt_path = "logs/pokemon/distill_ds/main/score_model_s2000.pth"
# NOTE(review): "overide" is a typo for "override"; kept as-is because the
# name is referenced again below. Only consulted for the 'euler_dummy' scheduler.
overide_timesteps = None

use_xformers = True

# Mirror all loguru output into a log file as well as the console.
logger.add("tmp/tmp.log")

# %%Load the diffusion pipeline; define the reflow model
pipeline_cls = _PIPELINES[diffusers_pipeline]
scheduler_cls = _SCHEDULERS[diffusers_scheduler]

weight_dtype = torch.float16
pipeline = pipeline_cls.from_pretrained(
    diffusers_pipeline_ckpt,
    torch_dtype=weight_dtype,
    safety_checker=None,
    requires_safety_checker=False,
)
# Optionally replace the UNet weights with a fine-tuned / distilled checkpoint.
if ckpt_path:
    pipeline.unet.load_state_dict(torch.load(ckpt_path))
# Swap in the configured scheduler, reusing the original scheduler's config.
pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
if diffusers_scheduler == 'euler_dummy':
    pipeline.scheduler.overide_timesteps = overide_timesteps
# Load the ft-MSE VAE weights for latent decoding.
pipeline.vae.load_state_dict(torch.load(
    "checkpoints/sd-vae-ft-mse/diffusion_pytorch_model.bin"))
pipeline = pipeline.to(device)

if use_xformers:
    pipeline.enable_xformers_memory_efficient_attention()

def load_reflow_model(reflow_model_ckpt_path):
    """Build a UNet from the base SD checkpoint and load reflow weights into it.

    The returned model lives on the global `device` in `weight_dtype`,
    in eval mode with gradients disabled (and xformers attention if enabled).
    """
    model = UNet2DConditionModel.from_pretrained(
        diffusers_pipeline_ckpt, subfolder='unet')
    state = torch.load(reflow_model_ckpt_path, map_location='cpu')
    model.load_state_dict(state, strict=True)
    model.to(device, dtype=weight_dtype).eval().requires_grad_(False)
    if use_xformers:
        model.enable_xformers_memory_efficient_attention()
    return model

# %%Define the reflow model sampling procedure


def print_grid(numbers):
    """Log *numbers* as a roughly square grid, one row per log line.

    The column count is floor(sqrt(len(numbers))), clamped to at least 1;
    a trailing short row holds any remainder.

    BUG FIX: the original used the raw int(sqrt(...)) as the range step, so
    an empty list raised ``ValueError: range() arg 3 must not be zero``.
    Empty input now simply logs the trailing blank line.
    (Also dropped the redundant function-local ``import math`` — the module
    already imports math at the top.)
    """
    ncols = max(int(math.sqrt(len(numbers))), 1)
    for start in range(0, len(numbers), ncols):
        row = numbers[start:start + ncols]
        logger.info(" ".join(str(num) for num in row))
    logger.info('\n')


def print_diff_info(pred, target, i):
    """Log per-sample cosine similarity, L2 loss and Frobenius norms of
    `pred` vs `target` (both flattened to (batch, -1)) for sampling step i."""
    batch = pred.shape[0]
    flat_pred = pred.view(batch, -1)
    flat_target = target.view(batch, -1)

    sims = torch.cosine_similarity(flat_pred, flat_target, dim=1)
    logger.info(f'step {i+1} cosine similarity')
    print_grid(sims.tolist())

    losses = torch.square(flat_pred - flat_target).mean(dim=-1)
    # diff_norms = torch.norm(pred.view(bs, -1) - target.view(bs, -1), p='fro', dim=1)
    logger.info(f'step {i+1} l2 loss')
    print_grid(losses.tolist())

    t_norms = torch.norm(flat_target, p='fro', dim=1)
    p_norms = torch.norm(flat_pred, p='fro', dim=1)
    norm_pairs = [f'{tn.item()}-{pn.item()}'
                  for tn, pn in zip(t_norms, p_norms)]
    logger.info(f'step {i+1} target-pred norm')
    print_grid(norm_pairs)


def reflow_sample(reflow_model, timesteps, sample, condition, uncond_condition=None, guidance_scale=1.0, dt_multiplier=1.0, pred_multiplier=1.0, target=None, target_variation=False, ):
    """Integrate the reflow ODE with explicit Euler steps.

    Args:
        reflow_model: velocity model called as
            ``reflow_model(sample, timestep=..., **condition)``; expected to
            return an object with a ``.sample`` tensor.
        timesteps: 1-D sequence of t values in [0, 1]; one Euler step is
            taken per consecutive pair.
        sample: initial latent tensor (typically pure noise).
        condition: keyword dict of conditioning inputs (text embeddings).
        uncond_condition: unconditional counterpart; enables classifier-free
            guidance together with ``guidance_scale > 1``.
        guidance_scale: CFG scale; 1.0 disables guidance.
        dt_multiplier: scales every Euler step size dt.
        pred_multiplier: scales the predicted velocity before stepping.
        target: optional reference velocity; when given, per-step diff
            statistics are logged via ``print_diff_info``.
        target_variation: when True, replaces ``target`` with the previous
            step's prediction after logging.

    Returns:
        The final latent after traversing all timestep intervals.
    """
    with torch.no_grad():
        for i in trange(len(timesteps) - 1):
            t = timesteps[i]
            t_n = timesteps[i + 1]
            # Rescale continuous t in [0, 1] to the model's [0, 999] range.
            vec_t = (999 * t)
            pred = reflow_model(sample, timestep=vec_t, **condition).sample
            if uncond_condition and guidance_scale > 1:
                uncond_pred = reflow_model(
                    sample, timestep=vec_t, **uncond_condition).sample
                pred = uncond_pred + guidance_scale * (pred - uncond_pred)
            pred = pred * pred_multiplier
            dt = (t_n - t) * dt_multiplier
            sample = sample + pred * dt

            # BUG FIX: the original `target != None` triggers an elementwise
            # tensor comparison when target is a torch.Tensor; identity check
            # is the correct (and PEP 8) way to test for None.
            if target is not None:
                print_diff_info(pred, target, i)
                if target_variation:  # TODO: swap target for the previous step's pred
                    target = pred

    return sample

# %%Sampling config


# reflow: segment start points in [0, 1] (a trailing 1.0 is appended below)
# and the model type used inside each segment.
t_sep = [0,]
model_types = ['reflow']

num_samples = [1]  # ! not working for diffusion model sampling
reflow_guidances = [1]
# (Ts, Te) integration window per reflow segment.
T_intervals = [(1e-3, 1)]
dt_multipliers = [1/(Te-Ts) for Ts, Te in T_intervals]
pred_multipliers = [1 for _, _ in T_intervals]
reflow_timesteps = None

# The block below overrides the uniform grid above with an explicit
# timestep schedule (takes precedence in the sampling loop).
model_types = ['reflow']
reflow_timesteps = [
    torch.tensor([1e-3, 0.1, 1]),
]
dt_multipliers = [1/(1-1e-3)] * len(reflow_timesteps)


# One checkpoint per reflow segment; commented entries are alternative runs.
reflow_model_ckpts = [
    # "logs/pokemon/1reflow/init2Reflow_l2/score_model_s2000.pth",
    # "logs/pokemon/2reflow/AutoCaption/score_model_s2000.pth",
    "logs/pokemon/distill/AutoCaption/score_model_s2000.pth",
    # "logs/pokemon/distill/AutoCaption_1reflow/score_model_s2000.pth",
    # "logs/pokemon/finetune_sd/main/score_model_s1000.pth",
    # "logs/pokemon/distill_ds/main/score_model_s2000.pth",
    
    # "logs/emoji/1reflow/init2Reflow/score_model_s1000.pth",
    # "logs/emoji/2reflow/AutoCaption/score_model_s2000.pth",
    # "logs/emoji/distill/AutoCaption/score_model_s2000.pth",
    # "logs/emoji/distill/AutoCaption_1reflow/score_model_s2000.pth",
    # "logs/emoji/finetune_sd/main/score_model_s1000.pth",
    # "logs/emoji/distill_sd/main/score_model_s2000.pth",
    
    # "logs/nouns/1reflow/init2Reflow/score_model_s1000.pth",
    # "logs/nouns/2reflow/AutoCaption/score_model_s2000.pth",
    # "logs/nouns/distill/AutoCaption/score_model_s2000.pth",
    # "logs/nouns/distill/AutoCaption_1reflow/score_model_s2000.pth",
    # "logs/nouns/finetune_sd/main/score_model_s1000.pth",
    # "logs/nouns/distill_sd/main/checkpoints/score_model_s2000.pth",
    
    # "logs/sketch-scene/1reflow/init2Reflow/score_model_s1000.pth",
    # "logs/sketch-scene/2reflow/AutoCaption/score_model_s2000.pth",
    # "logs/sketch-scene/distill/AutoCaption/score_model_s2000.pth",
    # "logs/sketch-scene/distill/AutoCaption_1reflow/score_model_s2000.pth",
    # "logs/sketch-scene/finetune_sd/main/score_model_s1000.pth",
    # "logs/sketch-scene/distill_sd/main/score_model_s2000.pth",
]
t_sep.append(1.0)

num_intervals = len(reflow_model_ckpts)


# diffusion
inference_steps = 2
guidance_scale = 1
num_images_per_prompt = 1

random_seed = 1
if random_seed != -1:
    set_seed(random_seed)

# Reuse an existing `noise` tensor across cell reruns when True.
persistent_noise = True
noise = None

# Latent spatial resolution: noise is (batch, 4, sample_res, sample_res).
sample_res = 64

# %%Load prompts
# prompts = open('tmp/tmp.txt', 'r').read().splitlines()

# prompts = [
#     # "A photograph of an astronaut riding a horse.",
#     # "A brand new helicopter",
#     # "A puppy wearing a hat",
#     # "a meat sandwich that is on french bread.",
#     # "Simba of Disney Cartoon, digital art, high resolution",
# ]


# # * mostly coco
# prompts = [
#     'A young man smiles and holds a small teddy bear.',
#     'An older man watches a kite fly from across a body of water.',
#     "hyperdetailed robotic skeleton head with blue human eyes, symetry, golden ratio, intricate, detailed,",
#     "The face and ear of a teddy bear",
#     "A double decker bus going down the street.",
#     "a meat sandwich that is on french bread.",
#     'A kitchen with an oven, stove, cabinets and knives',
#     'A man holding a frisbee in a parking lot near water.',
#     "A black dragon with red demonic eyes",
# ]

# # * lexcia
# prompts = [
#     'mid shot portrait of a woman in nightclub, in the style of David cronenberg ,scary, weird, high fashion, ID magazine, vogue magazine, homes and garden magazine, surprising, freaky, freak show, realistic, sharp focus, 8k high definition, medium format film photography, photo realistic, insanely detailed, intricate, elegant, art by les edwards and David kostic and stanley lau and artgerm',
#     "cut paper portrait of worf, klingon, intricate, detailed, sharp focus, layered, paper, unreal engine, cgsociety, patrick cabral, kiri ken, greg rutkowski. ",
#     "a portrait a very ordinary person, by Giuseppe Arcimboldo, portrait, fruit, renaissance, anatomically correct, beautiful perfect face, sharp focus, Highly Detailed",
#     "a cinematic portrait elon musk!! as a trap with cat ears, art by lois van baarle and loish! and rossdraws and sam yang and samdoesarts and artgerm and saruei and disney, digital art, highly detailed, intricate, sharp focus, trending on artstation hq, deviantart, unreal engine 5, 4 k uhd image ",
# ]

# * on distillation
prompts = [
    "A photograph of an astronaut riding a horse.",
    "A puppy wearing a hat",
    "A brand new helicopter.",
    "A beautiful castle, matte painting.",
]

# # * pokemon
# prompts = [
#     "a drawing of a blue and yellow dragon",
#     "a very cute looking pokemon character",
#     "an image of a cartoon character flying through the air",
#     "a drawing of a blue sea turtle holding a rock",
#     "a drawing of a green pokemon with red eyes",
#     "a yellow and white pokemon pokemon character",
#     "a white pokemon with a red and blue tail",
#     "a cartoon pikachu with big eyes and big ears",
#     "a drawing of a red and yellow insect",
# ]

# # * pokemon-mix
# prompts = [
#     "Girl with a pearl earring",
#     "Hello Kitty",
#     "Donald Trump",
#     "Totoro",
# ]

# Repeat each prompt so every prompt gets num_images_per_prompt samples.
prompts = [item for item in prompts for _ in range(num_images_per_prompt)]

# Draw fresh starting noise unless a persistent tensor already exists.
if not (persistent_noise and isinstance(noise, torch.Tensor)):
    noise = torch.randn(len(prompts), 4, sample_res,
                        sample_res, device=device, dtype=weight_dtype)

# %%Sample with the diffusion model to obtain reference data
example = pipeline.inference_latent(
    prompt=prompts,
    latents=noise,
    num_inference_steps=inference_steps,
    # num_images_per_prompt=1,
    guidance_scale=guidance_scale,
)
# noise = example['noise']
# Full-trajectory diffusion result, used as the ground-truth comparison image.
latent = example['latent']
condition = {
    'encoder_hidden_states': example['text_embeddings']
}
uncond_condition = {
    'encoder_hidden_states': example['uncond_text_embeddings']
}
# NOTE(review): presumably the pipeline returns None here when CFG was
# disabled — verify against inference_latent. Fall back to encoding empty
# prompts so the reflow CFG path always has unconditional embeddings.
if uncond_condition['encoder_hidden_states'] is None:
    uncond_condition['encoder_hidden_states'] = pipeline._encode_prompt(
        prompt=[""] * len(prompts),
        device=pipeline.device,
        num_images_per_prompt=1,
        do_classifier_free_guidance=False,
    )

# %%Mixed sampling: run each [t_sep[i], t_sep[i+1]] segment with its model
sample = noise.detach().clone()
for i in range(num_intervals):
    t_start, t_end = t_sep[i], t_sep[i+1]
    # Map the [0, 1] segment boundaries onto diffusion step indices.
    start_step, stop_step = int(
        t_start * inference_steps), int(t_end * inference_steps)
    model_type = model_types[i]
    if model_type == 'reflow':
        N = num_samples[i]
        reflow_model_ckpt = reflow_model_ckpts[i]
        reflow_model = load_reflow_model(reflow_model_ckpt)
        Ts, Te = T_intervals[i]
        # N uniform steps over [Ts, Te), then append the endpoint Te.
        timesteps = torch.arange(start=Ts, end=Te, step=(Te-Ts)/N)
        timesteps = torch.tensor(timesteps.tolist() + [Te], device=device)
        dt_multiplier = dt_multipliers[i]

        # An explicit schedule (if configured) overrides the uniform grid.
        if reflow_timesteps:
            timesteps = reflow_timesteps[i].to(device)

        pred_multiplier = pred_multipliers[i]

        sample = reflow_sample(
            reflow_model,
            timesteps,
            sample,
            condition,
            uncond_condition=uncond_condition,
            guidance_scale=reflow_guidances[i],
            dt_multiplier=dt_multiplier,

            pred_multiplier=pred_multiplier,

            # target=latent-noise, # TODO: log the gap between each step's pred and the distillation target
            # target_variation=True, # TODO: replace each target with the previous step's pred
        )
    elif model_type == 'diffusion':
        sample_example = pipeline.inference_latent(
            prompt=prompts,
            latents=sample,
            num_inference_steps=inference_steps,
            guidance_scale=guidance_scale,
            start_step=start_step,
            stop_step=stop_step,
        )
        sample = sample_example['latent']
    else:
        # BUG FIX: `raise(NotImplementedError)` raised the bare class with no
        # message; include the offending value for debuggability.
        raise NotImplementedError(f'unknown model_type: {model_type!r}')
sample_latent = sample
# %%Re-edit the sampled image (noise-then-denoise, SDEdit-style)
redit = False
# NOTE(review): presumably the fraction of the trajectory to re-run —
# confirm semantics in pipeline.add_noise.
strength = 0.4

if random_seed != -1:
    generator = torch.Generator()
    generator.manual_seed(random_seed)
else:
    generator = None

if redit:
    # Noise the mixed-sampling result, then denoise the tail of the
    # trajectory with the diffusion pipeline starting at start_step.
    noisy_latent, start_step = pipeline.add_noise(
        inference_steps, strength=strength, latents=sample_latent, generator=generator)
    redit_example = pipeline.inference_latent(
        prompt=prompts,
        latents=noisy_latent,
        num_inference_steps=inference_steps,
        guidance_scale=guidance_scale,
        start_step=start_step,
    )
    redit_latent = redit_example['latent']
else:
    redit_latent = sample_latent

# %%Print the output images
# Grid layout close to square for make_grid (nrow = images per row).
nrow = int(math.sqrt(len(prompts)))
nrow = max(nrow, len(prompts)//nrow)


def latent2img(latent):
    """Decode a latent batch through the VAE and tile it into one PIL image."""
    decoded = decode_latents(pipeline.vae, latent)
    grid = make_grid(decoded, nrow=nrow, )
    # Convert CHW float to HWC uint8 for PIL; assumes decode_latents already
    # returns CPU values in [0, 1] — TODO confirm.
    hwc = grid.mul(255.).to(dtype=torch.uint8).permute(1, 2, 0)
    return Image.fromarray(hwc.numpy())


# Decode all latents to images (no gradients needed for visualization).
with torch.no_grad():
    image = latent2img(latent)
    sample_image = latent2img(sample_latent)
    if redit:
        redit_image = latent2img(redit_latent)

# Two panels (diffusion ground truth, mixed sample) plus an optional third
# for the re-edited result.
num_subplots = 2
if redit:
    num_subplots += 1
plt.figure(figsize=(15, 15))
plt.subplot(1, num_subplots, 1)
plt.imshow(image)
image.save('tmp/image.jpg')
plt.subplot(1, num_subplots, 2)
plt.imshow(sample_image)
sample_image.save('tmp/sample_image.jpg')
if redit:
    plt.subplot(1, num_subplots, 3)
    plt.imshow(redit_image)
for p in prompts:
    print(p)
plt.show()

# %%
# image.show()
sample_image.show()
# %%
