"""多个 ckpt 的结果共同展示的版本
"""

# %%
# Set up configuration
import torch
from diffusers import UNet2DConditionModel, AutoencoderKL
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
from transformers import XLMRobertaTokenizer
from reflow.data.dataset import get_reflow_dataset, tokenize_caption
from reflow.utils import to_device, decode_latents
from tqdm.auto import tqdm, trange
from torchvision.utils import make_grid, save_image
import torch.nn.functional as F
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import os

# Device selection: use CUDA device `gpu` when non-negative, otherwise CPU.
gpu = 0
device = f'cuda:{gpu}' if gpu >= 0 else 'cpu'
# Root directory holding the pretrained AltDiffusion pipeline components.
ckpt_path = 'checkpoints/AltDiffusion'
# %%
# Load all model components from the AltDiffusion checkpoint directory.
model = UNet2DConditionModel.from_pretrained(
    ckpt_path,
    subfolder='unet'
)
tokenizer = XLMRobertaTokenizer.from_pretrained(
    ckpt_path,
    subfolder='tokenizer'
)
text_encoder = RobertaSeriesModelWithTransformation.from_pretrained(
    ckpt_path,
    subfolder='text_encoder'
)
vae = AutoencoderKL.from_pretrained(
    ckpt_path, 
    subfolder='vae',
)
# Freeze everything for inference: move to device, eval mode, no gradients.
model.to(device).eval().requires_grad_(False)
text_encoder.to(device).eval().requires_grad_(False)
vae.to(device).eval().requires_grad_(False)
...

# %%
# Select the score-model checkpoints and the reflow dataset to evaluate on.
# Alternative dataset choices, kept for quick switching:
# ds_path = 'data/coco2014_reflow/alt_gen_train_first20'
# ds_path = 'data/coco2014_reflow/alt_gen_train_first100'
# ds_path = 'data/coco2014_reflow/alt_gen_train_rnd20'
ds_path = 'data/coco2014_reflow/alt_gen_val10k'


ds = get_reflow_dataset(
    ds_path,
    # tokenizer=tokenizer,
)
# Checkpoint training steps to compare (plotted together below), and the
# directory their state dicts live in.
steps = [156250, 150000, 100000, 50000]
score_model_ckpt_dir = 'logs/2reflow_AltInit_v3/checkpoints'

# %%
# Specify the data idx and load it

def collate(data):
    """Prepend a batch dimension to every tensor entry of ``data`` (in place).

    Non-tensor values (e.g. caption strings) are left untouched; the same
    dict object is returned for convenience.
    """
    for key, value in data.items():
        if torch.is_tensor(value):
            data[key] = value[None]
    return data

def load_data(data_idx, use_rnd_noise=False):
    """Fetch one dataset item and prepare (z0, z1, condition, caption).

    Returns the starting noise latent z0, the target latent z1, the
    cross-attention condition dict for the UNet, and the raw caption.
    When ``use_rnd_noise`` is set, z0 is replaced with fresh Gaussian noise
    instead of the noise stored alongside the sample.
    """
    sample = to_device(collate(ds[data_idx]), device)
    z0 = sample.pop('noise')
    z1 = sample.pop('latent')
    if use_rnd_noise:
        z0 = torch.randn_like(z0)
    caption = sample['caption']
    # Tokenize and encode the caption once; the UNet consumes the hidden states.
    token_batch = dict(tokenize_caption(caption, tokenizer))
    token_batch = to_device(collate(token_batch), device)
    encoder_hidden_states = text_encoder(**token_batch)[0]
    condition = {'encoder_hidden_states': encoder_hidden_states}
    return z0, z1, condition, caption


# %%
# Sample t: `num_samples` evenly spaced timesteps in [Ts, Te).
num_samples = 10
Ts, Te = 1e-3, 1.0
# NOTE(review): torch.arange with a float step can be off by one at the end
# point; here it is expected to yield exactly `num_samples` values — confirm.
timesteps = torch.arange(start=Ts, end=Te, step=(Te-Ts)/num_samples).to(device)

# %%
# Loss function

def loss_fn(score, target, type='l1'):
    """Element-wise regression loss between ``score`` and ``target``.

    Args:
        score: predicted tensor.
        target: target tensor, same shape as ``score``.
        type: 'l1' for absolute error, 'l2' for squared error.

    Returns:
        Tensor of element-wise losses with the same shape as the inputs.

    Raises:
        ValueError: if ``type`` is not 'l1' or 'l2'.
    """
    # `type` shadows the builtin, but the name is kept for caller compatibility.
    diff = score - target
    if type == 'l2':
        return diff.square()
    if type == 'l1':
        return diff.abs()
    # Previously an unknown type crashed with UnboundLocalError; fail clearly.
    raise ValueError(f"unsupported loss type: {type!r}")

# %%
# zt loss curve
def zt_loss(z0, z1, condition):
    """Evaluate the model's loss at interpolated latents z_t.

    For each t in the global ``timesteps``, forms the linear interpolation
    z_t = t*z1 + (1-t)*z0, queries the model at timestep 999*t, and records
    the mean loss against the straight-line target z1 - z0.
    """
    target = z1 - z0
    losses = []
    with torch.no_grad():
        for t in tqdm(timesteps):
            t_b = t.view(-1, 1, 1, 1)
            zt = t_b * z1 + (1. - t_b) * z0
            # The UNet expects timesteps on the 0..999 training scale.
            pred = model(zt, timestep=999 * t, **condition).sample
            losses.append(loss_fn(pred, target).mean().item())
    return np.array(losses)

# %%
# sample loss curve
def sample_loss(z0,z1,condition, cheat_per_pre=0.0, cheat_per_post=1.0 ):
    """Euler-sample from z0 along the learned flow, recording loss per step.

    Integrates the ODE from Ts to Te in `num_samples` Euler steps. Within
    the "cheat" windows at the start/end of the trajectory the model's
    prediction is replaced by the ground-truth direction z1 - z0 (teacher
    forcing), so the loss curve isolates mid-trajectory errors.

    Args:
        z0: starting noise latent.
        z1: target data latent (loss target and reference image).
        condition: kwargs forwarded to the UNet (encoder_hidden_states).
        cheat_per_pre: fraction of the trajectory at the start where the
            ground-truth direction is integrated instead of the prediction.
        cheat_per_post: fraction after which ground truth is used again
            (1.0 disables the post window).

    Returns:
        (loss_list, grid): per-step mean losses as a numpy array, and an
        image grid of [decoded z1, decoded final sample].
    """
    eps=1e-5
    # Convert cheat fractions to thresholds on the 0..999 timestep scale.
    # The eps/Ts offsets nudge the thresholds so boundary steps compare as
    # intended despite float-equality issues.
    cheat_per_pre=eps - Ts + cheat_per_pre
    cheat_t_pre = 999 * (cheat_per_pre*(Te-Ts)+Ts)

    cheat_per_post= -eps + cheat_per_post
    cheat_t_post = 999 * (cheat_per_post*(Te-Ts)+Ts)

    dt = (Te - Ts)/num_samples # fix bug
    loss_list = []
    sample=z0
    target = z1 - z0  # ground-truth straight-line velocity
    with torch.no_grad():
        for t in tqdm(timesteps):
            # num_t = i / num_samples * (Te - Ts) + Ts
            # t = torch.ones(1, device=device) * num_t
            t = (999*t)  # rescale t to the UNet's 0..999 timestep convention
            pred = model(sample, timestep=t, **condition).sample
            # Loss is measured on the raw prediction, before any cheating.
            loss = loss_fn(pred, target)
            loss_list.append(loss.mean().detach().cpu().item())
            # Teacher-force the integration outside [cheat_t_pre, cheat_t_post].
            if t<cheat_t_pre or t>cheat_t_post:
                pred=target
            sample = sample + pred * dt
    loss_list = np.array(loss_list)
    
    # Decode both the integrated sample and the reference latent for display.
    sample_image = decode_latents(vae, sample, )
    latent_image = decode_latents(vae, z1)
    grid=make_grid(
        [latent_image.squeeze(),sample_image.squeeze(),],
        nrow=2,
        pad_value=0,
        )
    return loss_list, grid
    
# %%
# Main function
def main(ckpt_step):
    """Load the score model at ``ckpt_step`` and compute both loss curves.

    Returns (zt_loss_curve, sample_loss_curve, image_grid, caption) for the
    fixed example `data_idx`.
    """
    data_idx = 9  # fixed dataset example reused for every checkpoint
    ckpt_file = os.path.join(score_model_ckpt_dir, f'score_model_s{ckpt_step}.pth')
    state_dict = torch.load(ckpt_file, map_location='cpu')
    model.load_state_dict(state_dict, strict=True)

    z0, z1, condition, caption = load_data(data_idx, use_rnd_noise=False)

    zt_curve = zt_loss(z0, z1, condition)
    sample_curve, grid = sample_loss(z0, z1, condition,
                                     cheat_per_pre=0.1, cheat_per_post=1.0)
    return zt_curve, sample_curve, grid, caption

# %%
# Collect the zt/sample loss curves and image grid for every checkpoint step.
zt_losses = []
sample_losses = []
grids = []
# NOTE: `caption` from the last iteration is reused by the plotting cells
# below; all iterations use the same data_idx, so the caption is identical.
for step in tqdm(steps):
    zt_loss_curve, sample_loss_curve, grid, caption = main(step)
    zt_losses.append(zt_loss_curve)
    sample_losses.append(sample_loss_curve)
    grids.append(grid)


# %%
# Plot the z_t loss curve of every checkpoint on a single figure.
t_list = timesteps.cpu().numpy()
plt.cla()
for step, zt_loss_curve in zip(steps, zt_losses):
    plt.plot(t_list, zt_loss_curve, label=f'ckpt_s{step}')
# `caption` is the one left over from the collection loop above.
plt.title(f'{caption} (N={num_samples})')
plt.xlabel("t")
plt.ylabel("loss")
plt.legend()
plt.show()

# %%
# Plot the sampling-trajectory loss curve of every checkpoint together.
t_list = timesteps.cpu().numpy()
plt.cla()
for step, sample_loss_curve in zip(steps, sample_losses):
    plt.plot(t_list, sample_loss_curve, label=f'ckpt_s{step}')
# `caption` is the one left over from the collection loop above.
plt.title(f'{caption} (N={num_samples})')
plt.xlabel("t")
plt.ylabel("loss")
plt.legend()
plt.show()

# %%
# Display every checkpoint's image grid at once inside IPython.
from IPython.display import Image as ipyImage, display
for step in steps:
    print(f'image of ckpt step {step}')
# Stack the per-checkpoint grids vertically into one combined grid.
whole_grid = make_grid(grids, nrow=1)
os.makedirs('tmp', exist_ok=True)  # save_image fails if the directory is missing
save_image(whole_grid, 'tmp/tmp_grid.jpg')
img = ipyImage(filename='tmp/tmp_grid.jpg')
display(img)

# %%
