# 取一个 reflow 数据集 ds
# 从 ds 中取一个样例 data
# 取得 data 的 noise 和 latent
# 均匀采样不同时间点的 t
# 计算不同 t 之下的 loss
# 将所有的 loss 保存，打印出图像

# %%
# 确定配置
import torch
from diffusers import UNet2DConditionModel, AutoencoderKL
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import RobertaSeriesModelWithTransformation
from transformers import XLMRobertaTokenizer
from reflow.data.dataset import get_reflow_dataset, tokenize_caption
from reflow.utils import to_device, decode_latents
from tqdm.auto import tqdm, trange
from torchvision.utils import make_grid
from copy import deepcopy
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt

gpu = 0
device = f'cuda:{gpu}' if gpu >= 0 else 'cpu'
ckpt_path = 'checkpoints/AltDiffusion'
# %%
# Load all pretrained components (UNet, tokenizer, text encoder, VAE)
# from the AltDiffusion checkpoint directory.
model = UNet2DConditionModel.from_pretrained(
    ckpt_path,
    subfolder='unet'
)
tokenizer = XLMRobertaTokenizer.from_pretrained(
    ckpt_path,
    subfolder='tokenizer'
)
text_encoder = RobertaSeriesModelWithTransformation.from_pretrained(
    ckpt_path,
    subfolder='text_encoder'
)
vae = AutoencoderKL.from_pretrained(
    ckpt_path, 
    subfolder='vae',
)
# Inference only: move to the target device, switch to eval mode, freeze weights.
model.to(device).eval().requires_grad_(False)
text_encoder.to(device).eval().requires_grad_(False)
vae.to(device).eval().requires_grad_(False)
...



# %% 
# make hybrid model
class HybridModel(torch.nn.Module):
    """Two-expert router over the [0, 999] timestep range.

    Timesteps below ``999 * zt_compress_rate`` are sent to ``model1`` with the
    timestep rescaled back to the full range (model1 was trained on a
    compressed early-time interval); all later timesteps go to ``model2``
    unchanged.

    Args:
        model1: score model fine-tuned on the compressed early-time interval.
        model2: base score model used for the remaining interval.
        zt_compress_rate: fraction of the timestep range served by ``model1``.
    """

    def __init__(self, model1, model2, zt_compress_rate=0.2) -> None:
        super().__init__()
        self.model1 = model1
        self.model2 = model2
        self.zt_compress_rate = zt_compress_rate

    def forward(self, sample, timestep, **condition):
        if timestep < (999 * self.zt_compress_rate):
            # BUG FIX: the original divided by the module-level global
            # `zt_compress_rate`; use the instance attribute so the wrapper
            # is self-contained and honors the constructor argument.
            timestep = timestep / self.zt_compress_rate
            model = self.model1
        else:
            model = self.model2
        return model(sample, timestep=timestep, **condition)

    @property
    def device(self):
        # Assumes model1 exposes a .device attribute (diffusers models do).
        return self.model1.device

# Fraction of the timestep range served by the t-compressed expert.
zt_compress_rate=0.2

# Expert for the compressed early-time interval.
t_compressed_model = deepcopy(model)
t_compressed_model_path = "logs/coco_val_10k/zt_compress_0.2/checkpoints/score_model_s800.pth"
t_compressed_model.load_state_dict(torch.load(t_compressed_model_path, map_location='cpu'), strict=True)

# Baseline expert for the remaining interval.
base_model = deepcopy(model)
base_model_path = "logs/coco_val_10k/base/checkpoints/score_model_s800.pth"
base_model.load_state_dict(torch.load(base_model_path, map_location='cpu'), strict=True)

# Replace `model` with the hybrid router over the two experts.
model = HybridModel(t_compressed_model, base_model, zt_compress_rate).to(device)


# %%
# 损失函数

def loss_fn(score, target, type='l1'):
    """Element-wise regression loss between prediction and target.

    Args:
        score: predicted velocity/score tensor.
        target: reference tensor of the same shape.
        type: 'l1' for absolute error, 'l2' for squared error.

    Returns:
        Un-reduced tensor of per-element losses, same shape as the inputs.

    Raises:
        ValueError: if `type` is neither 'l1' nor 'l2'.
    """
    if type == 'l2':
        loss = (score - target).square()
    elif type == 'l1':
        loss = (score - target).abs()
    else:
        # Previously an unknown type fell through to `return loss` and raised
        # an opaque UnboundLocalError; fail with a clear message instead.
        raise ValueError(f"unsupported loss type: {type!r}")
    return loss

# %%
# Loss curve over t (pred = model(zt, t, c)) using ground-truth interpolants.
def zt_loss(z0,z1,condition,):
    """Evaluate the reflow loss at the exact interpolants z_t for each t.

    For every t in the global `timesteps` (terminal entry excluded), forms
    z_t = t*z1 + (1-t)*z0, queries the global `model` at timestep 999*t, and
    records the mean loss against the straight-line target z1 - z0.

    Returns:
        np.ndarray of per-timestep mean losses.
    """
    target = z1 - z0
    losses = []
    with torch.no_grad():
        for t in tqdm(timesteps[:-1]):
            coeff = t.view(-1, 1, 1, 1)
            zt = coeff * z1 + (1. - coeff) * z0
            # Scale the unit-interval t to the model's [0, 999] timestep range.
            score = model(zt, timestep=(999 * t), **condition).sample
            losses.append(loss_fn(score, target).mean().detach().cpu().item())
    return np.array(losses)
# %%
# Compute loss while sampling along the ODE (pred = model(sample, t, c));
# can optionally "cheat" by using the ground-truth velocity for the first /
# last fraction of the trajectory.
def sample_loss(z0,z1,condition, cheat_per_pre=0.0, cheat_per_post=1.0 , add_plot=False):
    """Euler-integrate the learned flow from z0 and record per-step losses.

    Relies on module-level globals: `timesteps`, `model`, `Ts`, `Te`
    (and `caption` / `num_samples` when `add_plot=True`).

    Args:
        z0: starting latent (noise).
        z1: target latent; the straight-line velocity target is z1 - z0.
        condition: extra kwargs forwarded to the model (text conditioning).
        cheat_per_pre: fraction of early steps integrated with the true velocity.
        cheat_per_post: fraction after which the true velocity is used again.
        add_plot: also plot prediction norm / cosine similarity over t.

    Returns:
        (np.ndarray of per-step mean losses, final integrated sample).
    """
    eps=1e-5
    # Map the cheat fractions to absolute times in [Ts, Te]; eps nudges the
    # thresholds so endpoint comparisons behave as intended.
    cheat_per_pre=eps - Ts + cheat_per_pre
    cheat_t_pre = cheat_per_pre*(Te-Ts)+Ts

    cheat_per_post= -eps + cheat_per_post
    cheat_t_post = cheat_per_post*(Te-Ts)+Ts

    loss_list = []
    sample=z0
    target = z1 - z0
    
    cos_list=[]
    norm_list=[]
    
    with torch.no_grad():
        for i in trange(len(timesteps)-1):
            t = timesteps[i]
            t_n = timesteps[i+1]
            # num_t = i / num_samples * (Te - Ts) + Ts
            # t = torch.ones(1, device=device) * num_t
            # Scale unit-interval t to the model's [0, 999] timestep range.
            vec_t = (999*t)
            pred = model(sample, timestep=vec_t, **condition).sample
            loss = loss_fn(pred, target)
            
            # Track prediction magnitude and alignment with the true velocity.
            norm_list.append(torch.norm(pred,p=2,).item())
            cos_list.append(torch.cosine_similarity(torch.flatten(pred), torch.flatten(target), dim=0).item())
            
            loss_list.append(loss.mean().detach().cpu().item())
            
            # Outside the [cheat_t_pre, cheat_t_post] window, integrate with
            # the ground-truth velocity instead of the prediction ("cheat").
            if t<cheat_t_pre or t>cheat_t_post:
                pred=target
            
            dt = t_n - t
            sample = sample + pred * dt
    sample_loss_list = np.array(loss_list)
    
    if add_plot:
        print(torch.norm(target,p=2,).item())
        plt.figure()
        # NOTE(review): norm_list/cos_list hold len(timesteps)-1 entries but are
        # plotted against the full `timesteps`; also `num_samples` is not defined
        # in this file — confirm both before using add_plot=True.
        plt.plot(timesteps.cpu().numpy(), norm_list, label='norm', color='green')
        # plt.plot(timesteps.cpu().numpy(), [torch.norm(target,p=2,).item()]*num_samples, color='purple')
        plt.title(f'{caption} (N={num_samples})')
        plt.xlabel("t")
        plt.ylabel("norm", color='green')
        plt.legend(loc='upper left')
        plt.twinx()
        plt.plot(timesteps.cpu().numpy(), cos_list, label='cos sim', color='orange')
        plt.ylabel("cos sim", color='orange')
        plt.legend(loc='upper right')
        plt.show()
    
    return sample_loss_list, sample

# %%
# ! Select the reflow dataset to evaluate on.
# ds_path = 'data/coco2014_reflow/alt_gen_train_first20'
# ds_path = 'data/coco2014_reflow/alt_gen_train_first100'
# ds_path = 'data/coco2014_reflow/alt_gen_train_rnd20'
ds_path = 'data/reflow/coco_val_10k'
# ds_path = 'data/coco2014_reflow/alt_gen_val_rnd20'


# Samples are dicts carrying 'noise', 'latent' and 'caption' (see usage below);
# captions are tokenized later, so no tokenizer is passed here.
ds = get_reflow_dataset(
    ds_path,
    src_type='lmdb',
    # tokenizer=tokenizer,
)

# %%
# 指定数据 idx 并加载

def collate(data):
    """Add a leading batch dimension to every tensor value in `data`, in place.

    Non-tensor entries (captions, ids, ...) are left untouched. Returns the
    same dict object for convenient chaining.
    """
    tensor_keys = [key for key, value in data.items() if isinstance(value, torch.Tensor)]
    for key in tensor_keys:
        data[key] = data[key].unsqueeze(0)
    return data

# ! Data selection config
data_idx = 2            # which dataset sample to inspect
use_rnd_noise = False   # replace the stored pairing noise with fresh noise

data = collate(ds[data_idx])
data = to_device(data, device)
z0 = data.pop('noise')   # reflow start point (noise latent)
if use_rnd_noise:
    z0 = torch.randn_like(z0)
z1 = data.pop('latent')  # reflow end point (image latent)
caption = data['caption']
tokens = dict(tokenize_caption(caption, tokenizer))
tokens = collate(tokens)
tokens = to_device(tokens, device)
# Text condition for the UNet; index [0] presumably selects the projected
# hidden states from the encoder output — confirm against the encoder class.
encoder_hidden_states = text_encoder(**tokens)[0]
condition = {
    'encoder_hidden_states': encoder_hidden_states,
}

# %%
# Main part: sample the evaluation timesteps t.

N1,N2=25,25
Ts, Te = 1e-3, 1
# Non-uniform grid: N1 points cover the compressed early interval
# (scaled by zt_compress_rate) and N2 points cover the remainder, so both
# expert intervals get equal resolution.
timesteps1 = torch.arange(start=Ts, end=Te, step=(Te-Ts)/N1) * zt_compress_rate
timesteps2 = torch.arange(start=Ts, end=Te, step=(Te-Ts)/N2) * (1-zt_compress_rate) + zt_compress_rate
timesteps = torch.cat([timesteps1, timesteps2], dim=0)
# Append the terminal time 1.0 and move everything to the compute device.
timesteps = torch.tensor( timesteps.tolist() + [1.0] , device=device)

zt_loss_list = zt_loss(z0,z1,condition)
sample_loss_list, sample = sample_loss(
    z0,z1,condition, add_plot=False)
# Note: the steps are 0.0, 0.1, ..., 0.8, 0.9 (10 steps in total).
# %%
# Plot both loss curves over t (terminal time excluded — no prediction there).
t_list = timesteps[:-1].cpu().numpy()
# FIX: removed stray `scalex=()` kwarg — an accidental empty tuple that is
# falsy and silently disabled x-axis autoscaling for this call.
plt.plot(t_list, zt_loss_list, label='zt')
plt.plot(t_list, sample_loss_list, label='sample')
plt.title(f'{caption} (N={N1}+{N2}={N1+N2})')
plt.xlabel("t")
plt.ylabel("loss")
# plt.xlim(-0.05,1.05)
# plt.ylim(0.2,0.6)
plt.legend()
plt.show()
# %%
# Decode and display the integrated sample next to the ground-truth latent.
sample_image = decode_latents(vae, sample, )
latent_image = decode_latents(vae, z1)
grid=make_grid(
    [latent_image.squeeze(),sample_image.squeeze(),],
    nrow=2,
    pad_value=0,
    )
# Convert float grid to uint8 HWC for PIL. NOTE(review): assumes decode_latents
# returns CPU tensors scaled to [0, 1] — confirm; .numpy() fails on CUDA tensors.
grid=grid.mul(255.).to(dtype=torch.uint8)
grid=grid.permute(1,2,0).numpy()
pil_grid = Image.fromarray(grid)
plt.figure(figsize=(8,16))
plt.title(f'{caption} (N={N1}+{N2}={N1+N2})')
plt.imshow(pil_grid)

# %%
