"""迭代一个 reflow dataset 进行采样
可以控制使用随机噪音或者dataset 中的噪音
"""
import sys
sys.path.append('.')
from absl import app
from absl import flags
from ml_collections.config_flags import config_flags
from pathlib import Path
import torch
from torch.utils.data import DataLoader
from torchvision.utils import make_grid, save_image
from loguru import logger
from tqdm.auto import tqdm
import json


from reflow.utils import create_models, to_device, set_seed, decode_latents
from reflow.data.utils import LMDB_ndarray
from reflow.data.dataset import get_reflow_dataset
from reflow.sde_lib import RectifiedFlow
from reflow.sample import get_sampling_fn
from copy import deepcopy
from reflow.data.dataset import tokenize_caption

@torch.no_grad()
def compute_loss(output: torch.Tensor, target: torch.Tensor, lpips_model, vae):
    """Compute per-sample L1, L2 and LPIPS losses between two latent batches.

    Args:
        output: sampled latents, shape ``(batch, ...)``.
        target: reference latents with the same shape as ``output``.
        lpips_model: callable LPIPS metric operating on decoded images.
        vae: VAE used by ``decode_latents`` to map latents to images.

    Returns:
        Dict with keys ``'l1'``, ``'l2'``, ``'lpips'``, each a list of
        per-sample scalar losses (length = batch size).
    """
    batch = output.shape[0]

    def per_sample(t: torch.Tensor) -> list:
        # Collapse all non-batch dims, average, and move results to host.
        return t.reshape(batch, -1).mean(dim=-1).detach().cpu().tolist()

    diff = output - target
    # LPIPS is defined on images, so decode both latents through the VAE first
    # (kept on device: cpu=False).
    decoded_out = decode_latents(vae, output, cpu=False)
    decoded_tgt = decode_latents(vae, target, cpu=False)
    return {
        'l1': per_sample(diff.abs()),
        'l2': per_sample(diff.square()),
        'lpips': per_sample(lpips_model(decoded_out, decoded_tgt, normalize=True)),
    }

def merge_batch_loss_info(loss_info, batch_loss_info):
    """Append per-sample loss records from one batch to the running list.

    Args:
        loss_info: list of per-sample loss dicts accumulated so far;
            mutated in place.
        batch_loss_info: dict with keys ``'l1'``, ``'l2'``, ``'lpips'``,
            each a list of per-sample scalars of equal length
            (as produced by ``compute_loss``).

    Returns:
        The same ``loss_info`` list, for convenient reassignment at call sites.
    """
    # The original enumerate(..., start=len(loss_info)) index was never used;
    # a plain zip over the three parallel lists is all that is needed.
    for l1_loss, l2_loss, lpips_loss in zip(
            batch_loss_info['l1'], batch_loss_info['l2'], batch_loss_info['lpips']):
        loss_info.append(
            dict(
                l1=l1_loss,
                l2=l2_loss,
                lpips=lpips_loss,
            )
        )
    return loss_info

def main(argv):
    """Sample from a reflow dataset and write all outputs under eval_folder.

    Reads ``FLAGS.config`` / ``FLAGS.eval_folder`` (defined in the
    ``__main__`` guard), builds tokenizer/text-encoder/VAE/score models,
    iterates the eval dataloader, and saves per-sample images, captions,
    optional decoded noise/latents and trajectory grids, plus a
    ``loss_info.json`` with per-sample losses when enabled.
    """

    config, eval_folder = FLAGS.config, FLAGS.eval_folder
    eval_folder = Path(eval_folder)
    
    logger.add(f'{eval_folder}/run.log')
    logger.info(f'\n{config}')
    
    sample_dir = eval_folder/"samples"
    sample_dir.mkdir(parents=True, exist_ok=True)
    
    # Sub-folders for optional side outputs: trajectory grids, decoded noise
    # images, decoded ground-truth latents.
    other_dirs = ["traj", "noise", "latent"]
    for odir in other_dirs:
        (eval_folder/odir).mkdir(exist_ok=True)
        
    caption_path = eval_folder / "caption.txt"
    img_suffix='jpg'    

    set_seed(config.seed)
    
    tokenizer, text_encoder, vae, score_model = create_models(config)

    # Inference only: move everything to the target device in eval mode.
    vae.to(config.device).eval()
    text_encoder.to(config.device).eval()
    score_model.to(config.device).eval()
    
    eval_ds = get_reflow_dataset(
        data_root=config.data.eval_root,
        tokenizer=tokenizer,
        src_type='lmdb',
    )
    eval_dl = DataLoader(
        eval_ds,
        batch_size=config.sampling.batch_size,
        shuffle=config.data.shuffle, # TODO: possibly set to True to verify caption-image pairing is correct
        num_workers=config.data.dl_workers,
        drop_last=True, 
    )
    eval_iter=eval_dl
    sde = RectifiedFlow(
        init_type=config.sampling.init_type,
        noise_scale=config.sampling.init_noise_scale,
        reflow_flag=True,
        reflow_t_schedule=config.reflow.reflow_t_schedule,
        reflow_loss='lpips', # to get lpips_model
        use_ode_sampler=config.sampling.use_ode_sampler,
        sample_N=config.sampling.sample_N,
        device=config.device,
    )
    # Building sampling functions
    sampling_shape = (config.sampling.batch_size, config.data.num_channels, config.data.image_size, config.data.image_size)
    sampling_fn = get_sampling_fn(
        config, sde, sampling_shape) 

    # sample_total = config.sampling.num_samples
    sample_cnt = 0 
    # drop_last=True above, so only full batches are produced; the remainder
    # of the dataset is skipped.
    sample_total=len(eval_ds) - len(eval_ds)%config.sampling.batch_size
    
    # Prepare the unconditional (empty-prompt) text embedding used for
    # classifier-free guidance.
    empty_str = [""]*config.sampling.batch_size
    empty_token=tokenize_caption(empty_str, tokenizer)
    empty_token=to_device(dict(empty_token), config.device)

    uncond_condition = {
        'encoder_hidden_states':text_encoder(**empty_token)[0]
    }
    guidance_scale=config.sampling.get('guidance_scale', 1.0)
    
    def prepare_step_fn_input(batch):
        # Split a dataloader batch into the noise/latent pair (z0, z1) and the
        # text condition built from the remaining tokenizer fields.
        # NOTE: pops mutate `batch` — callers pass a deepcopy for this reason.
        z0 = batch.pop('noise')
        z1 = batch.pop('latent')
        encoder_hidden_states = text_encoder(**batch)[0]
        return {
            'z0': z0,
            'z1': z1,
            'encoder_hidden_states': encoder_hidden_states,
        }
        
    caption_file = open(str(caption_path), 'w')
    pbar=tqdm(total=sample_total, desc='Samples')
    
    loss_info=[]
    
    with torch.no_grad():
        for batch in eval_iter:
            batch=to_device(batch, config.device)
            bs = batch['input_ids'].shape[0]
                
            if config.sampling.decode_noise:
                # decode noise and save
                images = decode_latents(vae, batch['noise'])
                images=images[:bs]
                for i, image in enumerate(images, start=sample_cnt):
                    save_image(image, str(eval_folder / "noise" / f'noise_{i}.{img_suffix}'))

            if config.sampling.decode_latent:
                # decode latent and save
                images = decode_latents(vae, batch['latent'])
                images=images[:bs]
                for i, image in enumerate(images, start=sample_cnt):
                    save_image(image, str(eval_folder / "latent" / f'latent_{i}.{img_suffix}'))

            # decode and save sampling captions
            # NOTE(review): the slice assumes each decoded string starts with
            # a 4-char special token and should stop 4 chars before "<pad>";
            # if "<pad>" is absent, str.find returns -1 and the slice becomes
            # s[4:-5] — confirm the tokenizer always pads.
            captions = tokenizer.batch_decode(batch['input_ids'])[:bs]
            captions = [s[4:s.find("<pad>")-4] for s in captions]
            caption_file.write('\n'.join(captions)+'\n')

            # sample, decode and save samples
            batch_copy = deepcopy(batch)
            eval_step_fn_input = prepare_step_fn_input(batch_copy)
            z0 = eval_step_fn_input.pop('z0')
            z1 = eval_step_fn_input.pop('z1')
            # z=None lets the sampler draw fresh random noise; otherwise the
            # dataset-stored noise z0 is reused.
            sample, *ret = sampling_fn(
                score_model,
                z = None if config.sampling.randz0 == 'random' else z0,
                condition = eval_step_fn_input,
                return_traj = config.sampling.return_traj, 
                uncond_condition=uncond_condition,
                guidance_scale=guidance_scale,
            )
            nfe=ret[0]
            logger.info(f'number of function evaluation: {nfe}')
            
            images = decode_latents(vae, sample)
            images=images[:bs]
            for i, image in enumerate(images, start=sample_cnt):
                save_image(image, str(sample_dir / f'sample_{i}.{img_suffix}'))

            if config.sampling.return_traj:
                # Save each sample's full trajectory as one horizontal grid.
                traj = ret[1][:bs]
                for i, traj_i in enumerate(traj, start=sample_cnt):
                    traj_i = decode_latents(vae, traj_i)
                    traj_i = make_grid(traj_i, nrow=len(traj_i), padding=2)
                    save_image(traj_i, str(eval_folder / "traj" / f'traj_{i}.{img_suffix}'))
                    
            if config.sampling.compute_loss:
                # Per-sample losses between the sample and the target latent z1.
                batch_loss_info=compute_loss(sample, z1, sde.lpips_model, vae)
                loss_info=merge_batch_loss_info(loss_info, batch_loss_info)

            sample_cnt += bs
            pbar.update(bs)
            
    caption_file.close()
    pbar.close()
    
    if len(loss_info)>0:
        json.dump(loss_info, open(str(eval_folder / 'loss_info.json'), 'w'))
    
if __name__ == "__main__":
    FLAGS = flags.FLAGS

    config_flags.DEFINE_config_file(
        "config", None, "Sampling configuration.", lock_config=True)
    flags.DEFINE_string("eval_folder", None,
                        "The folder name for storing evaluation results")
    flags.mark_flags_as_required(["eval_folder", "config"])
    app.run(main)
