import sys
sys.path.append('.')
from reflow.utils import _PIPELINES, _SCHEDULERS, nothing
from diffusers import UNet2DConditionModel
from reflow.loss import get_loss_fn
from reflow.ema import ExponentialMovingAverage
from reflow.sde_lib import RectifiedFlow
from reflow.data.dataset import PromptsDataset
from diffusers.optimization import get_scheduler
from accelerate.utils import set_seed
from accelerate import Accelerator
from loguru import logger
from tqdm.auto import trange
from torch.utils.data import DataLoader
import torch
import torch.nn.functional as F
from torch.utils.tensorboard.writer import SummaryWriter
from pathlib import Path
from ml_collections.config_flags import config_flags
from absl import flags
from absl import app
from functools import partial
import random
from diffusers import DDPMScheduler


def sd_loss_fn_hijack(state, batch, t_schedule='uniform', num_training_steps=1000, noise_scheduler: "DDPMScheduler | None" = None):
    """Standard stable-diffusion epsilon-prediction loss, swapped in for the reflow loss.

    Pops ``'z0'`` (noise) and ``'z1'`` (clean latents) from *batch*; every
    remaining entry (e.g. ``'encoder_hidden_states'``) is forwarded to the
    UNet as a conditioning kwarg. NOTE: *batch* is mutated in place.

    Args:
        state: dict holding the trainable UNet under key ``'model'``.
        batch: dict with ``'z0'`` (noise), ``'z1'`` (latents), plus
            conditioning tensors.
        t_schedule: ``'uniform'`` samples timesteps uniformly from
            ``[0, num_train_timesteps)``; ``'t0'`` always uses timestep 0.
        num_training_steps: unused; kept for backward compatibility — the
            timestep range actually comes from ``noise_scheduler.config``.
        noise_scheduler: DDPMScheduler used to add noise to the latents.
            Required; the default of None exists only so callers can pass
            it by keyword (e.g. via functools.partial).

    Returns:
        Scalar mean MSE loss between the UNet prediction and the noise target.

    Raises:
        ValueError: if ``noise_scheduler`` is None.
        NotImplementedError: for an unrecognized ``t_schedule``.
    """
    # TODO: unclear why the finetune-sd mode uses far more GPU memory than
    # the reflow mode (single-GPU batch size dropped 32 -> 8); the only
    # changes were adding noise_scheduler and swapping the loss fn.
    # Investigate the cause.
    if noise_scheduler is None:
        # Fail fast with a clear message instead of an AttributeError below.
        raise ValueError('noise_scheduler must be provided for the SD finetune loss')

    noise = batch.pop('z0')
    latents = batch.pop('z1')
    condition = batch  # whatever remains is conditioning kwargs for the UNet
    unet = state['model']

    bs = latents.shape[0]

    if t_schedule == 'uniform':
        timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bs,))
    elif t_schedule == 't0':
        timesteps = torch.zeros((bs,))
    else:
        raise NotImplementedError('non-existing sd t schedule')

    timesteps = timesteps.to(device=latents.device, dtype=torch.long)
    noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
    # Epsilon-prediction objective: the regression target is the injected noise.
    target = noise

    model_pred = unet(noisy_latents, timesteps, **condition).sample
    # Compute the loss in fp32 for numerical stability under mixed precision.
    loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")

    return loss


def main(argv):
    """Reflow training-loop driver.

    Builds the frozen teacher diffusion pipeline (plus an optional ReDIT
    refinement pipeline), a trainable UNet ("reflow model"), the optimizer /
    LR schedule / EMA, then trains: each step samples prompts, generates
    (noise, latent, text-embedding) triplets with the pipeline, and
    optimizes the reflow model on them under HuggingFace Accelerate.

    Args:
        argv: positional command-line arguments from absl.app (unused).
    """
    config, workdir = FLAGS.config, FLAGS.workdir
    workdir = Path(workdir)

    # Create directories for experimental logs
    sample_dir = workdir/"samples"
    sample_dir.mkdir(parents=True, exist_ok=True)

    tb_dir = workdir/"tensorboard"
    tb_dir.mkdir(exist_ok=True)
    writer = SummaryWriter(str(tb_dir))

    accelerator = Accelerator(
        gradient_accumulation_steps=config.training.gradient_accumulation_steps,
        mixed_precision=config.training.mixed_precision,
    )
    
    # device_specific=True derives a distinct (but reproducible) seed per process.
    set_seed(config.seed, device_specific=True)

    if accelerator.is_main_process:
        logger.add(str(workdir / 'exp.log'))
        logger.info(f'\n{config}')
        logger.info(f'comment : {FLAGS.comment}')


    weight_dtype = torch.float32
    if config.training.mixed_precision == "fp16":
        weight_dtype = torch.float16
    if config.training.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16

    # Build the teacher diffusion-model pipeline and its sampler scheduler
    # (e.g. alt_diffusion with a DPM-Solver multistep scheduler).
    pipeline_cls = _PIPELINES[config.diffusers.pipeline]
    scheduler_cls = _SCHEDULERS[config.diffusers.scheduler]

    pipeline = pipeline_cls.from_pretrained(
        config.diffusers.pipeline_ckpt,
        torch_dtype=weight_dtype,
        safety_checker=None,
        requires_safety_checker=False,
    )
    if config.reflow.finetune_sd == 'yes':
        # Keep a plain DDPM scheduler for the vanilla SD fine-tune loss
        # (capture the config before it is overwritten just below).
        noise_scheduler = DDPMScheduler.from_config(pipeline.scheduler.config)
    pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
    
    pipeline = pipeline.to(accelerator.device)
    
    
    if config.diffusers.use_redit:
        # Optional second pipeline used to re-noise and re-denoise ("ReDIT")
        # the teacher's latents before they become training targets.
        redit_pipeline_cls = _PIPELINES[config.diffusers.redit_pipeline]
        redit_pipeline = redit_pipeline_cls.from_pretrained(
            config.diffusers.redit_pipeline_ckpt,
            torch_dtype=weight_dtype,
            safety_checker=None,
            requires_safety_checker=False,
        )
        redit_scheduler_cls = _SCHEDULERS[config.diffusers.redit_scheduler]
        redit_pipeline.scheduler = redit_scheduler_cls.from_config(redit_pipeline.scheduler.config)
        if config.diffusers.use_xformers:
            redit_pipeline.enable_xformers_memory_efficient_attention()
        redit_pipeline = redit_pipeline.to(accelerator.device)
        redit_pipeline.scheduler.set_timesteps(config.diffusers.redit_inference_steps, device=accelerator.device)

    # Load the new (trainable) reflow model.
    
    # TODO:
    reflow_model = UNet2DConditionModel.from_pretrained(
        config.diffusers.pipeline_ckpt,
        subfolder='unet',
    )
    
    # Optionally warm-start: 'inference' weights go into the frozen teacher
    # pipeline, 'train' weights into the trainable reflow model.
    if not nothing(config.diffusers.reflow_ckpt_inference):
        pipeline.unet.load_state_dict(torch.load(config.diffusers.reflow_ckpt_inference, map_location='cpu'), strict=True)
    if not nothing(config.diffusers.reflow_ckpt_train):
        reflow_model.load_state_dict(torch.load(config.diffusers.reflow_ckpt_train, map_location='cpu'), strict=True)

    if config.diffusers.gradient_checkpointing:
        reflow_model.enable_gradient_checkpointing()
    if config.diffusers.use_xformers:
        try:
            reflow_model.enable_xformers_memory_efficient_attention()
            pipeline.enable_xformers_memory_efficient_attention()
            if config.diffusers.use_redit:
                redit_pipeline.enable_xformers_memory_efficient_attention()
        except Exception as e:
            logger.warning(
                f"Could not enable memory efficient attention. Make sure xformers is installed correctly and a GPU is available: {e}"
            )

    model = reflow_model

    # EMA of the trainable weights is tracked on the main process only.
    ema = None
    if accelerator.is_main_process:
        ema = ExponentialMovingAverage(
            model.parameters(), decay=config.ema.decay)

    # Initialize the optimizer
    if config.optim.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )
        optimizer_cls = bnb.optim.AdamW8bit
    else:
        optimizer_cls = torch.optim.AdamW

    optimizer = optimizer_cls(
        model.parameters(),
        lr=config.optim.lr,
        betas=config.optim.betas,
        weight_decay=config.optim.weight_decay,
        eps=config.optim.eps,
    )

    # Warmup/total steps are scaled by the accumulation factor because the
    # scheduler is stepped once per micro-batch, not once per optimizer step.
    lr_scheduler = get_scheduler(
        config.optim.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=config.optim.warmup *
        config.training.gradient_accumulation_steps,
        num_training_steps=config.training.num_steps *
        config.training.gradient_accumulation_steps,
    )

    # * We need a dataset that can randomly produce prompts.
    def get_device_index(device: torch.device):
        """Return the numeric index of *device*, treating bare 'cuda'/'cpu' as 0."""
        if str(device) in ['cuda', 'cpu'] :
            index = 0
        else:
            index = device.index
        return index

    train_ds = PromptsDataset(
        caption_path=config.data.caption_path,
        random_seed=config.seed,
        device_index=get_device_index(accelerator.device),
    )
    train_dl = DataLoader(
        train_ds,
        batch_size=config.training.batch_size,
        num_workers=config.data.dl_workers,
    )
    # TODO: make the dataloader sample randomly per (device, process) pair
    # directly, but then the random_seed state could not be saved/restored.
    model, optimizer, lr_scheduler = accelerator.prepare(
        model, optimizer, lr_scheduler
    )
    train_iter = iter(train_dl)

    initial_step = 1
    ckpt_path = config.training.ckpt_path
    if not nothing(ckpt_path):
        # Recover the step count from the directory name: checkpoint_s{xxx}
        global_step = int(ckpt_path.split(
            '/')[-1].split('_')[-1][1:])  # checkpoint_s{xxx}
        initial_step = global_step + 1
        accelerator.load_state(f'{ckpt_path}')
        if accelerator.is_main_process:
            # Restore the EMA shadow weights saved next to the checkpoint.
            ema_path = str(Path(ckpt_path).parent /
                           f'score_model_s{global_step}.pth')
            ema_params = torch.load(ema_path, map_location='cpu').values()
            ema = ExponentialMovingAverage(ema_params, decay=config.ema.decay)
            
    if ema:
        ema.to(accelerator.device)

    state = dict(model=model, ema=ema)
    checkpoint_dir = workdir/'checkpoints'
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    if accelerator.is_main_process:
        accelerator.init_trackers("tmp", config=vars(config))

    sde = RectifiedFlow(
        init_type=config.sampling.init_type,
        noise_scale=config.sampling.init_noise_scale,
        reflow_flag=True,
        reflow_t_schedule=config.reflow.reflow_t_schedule,
        reflow_loss=config.reflow.reflow_loss,
        # use_ode_sampler=config.sampling.use_ode_sampler,
        # sample_N=config.sampling.sample_N,
        codec=pipeline.vae,
        device=accelerator.device,
        zt_compress_rate=config.reflow.zt_compress_rate,
    )

    reduce_mean = config.training.reduce_mean
    # Either fine-tune with the plain SD noise-prediction loss, or use the
    # rectified-flow training loss.
    if config.reflow.finetune_sd == 'yes':
        train_loss_fn = partial(sd_loss_fn_hijack, t_schedule=config.reflow.sd_t_schedule, noise_scheduler=noise_scheduler)
    else:
        train_loss_fn = get_loss_fn(sde, train=True, reduce_mean=reduce_mean,)

    num_train_steps = config.training.num_steps
    if accelerator.is_main_process:
        logger.info(f'REFLOW T SCHEDULE: {config.reflow.reflow_t_schedule}')
        logger.info(f'LOSS: {config.reflow.reflow_loss}')
        logger.info(f"Starting reflow training loop at step {initial_step}.")

    pbar = trange(1, num_train_steps+1, desc='Steps',
                  disable=not accelerator.is_local_main_process)

    # Each step: first generate data with the pipeline, organize it into the
    # reflow data format, then train the reflow model on it.
    for step in pbar:
        train_loss = torch.tensor(0.0, device=accelerator.device)
        for _ in range(config.training.gradient_accumulation_steps):

            prompts = next(train_iter)['prompt']
            # When resuming, fast-forward through the dataloader (consuming
            # prompts keeps the data stream aligned) without training.
            if step < initial_step:
                continue

            # * We need a batch's worth of prompts.
            example = pipeline.inference_latent(
                prompt=prompts,
                num_inference_steps=config.diffusers.num_inference_steps,
                guidance_scale=config.diffusers.guidance_scale,
                start_step=config.diffusers.start_step,
                stop_step=config.diffusers.stop_step,
                disable_pbar=True,
            )
            
            if config.diffusers.use_redit:
                # Refine the teacher latent: add noise, then partially
                # re-denoise it with the ReDIT pipeline.
                sample_latent = example['latent']
                noisy_latent, start_step = redit_pipeline.add_noise(config.diffusers.redit_inference_steps, strength=config.diffusers.redit_strength, latents=sample_latent, )
                redit_latent = redit_pipeline.inference_latent(
                    prompt=prompts,
                    latents=noisy_latent,
                    num_inference_steps=config.diffusers.redit_inference_steps,
                    guidance_scale=config.diffusers.redit_guidance_scale,
                    disable_pbar=True,
                    start_step=start_step
                )['latent']
                example['latent'] = redit_latent

            # Reflow pair: z0 is the starting noise, z1 the teacher's output.
            batch = {
                'z0': example['noise'],
                'z1': example['latent'],
                'encoder_hidden_states': example['text_embeddings'],
            }
            
            # occasionally unconditional training 
            if random.random() < config.training.p_uncond:
                uncond_text_embeddings = pipeline._encode_prompt(
                    [""]*len(prompts),
                    accelerator.device,
                    1,
                    False,
                )
                batch['encoder_hidden_states'] = uncond_text_embeddings.to(batch['encoder_hidden_states'])
                
                
            with accelerator.accumulate(model):
                loss = train_loss_fn(state, batch)
                # Gather per-process losses for logging only.
                avg_loss = accelerator.gather(
                    loss.repeat(config.training.batch_size)).mean()
                train_loss += avg_loss

                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(
                        model.parameters(), config.optim.grad_clip)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        train_loss = train_loss / config.training.gradient_accumulation_steps

        if accelerator.sync_gradients:
            pbar.set_postfix({
                'train_loss': train_loss.item(),
                'lr': lr_scheduler.get_lr()[0],
                'step': step,
            })
            writer.add_scalar("training_loss", train_loss, step)
            
            if accelerator.is_main_process and step >= initial_step:
                state['ema'].update(model.parameters())
                if step % config.training.log_freq == 0:
                    logger.info(
                        f'step {step} | training_loss {train_loss.item():.5f}')
                    # writer.add_scalar("training_loss", train_loss, step)
                    
                if step % config.training.snapshot_freq == 0 or step == num_train_steps:
                    # Save the checkpoint.
                    accelerator.save_state(
                        str(checkpoint_dir / f'checkpoint_s{step}'))
                    # Export EMA weights as the snapshot model.
                    model_to_save = accelerator.unwrap_model(model)
                    ema.copy_to(model_to_save.parameters())
                    torch.save(model_to_save.state_dict(), str(
                        checkpoint_dir / f'score_model_s{step}.pth'))
    pbar.close()


if __name__ == "__main__":
    # CLI setup: one required ml_collections config file plus string flags.
    FLAGS = flags.FLAGS

    config_flags.DEFINE_config_file(
        "config", None, "Training configuration.", lock_config=True)
    # Register the plain string flags data-driven style.
    for _flag_name, _flag_default, _flag_help in (
        ("workdir", None, "Work directory."),
        ("eval_folder", "eval",
         "The folder name for storing evaluation results"),
        ("comment", None, "complementary info of exp"),
    ):
        flags.DEFINE_string(_flag_name, _flag_default, _flag_help)
    flags.mark_flags_as_required(["workdir", "config"])

    app.run(main)
