import sys
sys.path.append('.')
from absl import app
from absl import flags
from ml_collections.config_flags import config_flags
from pathlib import Path
from torch.utils.tensorboard.writer import SummaryWriter
import torch
from torch.utils.data import DataLoader
from tqdm.auto import trange
from torchvision.utils import make_grid, save_image
import numpy as np
from math import ceil
from loguru import logger

from accelerate import Accelerator
from diffusers.optimization import get_scheduler

from reflow.utils import create_models, to_device, cycle, set_seed, decode_latents
from reflow.data.utils import LMDB_ndarray
from reflow.data.dataset import get_reflow_dataset
from reflow.sde_lib import RectifiedFlow
from reflow.ema import ExponentialMovingAverage
from reflow.sample import get_sampling_fn
from reflow.loss import get_loss_fn

def main(argv):
    """Train a rectified-flow ("reflow") score model.

    Configuration comes from ``FLAGS.config``; all artifacts (loguru log file,
    tensorboard events, checkpoints, image samples) are written under
    ``FLAGS.workdir``.

    Args:
        argv: Unused positional CLI arguments forwarded by ``app.run``.
    """
    config, workdir = FLAGS.config, FLAGS.workdir
    workdir = Path(workdir)

    set_seed(config.seed)

    # Create directories for experimental logs.
    sample_dir = workdir/"samples"
    sample_dir.mkdir(parents=True, exist_ok=True)

    tb_dir = workdir/"tensorboard"
    tb_dir.mkdir(exist_ok=True)
    writer = SummaryWriter(str(tb_dir))

    accelerator = Accelerator(
        gradient_accumulation_steps=config.training.gradient_accumulation_steps,
        mixed_precision=config.training.mixed_precision,
    )

    # Log to file / dump the config only on the main process to avoid
    # duplicated lines under multi-process training.
    if accelerator.is_main_process:
        logger.add(str(workdir / 'exp.log'))
        logger.info(f'\n{config}')
        logger.info(f'comment : {FLAGS.comment}')

    tokenizer, text_encoder, vae, score_model = create_models(config)

    # The frozen VAE / text encoder run in the reduced mixed-precision dtype;
    # only the score model is trained (and is handled by `accelerator.prepare`).
    weight_dtype = torch.float32
    if config.training.mixed_precision == "fp16":
        weight_dtype = torch.float16
    if config.training.mixed_precision == "bf16":
        weight_dtype = torch.bfloat16
    vae.to(accelerator.device, dtype=weight_dtype)
    text_encoder.to(accelerator.device, dtype=weight_dtype)

    model = score_model

    # EMA shadow weights live only on the main process; other ranks keep None
    # and never touch it (all EMA usage below is guarded by is_main_process).
    ema = None
    if accelerator.is_main_process:
        ema = ExponentialMovingAverage(model.parameters(), decay=config.ema.decay)

    # Initialize the optimizer (optionally bitsandbytes 8-bit AdamW).
    if config.optim.use_8bit_adam:
        try:
            import bitsandbytes as bnb
        except ImportError:
            raise ImportError(
                "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
            )
        optimizer_cls = bnb.optim.AdamW8bit
    else:
        optimizer_cls = torch.optim.AdamW
    optimizer = optimizer_cls(
        model.parameters(),
        lr=config.optim.lr,
        betas=config.optim.betas,
        weight_decay=config.optim.weight_decay,
        eps=config.optim.eps,
    )

    # The LR scheduler is stepped once per micro-batch, hence warmup and total
    # steps are scaled by the gradient-accumulation factor.
    lr_scheduler = get_scheduler(
        config.optim.lr_scheduler,
        optimizer=optimizer,
        num_warmup_steps=config.optim.warmup *
        config.training.gradient_accumulation_steps,
        num_training_steps=config.training.num_steps *
        config.training.gradient_accumulation_steps,
    )

    train_ds = get_reflow_dataset(
        data_root=config.data.train_root,
        tokenizer=tokenizer,
        src_type='lmdb',
        train=True,
        random_flip=config.data.random_flip,
        p_uncond=config.training.p_uncond,
    )
    eval_ds = get_reflow_dataset(
        data_root=config.data.eval_root,
        tokenizer=tokenizer,
        src_type='lmdb',
    )
    train_dl = DataLoader(
        train_ds,
        batch_size=config.training.batch_size,
        shuffle=True,
        num_workers=config.data.dl_workers,
        drop_last=True,
    )
    eval_dl = DataLoader(
        eval_ds,
        batch_size=config.training.batch_size,
        shuffle=False,
        num_workers=config.data.dl_workers,
        drop_last=True,
    )

    # NOTE: eval_dl is deliberately NOT prepared — eval batches are moved to
    # the accelerator device manually via `to_device` below.
    model, optimizer, train_dl, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dl, lr_scheduler
    )
    train_iter = cycle(train_dl)
    eval_iter = cycle(eval_dl)

    # Resume: the global step is parsed from the checkpoint directory name,
    # which follows the pattern `checkpoint_s{step}`.
    initial_step = 1
    ckpt_path = config.training.ckpt_path
    if ckpt_path is not None:
        global_step = int(ckpt_path.split(
            '/')[-1].split('_')[-1][1:])  # checkpoint_s{xxx}
        initial_step = global_step + 1
        accelerator.load_state(f'{ckpt_path}')
        if accelerator.is_main_process:
            # Rebuild the EMA from the saved EMA snapshot next to the
            # checkpoint; the state_dict values become the shadow parameters.
            ema_path = str(Path(ckpt_path).parent / f'score_model_s{global_step}.pth')
            ema_params = torch.load(ema_path, map_location='cpu').values()
            ema = ExponentialMovingAverage(ema_params, decay=config.ema.decay)
    if ema:
        ema.to(accelerator.device)

    # `state` is the structure the loss functions consume (reflow.loss).
    state = dict(model=model, ema=ema)
    checkpoint_dir = workdir/'checkpoints'
    checkpoint_dir.mkdir(parents=True, exist_ok=True)

    if accelerator.is_main_process:
        accelerator.init_trackers("tmp", config=vars(config))

    sde = RectifiedFlow(
        init_type=config.sampling.init_type,
        noise_scale=config.sampling.init_noise_scale,
        reflow_flag=True,
        reflow_t_schedule=config.reflow.reflow_t_schedule,
        reflow_loss=config.reflow.reflow_loss,
        use_ode_sampler=config.sampling.use_ode_sampler,
        sample_N=config.sampling.sample_N,
        codec=vae,
        device=accelerator.device,
        zt_compress_rate=config.reflow.zt_compress_rate,
    )

    # Build the sampling function (NOTE: euler sampler by default).
    sampling_shape = (config.training.batch_size, config.data.num_channels,
                      config.data.image_size, config.data.image_size)
    sampling_fn = get_sampling_fn(
        config, sde, sampling_shape)

    reduce_mean = config.training.reduce_mean
    train_loss_fn = get_loss_fn(sde, train=True, reduce_mean=reduce_mean,)
    eval_loss_fn = get_loss_fn(sde, train=False, reduce_mean=reduce_mean,)

    num_train_steps = config.training.num_steps
    if accelerator.is_main_process:
        logger.info(f'REFLOW T SCHEDULE: {config.reflow.reflow_t_schedule}')
        logger.info(f'LOSS: {config.reflow.reflow_loss}')
        logger.info(f"Starting reflow training loop at step {initial_step}.")

    def prepare_step_fn_input(batch):
        # Split a data batch into the (z0, z1) latent pair and the text
        # conditioning; the remaining keys are tokenizer outputs that are fed
        # straight into the text encoder.
        z0 = batch.pop('noise')
        z1 = batch.pop('latent')
        encoder_hidden_states = text_encoder(**batch)[0]
        return {
            'z0': z0,
            'z1': z1,
            'encoder_hidden_states': encoder_hidden_states,
        }

    pbar = trange(1, num_train_steps+1, desc='Steps',
                  disable=not accelerator.is_local_main_process)

    for step in pbar:
        train_loss = torch.tensor(0.0, device=accelerator.device)
        for _ in range(config.training.gradient_accumulation_steps):
            batch = next(train_iter)
            # Fast-forward after resume: batches are still consumed so the
            # data stream replays to the same position, but no update happens.
            if step < initial_step:
                continue
            if config.training.randz0 == 'random':
                # 1-reflow: draw a fresh noise z0 for the same target z1.
                batch['noise'] = torch.randn_like(batch['noise'])
            with accelerator.accumulate(model):
                loss = train_loss_fn(state, prepare_step_fn_input(batch))
                # Cross-process average for logging only; backward() uses the
                # local loss. Assumes `loss` is a scalar tensor.
                avg_loss = accelerator.gather(loss.repeat(config.training.batch_size)).mean()
                train_loss += avg_loss

                accelerator.backward(loss)
                if accelerator.sync_gradients:
                    accelerator.clip_grad_norm_(
                        model.parameters(), config.optim.grad_clip)

                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        train_loss = train_loss / config.training.gradient_accumulation_steps

        if accelerator.sync_gradients:
            pbar.set_postfix({
                'train_loss': train_loss.item(),
                # FIX: get_lr() is deprecated outside of .step(); get_last_lr()
                # is the documented accessor for the LR actually applied.
                'lr': lr_scheduler.get_last_lr()[0],
                'step': step,
            })

            if accelerator.is_main_process and step >= initial_step:
                state['ema'].update(model.parameters())
                if step % config.training.log_freq == 0:
                    logger.info(
                        f'step {step} | training_loss {train_loss.item():.5f}')
                    writer.add_scalar("training_loss", train_loss, step)

                if step % config.training.eval_freq == 0:
                    eval_batch = to_device(next(eval_iter), accelerator.device)
                    eval_loss = eval_loss_fn(
                        state, prepare_step_fn_input(eval_batch))
                    logger.info(f'step {step} | eval_loss {eval_loss.item():.5f}')
                    writer.add_scalar("eval_loss", eval_loss, step)

                if step % config.training.snapshot_freq == 0 or step == num_train_steps:
                    # Save the resumable training state plus an EMA snapshot
                    # of the score model.
                    accelerator.save_state(
                        str(checkpoint_dir / f'checkpoint_s{step}'))
                    model_to_save = accelerator.unwrap_model(model)
                    # BUGFIX: unwrap_model shares parameter tensors with the
                    # live training model, so a bare copy_to() would
                    # permanently overwrite the trained weights with the EMA
                    # weights. Stash and restore them around the save, exactly
                    # as the sampling branch below does.
                    ema.store(model_to_save.parameters())
                    ema.copy_to(model_to_save.parameters())
                    torch.save(model_to_save.state_dict(), str(
                        checkpoint_dir / f'score_model_s{step}.pth'))
                    ema.restore(model_to_save.parameters())

                if step % config.training.sampling_freq == 0 or step == num_train_steps:
                    # Generate and save samples conditioned on an eval batch.
                    eval_batch = to_device(next(eval_iter), accelerator.device)

                    bs = eval_batch['input_ids'].shape[0]
                    captions = tokenizer.batch_decode(eval_batch['input_ids'])[:bs]
                    # Strip special tokens: drops a 4-char prefix and cuts 4
                    # chars before "<pad>" — assumes 4-char BOS/EOS markers;
                    # TODO(review): confirm against the actual tokenizer.
                    captions = [s[4:s.find("<pad>")-4] for s in captions]
                    with open(str(sample_dir / f'caption_s{step}.txt'), 'w') as cap_file:
                        cap_file.write('\n'.join(captions)+'\n')

                    eval_step_fn_input = prepare_step_fn_input(eval_batch)
                    z0 = eval_step_fn_input.pop('z0')
                    z1 = eval_step_fn_input.pop('z1')

                    # Sample with EMA weights, then restore training weights.
                    ema.store(model.parameters())
                    ema.copy_to(model.parameters())
                    sample, nfe = sampling_fn(
                        model,
                        z=None if config.sampling.randz0 == 'random' else z0,
                        condition=eval_step_fn_input,
                    )
                    ema.restore(model.parameters())

                    logger.info(f'number function evaluations : {nfe}')

                    # Grid of generated samples.
                    images = decode_latents(vae, sample)
                    nrow = ceil(np.sqrt(sample.shape[0]))
                    image_grid = make_grid(images, nrow, padding=2)
                    save_image(image_grid, str(sample_dir / f'sample_s{step}.jpg'))

                    # Grid of the ground-truth targets (decoded z1), for
                    # side-by-side comparison with the samples.
                    images = decode_latents(vae, z1)
                    image_grid = make_grid(images, nrow, padding=2)
                    save_image(image_grid, str(sample_dir / f'latent_s{step}.jpg'))

    pbar.close()


if __name__ == "__main__":
    FLAGS = flags.FLAGS

    # Command-line interface: run bookkeeping flags plus the experiment
    # configuration file (ml_collections ConfigDict, locked after parsing).
    flags.DEFINE_string("workdir", None, "Work directory.")
    flags.DEFINE_string("eval_folder", "eval",
                        "The folder name for storing evaluation results")
    flags.DEFINE_string("comment", None, "complementary info of exp")
    config_flags.DEFINE_config_file(
        "config", None, "Training configuration.", lock_config=True)

    # Both the config file and the work directory must be supplied.
    flags.mark_flags_as_required(["workdir", "config"])

    app.run(main)
