import torch
from reflow.sde_lib import RectifiedFlow
from reflow.utils import decode_latents, get_model_fn
from reflow.losses.msssim_l1 import ms_ssim_l1_loss
import random


def get_rectified_flow_loss_fn(
    sde,
    train,
    reduce_mean=True,
    eps=1e-3
):
    """Build a rectified-flow training/evaluation loss closure.

    Args:
      sde: RectifiedFlow-like object carrying the configuration read here:
        `reflow_flag`, `reflow_t_schedule`, `reflow_loss`, `zt_compress_rate`,
        `T`, and (on the reflow path) `lpips_model` / `codec`.
      train: Whether the wrapped model runs in training mode.
      reduce_mean: If True, average the loss over data dimensions; otherwise
        use 0.5 * sum (the usual score-matching convention).
      eps: Smallest timestep sampled, keeping t strictly above zero.

    Returns:
      loss_fn(model, batch) -> scalar loss tensor.
    """
    reduce_op = torch.mean if reduce_mean else lambda *args, **kwargs: 0.5 * \
        torch.sum(*args, **kwargs)

    def loss_fn(model, batch):
        """Compute the loss function.

        Args:
          model: A score model.
          batch: A mini-batch of training data. Must contain 'z1'; when
            sde.reflow_flag is set it must also contain the paired 'z0'.
            All remaining entries are forwarded to the model as conditioning.

        Returns:
          loss: A scalar that represents the average loss value across the mini-batch.
        """
        z1 = batch.pop('z1')
        device = z1.device
        if sde.reflow_flag:
            # reflow/distillation: (z0, z1) pairs come precomputed in the batch
            z0 = batch.pop('z0')
        else:
            z0 = sde.get_z0(z1).to(device)
        # Whatever is left in the batch is model conditioning.
        # Fix: `condition` was previously only bound on the reflow branch,
        # raising NameError for standard rectified-flow training.
        condition = batch
        zshape = z0.shape
        bs = zshape[0]

        if sde.reflow_flag:
            # * distill for t = 0 (k=1)
            if sde.reflow_t_schedule == 't0':
                t = torch.zeros(bs, device=device) * (sde.T - eps) + eps
            # * reverse distill for t=1 (fast embedding)
            elif sde.reflow_t_schedule == 't1':
                t = torch.ones(bs, device=device) * (sde.T - eps) + eps
            # * train new rectified flow with reflow
            elif sde.reflow_t_schedule == 'uniform':
                t = torch.rand(bs, device=device) * (sde.T - eps) + eps
            # * k > 1 distillation: snap t to a k-point grid
            elif type(sde.reflow_t_schedule) == int:
                t = torch.randint(0, sde.reflow_t_schedule, (bs, ), device=device) * (
                    sde.T - eps) / sde.reflow_t_schedule + eps
            elif sde.reflow_t_schedule == 'other':  # TODO: temporary schedule that trains on a small t range only
                t = torch.rand(bs, device=device) * 0.2
            else:
                raise NotImplementedError('non-existing reflow t schedule')
        else:
            # standard rectified flow loss
            t = torch.rand(bs, device=device) * (sde.T - eps) + eps

        # zt_compress_rate rescales t before interpolation, squeezing the
        # interpolation times into (0, zt_compress_rate]. Example: t=0.8 with
        # zt_compress_rate=0.2 gives zt = 0.16 * z1 + 0.84 * z0 (0.16 = 0.8*0.2).
        # Fix: a None rate previously crashed the multiply below; it now means
        # "no compression" (rate 1.0).
        zt_compress_rate = sde.zt_compress_rate
        if zt_compress_rate is None:
            zt_compress_rate = 1.0
        else:
            assert 0.0 < zt_compress_rate <= 1.0
        t_expand = t.view(-1, 1, 1, 1) * zt_compress_rate
        zt = t_expand * z1 + (1.-t_expand) * z0
        # Rectified-flow regression target: the straight-line velocity z1 - z0.
        target = z1 - z0

        model_fn = get_model_fn(model, train=train)
        # diffusers-style models expect timesteps scaled into [0, 999].
        vec_t = (999*t)
        # compatible with diffusers: output object exposes `.sample`
        score = model_fn(zt, timestep=vec_t, **condition).sample

        def prepare_lpips_input(codec, in0, in1):
            # Decode latents to image space so perceptual losses see pixels.
            # NOTE: a previous revision cropped a random patch here; full
            # frames are used now.
            in0 = decode_latents(codec, in0, cpu=False)
            in1 = decode_latents(codec, in1, cpu=False)
            return in0, in1

        if sde.reflow_flag:
            # we found LPIPS loss is the best for distillation when k=1; but good to have a try
            if sde.reflow_loss == 'l2':
                # train new rectified flow with reflow or distillation with L2 loss
                losses = torch.square(score - target)
            elif sde.reflow_loss == 'l1':
                # same but with L1 loss
                losses = (score - target).abs()
            elif sde.reflow_loss == 'lpips':
                # perceptual loss between the one-step prediction z0+score and z1
                losses = sde.lpips_model(*prepare_lpips_input(sde.codec, z0+score, z1), normalize=True)
            elif sde.reflow_loss == 'lpips+l2':
                lpips_losses = sde.lpips_model(*prepare_lpips_input(sde.codec, z0+score, z1), normalize=True).view(zshape[0], 1)
                l2_losses = torch.square(score - target).view(zshape[0], -1).mean(dim=1, keepdim=True)
                losses = lpips_losses + l2_losses
            elif sde.reflow_loss == 'lpips+l1':
                lpips_losses = sde.lpips_model(*prepare_lpips_input(sde.codec, z0+score, z1), normalize=True).view(zshape[0], 1)
                l1_losses = (score - target).abs().view(zshape[0], -1).mean(dim=1, keepdim=True)
                losses = lpips_losses + l1_losses
            elif sde.reflow_loss == 'msssim+l1':
                losses = ms_ssim_l1_loss(*prepare_lpips_input(sde.codec, z0+score, z1)).unsqueeze(0)
            else:
                raise NotImplementedError('non-existing reflow loss')
        else:
            losses = torch.square(score - target)

        losses = reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)

        # TODO: optional re-weighting of extreme timesteps; currently disabled
        # (loss_weight is None so this branch never runs).
        loss_weight = None
        if loss_weight is not None:
            mask = (t < 0.2*(sde.T-eps)+eps) | (t > 0.8*(sde.T-eps)+eps)
            normalizer = mask.sum() * loss_weight + (bs - mask.sum())
            losses = torch.where(mask, loss_weight*losses, losses)
            losses = losses / normalizer

        loss = torch.mean(losses)
        return loss

    return loss_fn


def pred_score(sample, timesteps, prompt_embeds, score_model, controlnet=None,
               controlnet_cond=None, controlnet_conditioning_scale=1.0):
    """Predict the score/velocity for `sample`, optionally with a ControlNet.

    Fix: this module-level function previously declared a `self` first
    parameter (a leftover from a method) and read
    `self.controlnet_conditioning_scale`, while its caller invoked it without
    `self`, misaligning every positional argument. The scale is now an
    explicit keyword parameter defaulting to 1.0.

    Args:
      sample: Noisy latent input to the score model.
      timesteps: Timestep tensor/value forwarded to both models.
      prompt_embeds: Text/conditioning embeddings (encoder hidden states).
      score_model: Model whose output object exposes `.sample`.
      controlnet: Optional ControlNet; when given, its residuals are injected
        into the score model.
      controlnet_cond: Conditioning image/tensor for the ControlNet.
      controlnet_conditioning_scale: Multiplier applied to all ControlNet
        residuals (1.0 = unscaled).

    Returns:
      The predicted score tensor (`.sample` of the model output).
    """
    if controlnet:
        # return_dict=False yields (down_block_res_samples, mid_block_res_sample)
        down_block_res_samples, mid_block_res_sample = controlnet(
            sample,
            timesteps,
            encoder_hidden_states=prompt_embeds,
            controlnet_cond=controlnet_cond,
            return_dict=False,
        )
        down_block_res_samples = [
            down_block_res_sample * controlnet_conditioning_scale
            for down_block_res_sample in down_block_res_samples
        ]
        mid_block_res_sample *= controlnet_conditioning_scale
        score_pred = score_model(
            sample,
            timesteps,
            encoder_hidden_states=prompt_embeds,
            down_block_additional_residuals=down_block_res_samples,
            mid_block_additional_residual=mid_block_res_sample,
        ).sample
    else:
        score_pred = score_model(
            sample,
            timesteps,
            encoder_hidden_states=prompt_embeds,
        ).sample
    return score_pred


class ReFlowLoss:
    """Rectified-flow / reflow loss packaged as a callable object.

    Keeps the SDE configuration and the reduction mode as instance state and
    evaluates one mini-batch per call, optionally under EMA weights.
    """

    def __init__(self, sde, train, reduce_mean=True) -> None:
        # `sde` carries schedule/loss configuration (reflow_flag, T, eps, ...).
        self.sde = sde
        self.train = train
        self.reduce_mean = reduce_mean
        if reduce_mean:
            self.reduce_op = torch.mean
        else:
            # 0.5 * sum is the usual score-matching convention.
            self.reduce_op = lambda *args, **kwargs: 0.5 * torch.sum(*args, **kwargs)

    def __call__(self, state, batch):
        """Compute the loss; in eval mode, temporarily swap in EMA weights."""
        if self.train:
            return self.compute_loss(state, batch)
        with torch.no_grad():
            model = state.get('score_model', None)
            model = state.get('controlnet', model)
            assert model is not None
            ema = state.get('ema', None)
            if not ema:
                return self.compute_loss(state, batch)
            ema.store(model.parameters())
            ema.copy_to(model.parameters())
            loss = self.compute_loss(state, batch)
            ema.restore(model.parameters())
            return loss

    def sample_t(self, bs, device):
        """Draw `bs` timesteps in (eps, T] according to the reflow schedule."""
        schedule, T, eps = self.sde.reflow_t_schedule, self.sde.T, self.sde.eps
        span = T - eps

        if not self.sde.reflow_flag:
            # standard rectified flow: t ~ Uniform(eps, T)
            return torch.rand(bs, device=device) * span + eps
        # distill for t = 0 (k = 1)
        if schedule == 't0':
            return torch.zeros(bs, device=device) * span + eps
        # reverse distill for t = 1 (fast embedding)
        if schedule == 't1':
            return torch.ones(bs, device=device) * span + eps
        # train a new rectified flow with reflow
        if schedule == 'uniform':
            return torch.rand(bs, device=device) * span + eps
        # k > 1 distillation: snap t onto a k-point grid
        if type(schedule) is int:
            steps = torch.randint(0, schedule, (bs, ), device=device)
            return steps * span / schedule + eps
        raise NotImplementedError('non-existing reflow t schedule')

    def prepare_lpips_input(self, codec, in0, in1):
        """Decode both latents to image space for perceptual losses."""
        # NOTE: a previous revision cropped a random patch here; full frames
        # are used now.
        decoded0 = decode_latents(codec, in0, cpu=False)
        decoded1 = decode_latents(codec, in1, cpu=False)
        return decoded0, decoded1

    def get_loss(self, score_pred, score_target, z0):
        """Reduce prediction/target into a scalar loss per the configured type."""
        loss_type, t_schedule = self.sde.reflow_loss, self.sde.reflow_t_schedule
        codec = self.sde.codec
        bs = score_pred.shape[0]

        if not self.sde.reflow_flag:
            losses = torch.square(score_pred - score_target)
        elif loss_type == 'l2':
            # reflow / distillation with a plain L2 loss
            losses = torch.square(score_pred - score_target)
        elif loss_type == 'lpips':
            # LPIPS works best for k=1 distillation
            assert t_schedule == 't0'
            pred_img, target_img = self.prepare_lpips_input(
                codec, z0 + score_pred, z0 + score_target)
            losses = self.sde.lpips_model(pred_img, target_img, normalize=True)
        elif loss_type == 'lpips+l2':
            assert t_schedule == 't0'
            pred_img, target_img = self.prepare_lpips_input(
                codec, z0 + score_pred, z0 + score_target)
            lpips_part = self.sde.lpips_model(
                pred_img, target_img, normalize=True).view(bs, 1)
            l2_part = torch.square(
                score_pred - score_target).view(bs, -1).mean(dim=1, keepdim=True)
            losses = lpips_part + l2_part
        else:
            raise NotImplementedError('non-existing reflow loss')

        losses = self.reduce_op(losses.reshape(losses.shape[0], -1), dim=-1)
        return torch.mean(losses)

    def compute_loss(self, state, batch):
        """Interpolate zt, run the score model, and return the scalar loss."""
        z1 = batch['z1']
        device = z1.device
        if self.sde.reflow_flag:
            z0 = batch['z0']
        else:
            z0 = self.sde.get_z0(z1).to(device)
        bs = z0.shape[0]

        t = self.sample_t(bs, device)
        t_view = t.view(-1, 1, 1, 1)
        # linear interpolation between the endpoints; target is the velocity
        zt = t_view * z1 + (1.-t_view) * z0
        target = z1 - z0
        # scale timesteps into the [0, 999] range expected by the model
        t = (999*t)

        score_pred = pred_score(zt, t, batch['encoder_hidden_states'], state['score_model'], state.get(
            'controlnet', None), batch.get('controlnet_cond', None))

        return self.get_loss(score_pred, target, z0)


def get_loss_fn(
    sde,
    train,
    reduce_mean=False,
    continuous=True,
    likelihood_weighting=False,
    optimize_fn=None,
):
    """Return a `(state, batch) -> loss` function for rectified-flow training.

    Only `RectifiedFlow` SDEs are supported. `continuous`,
    `likelihood_weighting`, and `optimize_fn` are accepted for API
    compatibility but are not used here.

    Raises:
      ValueError: if `sde` is not a `RectifiedFlow` instance.
    """
    if not isinstance(sde, RectifiedFlow):
        raise ValueError(
            f"Discrete training for {sde.__class__.__name__} is not recommended.")
    loss_fn = get_rectified_flow_loss_fn(
        sde, train, reduce_mean=reduce_mean)

    def loss_fn_wrapper(state, batch):
        """Evaluate `loss_fn`; in eval mode, temporarily swap in EMA weights."""
        model = state['model']
        if train:
            return loss_fn(model, batch)
        with torch.no_grad():
            ema = state.get('ema', None)
            if not ema:
                return loss_fn(model, batch)
            ema.store(model.parameters())
            ema.copy_to(model.parameters())
            loss = loss_fn(model, batch)
            ema.restore(model.parameters())
            return loss

    return loss_fn_wrapper


# # ! deprecated
# def get_step_fn(sde, train, optimize_fn=None, reduce_mean=False, continuous=True, likelihood_weighting=False):
#     """Create a one-step training/evaluation function.

#     Args:
#       sde: An `sde_lib.SDE` object that represents the forward SDE.
#       optimize_fn: An optimization function.
#       reduce_mean: If `True`, average the loss across data dimensions. Otherwise sum the loss across data dimensions.
#       continuous: `True` indicates that the model is defined to take continuous time steps.
#       likelihood_weighting: If `True`, weight the mixture of score matching losses according to
#             https://arxiv.org/abs/2101.09258; otherwise use the weighting recommended by our paper.

#     Returns:
#       A one-step function for training or evaluation.
#     """

#     if isinstance(sde, RectifiedFlow):
#         loss_fn = get_rectified_flow_loss_fn(
#             sde, train, reduce_mean=reduce_mean)
#     else:
#         raise ValueError(
#             f"Discrete training for {sde.__class__.__name__} is not recommended.")

#     def step_fn(state, batch):
#         """Running one step of training or evaluation.

#         This function will undergo `jax.lax.scan` so that multiple steps can be pmapped and jit-compiled together
#         for faster execution.

#         Args:
#           state: A dictionary of training information, containing the score model, optimizer,
#            EMA status, and number of optimization steps.
#           batch: A mini-batch of training/evaluation data.

#         Returns:
#           loss: The average loss value of this state.
#         """
#         model = state['model']
#         if train:
#             optimizer = state['optimizer']
#             optimizer.zero_grad()
#             loss = loss_fn(model, batch)
#             loss.backward()
#             optimize_fn(optimizer, model.parameters(), step=state['step'])
#             state['step'] += 1
#             state['ema'].update(model.parameters())
#         else:
#             with torch.no_grad():
#                 ema = state['ema']
#                 ema.store(model.parameters())
#                 ema.copy_to(model.parameters())
#                 loss = loss_fn(model, batch)
#                 ema.restore(model.parameters())

#         return loss

#     return step_fn
