import math
import numpy as np
from mindspore import nn
import mindspore as ms
import mindspore.common.dtype as mstype
from mindspore import ops
from mindspore.ops import operations as P, constexpr
from .pos_embed import *
from data_utils.data_config import DataManager


class PosEmbedMM(nn.Cell):
    """Sinusoidal multi-modal position embedding (no cls token).

    Precomputes one fixed positional-embedding table per modality and, at
    construct time, returns the table selected by each sample's modality
    index.
    """

    def __init__(self, manager: DataManager, embed_dim):
        super().__init__()
        self.embed_dim = embed_dim
        self.manager = manager
        self.seq_len = manager.seq_len

        # One [seq_len, embed_dim] sin-cos table per modality; modalities
        # shorter than the shared sequence length are zero-padded.
        embeds = np.zeros((manager.n_modality, manager.seq_len, embed_dim))
        for idx, modality in enumerate(manager):
            embeds[idx, :modality.seq_len, :] = get_nd_sincos_pos_embed(
                embed_dim, modality.grid_size)

        self.pos_embed = ms.Tensor(embeds, dtype=mstype.float32)
        self.gather = P.Gather()
        self.expanddim = P.ExpandDims()

    def construct(self, m_idx):
        # Look up the embedding table for each modality index along axis 0.
        return self.gather(self.pos_embed, m_idx, 0)


class AttentionMaskMM(nn.Cell):
    """Per-modality block-diagonal attention masks.

    For each modality the mask allows attention only within each repeated
    group of that modality's sub-sequence (ones inside each group's square
    on the diagonal, zeros elsewhere).
    """

    def __init__(self, manager: DataManager):
        super().__init__()
        self.manager = manager
        self.seq_len = manager.seq_len

        # Build one [L, L] block-diagonal mask per modality.
        masks = []
        for idx, _ in enumerate(manager):
            group_len = manager.m_seq_lens[idx]
            n_groups = manager.group_size[idx]
            block = np.zeros((self.seq_len, self.seq_len))
            for g in range(n_groups):
                start, stop = g * group_len, (g + 1) * group_len
                block[start:stop, start:stop] = 1
            masks.append(block)
        stacked = np.stack(masks, axis=0)  # [M, L, L]
        self.masks = ms.Tensor(stacked, dtype=mstype.float32)
        self.gather = P.Gather()

    def construct(self, m_idx):
        # Select each sample's mask by its modality index along axis 0.
        return self.gather(self.masks, m_idx, 0)


# NOTE(review): dead code below — appears superseded by MAELossMM; consider
# deleting once confirmed unused.
# class MSELossMMChannel(nn.Cell):
#     """MSELoss with channel mask"""
#     def __init__(self, manager: DataManager, norm_pixel_loss=True):
#         super().__init__()
#         self.seq_len = manager.seq_len
#         self.total_dim = manager.total_dim
#         # build channel masks
#         all_masks = np.zeros((manager.n_modality, manager.total_dim))
#         for m_idx, m in enumerate(manager):
#             lo, hi = m.get_range(m_idx)
#             all_masks[m_idx, lo:hi] = 1
#         self.channel_masks = ms.Tensor(all_masks, dtype=mstype.float32)

#         self.gather = P.Gather()
#         self.add_loss = P.Add()
#         self.sub = P.Sub()
#         self.divide = P.RealDiv()
#         self.pow = P.Pow()
#         self.square = P.Square()
#         self.cast = P.Cast()
#         self.mul = P.Mul()
#         self.sum = P.ReduceSum()
#         self.sum_keep = P.ReduceSum(keep_dims=True)
#         self.expand_dim = P.ExpandDims()
#         self.tile = P.Tile()
#         self.norm_pixel_loss = norm_pixel_loss

#     def construct(self, pred, target, mask, m_idx):
#         """mse loss construct."""
#         pred = self.cast(pred, mstype.float32)
#         target = self.cast(target, mstype.float32)
#         mask = self.cast(mask, mstype.float32)

#         channel_mask = self.gather(self.channel_masks, m_idx, 0)
#         channel_cnt = self.sum_keep(channel_mask, -1)
#         channel_cnt = self.expand_dim(channel_cnt, 1)
#         # tile channel mask
#         channel_mask = self.expand_dim(channel_mask, 1)
#         channel_mask = self.tile(channel_mask, (1, self.seq_len, 1))

#         if self.norm_pixel_loss:
#             mean = self.divide(self.sum_keep(target, -1), channel_cnt)
#             sub = self.mul(self.sub(target, mean), channel_mask)
#             sub_pow = self.pow(sub, 2)
#             var = self.divide(self.sum_keep(sub_pow, -1), channel_cnt)
#             var = self.add_loss(var, 1e-6)
#             std = self.pow(var, 0.5)
#             target = self.divide(sub, std)

#         pred = self.mul(pred, channel_mask)
#         res = self.sub(pred, target)
#         recon_loss = self.square(res)
#         recon_loss = self.divide(self.sum(recon_loss, -1), channel_cnt)
#         loss_mask = self.mul(recon_loss, mask)
#         loss_sum = self.sum(loss_mask)
#         mask_sum = self.sum(mask)
#         loss = self.divide(loss_sum, mask_sum)
#         return loss


class MAELossMM(nn.Cell):
    """Masked reconstruction loss for multi-modal MAE training.

    Computes an l1 or l2 loss between ``pred`` and ``target`` over the
    channels marked valid by ``target_mask``. With ``norm_pixel_loss``, the
    target is normalized per token (zero mean / unit std over its valid
    channels) before the loss is taken.

    Args:
        manager: data manager providing ``seq_len`` and ``total_dim``.
        norm_pixel_loss: normalize targets per token before the loss.
        loss_type: ``'l2'`` (squared error) or ``'l1'`` (absolute error).

    Raises:
        ValueError: if ``loss_type`` is not ``'l1'`` or ``'l2'``.
    """

    def __init__(self, manager: DataManager, norm_pixel_loss=True, loss_type='l2'):
        super().__init__()
        self.seq_len = manager.seq_len
        self.total_dim = manager.total_dim

        self.gather = P.Gather()
        self.add_loss = P.Add()
        self.sub = P.Sub()
        self.divide = P.RealDiv()
        self.pow = P.Pow()
        self.square = P.Square()
        self.cast = P.Cast()
        self.mul = P.Mul()
        self.sum = P.ReduceSum()
        self.mean = P.ReduceMean()
        self.sum_keep = P.ReduceSum(keep_dims=True)
        self.expand_dim = P.ExpandDims()
        self.tile = P.Tile()
        self.reshape = P.Reshape()
        self.norm_pixel_loss = norm_pixel_loss
        self.equal = P.Equal()
        self.greater = P.Greater()
        # Raise instead of assert: asserts are stripped under `python -O`.
        if loss_type not in ('l1', 'l2'):
            raise ValueError(f"loss_type must be 'l1' or 'l2', got {loss_type!r}")
        self.loss_fn = P.Square() if loss_type == 'l2' else P.Abs()

    def construct(self, pred, target, target_mask):
        """Return the scalar masked reconstruction loss.

        Args:
            pred: reconstruction, same shape as ``target`` (presumably
                [B, L, D] — TODO confirm against caller).
            target: ground-truth values.
            target_mask: per-channel weights; entries > 0 mark valid channels
                and the raw values also reweight the per-channel loss.
        """
        pred = self.cast(pred, mstype.float32)
        target = self.cast(target, mstype.float32)
        target_mask = self.cast(target_mask, mstype.float32)
        # Binary validity mask derived from the (possibly weighted) mask.
        target_mask_binary = self.cast(self.greater(target_mask, 0), mstype.float32)
        channel_cnt = self.sum_keep(target_mask_binary, -1)
        # Epsilon guards against division by zero on fully-masked tokens.
        channel_cnt_div = self.add_loss(channel_cnt, 1e-6)

        if self.norm_pixel_loss:
            # Per-token mean/std computed over valid channels only.
            mean = self.divide(self.sum_keep(target, -1), channel_cnt_div)
            sub = self.mul(self.sub(target, mean), target_mask_binary)
            sub_pow = self.pow(sub, 2)
            var = self.divide(self.sum_keep(sub_pow, -1), channel_cnt_div)
            var = self.add_loss(var, 1e-6)
            std = self.pow(var, 0.5)
            target = self.divide(sub, std)

        # Zero invalid channels so they contribute nothing to the residual.
        target = self.mul(target, target_mask_binary)
        pred = self.mul(pred, target_mask_binary)
        res = self.sub(pred, target)
        recon_loss = self.loss_fn(res)
        recon_loss = self.mul(recon_loss, target_mask)  ## target mask reweighting
        # Normalize each instance by its total mask weight, then average.
        loss_per_instance = self.divide(self.sum(recon_loss, (-2, -1)), self.sum(target_mask, (-2, -1)))
        mean_loss = self.mean(loss_per_instance)
        return mean_loss


class PatchwiseGuide(nn.Cell):
    """Upweights the loss on patches flagged by a guide mask.

    A patch with any positive entry in ``mask_guide`` has its target weights
    scaled by ``guide_scale``; all other patches keep a multiplier of 1.
    """

    def __init__(self, guide_scale=2.0):
        super().__init__()
        self.guide_scale = guide_scale
        self.greater = P.Greater()
        self.any = P.ReduceAny(keep_dims=True)
        self.cast = P.Cast()
        self.mul = P.Mul()
        self.add = P.Add()
        self.ones_like = P.OnesLike()

    def construct(self, target_mask, mask_guide):
        # 1 where the patch contains any guided channel, else 0.  [B, L, 1]
        flagged = self.any(self.greater(mask_guide, 0), -1)
        flagged = self.cast(flagged, mstype.float32)

        # Per-patch multiplier: guide_scale on flagged patches, 1 elsewhere.
        multiplier = self.mul(flagged, self.guide_scale - 1)
        multiplier = self.add(self.ones_like(multiplier), multiplier)

        return self.mul(target_mask, multiplier)


class MAELossMMGuide(nn.Cell):
    """Masked reconstruction loss with guide-based patch reweighting.

    Like ``MAELossMM`` but the per-channel loss is weighted by a guide
    function (currently only patchwise: ``PatchwiseGuide``) that upweights
    patches flagged by ``mask_guide``. Note that here ``target_mask`` is
    used directly (not binarized) for channel counts and normalization —
    presumably it is already 0/1 valued; TODO confirm against callers.

    Args:
        manager: data manager providing ``seq_len`` and ``total_dim``.
        norm_pixel_loss: normalize targets per token before the loss.
        loss_type: ``'l2'`` (squared error) or ``'l1'`` (absolute error).
        guide_mode: guide strategy; only ``'patch'`` is implemented.
        guide_scale: loss multiplier applied to guided patches.

    Raises:
        ValueError: if ``loss_type`` is not ``'l1'`` or ``'l2'``.
        NotImplementedError: for an unsupported ``guide_mode``.
    """

    def __init__(self, manager: DataManager, norm_pixel_loss=True, loss_type='l2', guide_mode='patch', guide_scale=2.0):
        super().__init__()
        self.seq_len = manager.seq_len
        self.total_dim = manager.total_dim

        self.gather = P.Gather()
        self.add_loss = P.Add()
        self.sub = P.Sub()
        self.divide = P.RealDiv()
        self.pow = P.Pow()
        self.square = P.Square()
        self.cast = P.Cast()
        self.mul = P.Mul()
        self.sum = P.ReduceSum()
        self.mean = P.ReduceMean()
        self.sum_keep = P.ReduceSum(keep_dims=True)
        self.expand_dim = P.ExpandDims()
        self.tile = P.Tile()
        self.reshape = P.Reshape()
        self.norm_pixel_loss = norm_pixel_loss
        self.equal = P.Equal()
        self.greater = P.Greater()
        # Raise instead of assert: asserts are stripped under `python -O`.
        if loss_type not in ('l1', 'l2'):
            raise ValueError(f"loss_type must be 'l1' or 'l2', got {loss_type!r}")
        self.loss_fn = P.Square() if loss_type == 'l2' else P.Abs()
        if guide_mode == 'patch':
            self.guide_fn = PatchwiseGuide(guide_scale)
        else:
            raise NotImplementedError

    def construct(self, pred, target, target_mask, mask_guide):
        """Return the scalar guided masked reconstruction loss.

        Args:
            pred: reconstruction, same shape as ``target`` (presumably
                [B, L, D] — TODO confirm against caller).
            target: ground-truth values.
            target_mask: per-channel validity mask.
            mask_guide: per-channel guide mask; patches with any positive
                entry are upweighted by the guide function.
        """
        pred = self.cast(pred, mstype.float32)
        target = self.cast(target, mstype.float32)
        target_mask = self.cast(target_mask, mstype.float32)
        # Combined weight: target_mask scaled up on guided patches.
        target_weight = self.guide_fn(target_mask, mask_guide)
        channel_cnt = self.sum_keep(target_mask, -1)
        # Epsilon guards against division by zero on fully-masked tokens.
        channel_cnt_div = self.add_loss(channel_cnt, 1e-6)

        if self.norm_pixel_loss:
            # Per-token mean/std computed over valid channels only.
            mean = self.divide(self.sum_keep(target, -1), channel_cnt_div)
            sub = self.mul(self.sub(target, mean), target_mask)
            sub_pow = self.pow(sub, 2)
            var = self.divide(self.sum_keep(sub_pow, -1), channel_cnt_div)
            var = self.add_loss(var, 1e-6)
            std = self.pow(var, 0.5)
            target = self.divide(sub, std)

        # Zero invalid channels so they contribute nothing to the residual.
        target = self.mul(target, target_mask)
        pred = self.mul(pred, target_mask)
        res = self.sub(pred, target)
        recon_loss = self.loss_fn(res)
        recon_loss = self.mul(recon_loss, target_weight)  ## target mask reweighting
        # Normalize each instance by its total guided weight, then average.
        loss_per_instance = self.divide(self.sum(recon_loss, (-2, -1)), self.sum(target_weight, (-2, -1)))
        mean_loss = self.mean(loss_per_instance)
        return mean_loss
