import math
import oneflow as torch
import logging
import oneflow.nn as nn
import oneflow.nn.functional as F


logger = logging.getLogger(__name__)

# def SpikeTriggeredSelection(spikes, mask=None):
#     """Select the corresponding states based on the triggered spikes.

#     Args:
#         spikes ([torch.Tensor]): tensor filled with 1 or 0, 1 means triggered states and 0 means ignored states.
#             it's shape is [b, t]
#         memory ([torch.Tensor]): [b, t, v]
#         mask([torch.Tensor], Optional), it's shape is [b, t]
#     Return:
#         compact_memory: [b, ct, v]
#         compact_mask: [b, ]
#     """


# class ConvlutionSampling(nn.Module):


class SumOrMeanDownSampling(nn.Module):
    """Temporal down-sampling by summing (or averaging) every n consecutive frames.

    Args:
        n (int): down-sampling factor along the time axis.
        apply_mean (bool): if True, divide each group sum by n (mean pooling);
            otherwise return the plain sum.
    """

    def __init__(self, n, apply_mean=True):
        super().__init__()

        self.n = n
        self.apply_mean = apply_mean

    def forward(self, inputs, inputs_mask):
        """Down-sample `inputs` along time by a factor of `self.n`.

        Args:
            inputs (torch.Tensor): [b, t, v] feature frames.
            inputs_mask (torch.BoolTensor): [b, 1, t], True for valid frames.
        Returns:
            outputs (torch.Tensor): [b, ceil(t/n), v] summed (or averaged) frames.
            outputs_mask (torch.BoolTensor): [b, 1, ceil(t/n)], True where the
                n-frame group contains at least one valid frame.
        """
        # Zero out invalid frames WITHOUT mutating the caller's tensor
        # (the previous version used masked_fill_ in place, clobbering `inputs`).
        inputs = inputs.masked_fill(~inputs_mask.transpose(1, 2), 0.0)
        b, t, v = inputs.size()

        inputs_mask = inputs_mask.int()
        if t % self.n != 0:
            # Right-pad time so it divides evenly by n; padded frames are
            # zero-valued and marked invalid in the mask.
            padding_length = self.n - t % self.n
            inputs = F.pad(inputs, pad=(0, 0, 0, padding_length), value=0.0)
            inputs_mask = F.pad(inputs_mask, pad=(0, padding_length), value=0.0)

        # Group consecutive n frames and reduce over the group axis.
        outputs = torch.sum(inputs.reshape(b, -1, self.n, v), dim=2)
        outputs_mask = torch.sum(inputs_mask.reshape(b, 1, -1, self.n), dim=-1) > 0

        # NOTE(review): the mean always divides by n, so a trailing group that
        # was partially padded is averaged over n, not over its valid count.
        return torch.div(outputs, self.n) if self.apply_mean else outputs, outputs_mask


class Conv1dDownSampling(nn.Module):
    """Temporal down-sampling with a strided depth-preserving Conv1d.

    The convolution keeps the feature width (`feat_dim` in == out) and strides
    by `n` along time, so the output time length is ceil(t / n).
    """

    def __init__(self, kernel_size, feat_dim, n):
        """
        Args:
            kernel_size (int): convolution kernel width; must be odd so that
                `kernel_size // 2` frames of left padding center the kernel.
            feat_dim (int): feature width (both in_channels and out_channels).
            n (int): down-sampling factor (convolution stride).
        """
        super().__init__()

        self.kernel_size = kernel_size
        self.n = n

        # Odd kernel required: the padding arithmetic in forward() relies on
        # kernel_size // 2 frames on each side summing to kernel_size - 1.
        assert self.kernel_size % 2 == 1

        self.conv_layer = nn.Conv1d(
            in_channels=feat_dim,
            out_channels=feat_dim,
            kernel_size=kernel_size,
            stride=n,
            padding=0
        )

    def forward(self, inputs, inputs_mask):
        """Convolve and stride `inputs` along time by a factor of `self.n`.

        Args:
            inputs (torch.Tensor): [b, t, v] feature frames.
            inputs_mask (torch.BoolTensor): [b, 1, t], True for valid frames.
        Out:
            outputs: [b, ceil(t/n), v], zeroed at down-sampled invalid positions.
            outputs_mask: [b, 1, ceil(t/n)] — every n-th column of inputs_mask.
        """
        # NOTE(review): this zeroes invalid frames of the CALLER's tensor in
        # place (masked_fill_) — confirm callers do not reuse `inputs`.
        inputs.masked_fill_(~inputs_mask.transpose(1, 2), 0.0)
        _, t, _ = inputs.size()

        # Right padding chosen so total padded length becomes
        # ceil(t/n - 1)*n + kernel_size (left pad of kernel_size//2 included),
        # which makes the conv output length floor((L - k)/n) + 1 == ceil(t/n),
        # exactly matching the [::n] slice of the mask below.
        padding_length = math.ceil(t / self.n - 1) * self.n + self.kernel_size // 2 + 1 - t
        inputs = F.pad(inputs, pad=(0, 0, self.kernel_size // 2,  padding_length), value=0.0)
        # Conv1d wants [b, channels, time]; transpose in and back out.
        inputs = inputs.transpose(1, 2)
        outputs = self.conv_layer(inputs)
        outputs = outputs.transpose(1, 2)
        # Keep every n-th mask column: group i is valid iff frame i*n was valid.
        outputs_mask = inputs_mask[:,:,::self.n]
        outputs.masked_fill_(~outputs_mask.transpose(1, 2), 0.0)

        return outputs, outputs_mask


class AttentionBasedSampling(nn.Module):
    """Single-head dot-product attention pooling.

    Each query position attends over the projected memory and pools it into a
    single output vector.
    """

    def __init__(self, qdim, mdim, odim):
        super().__init__()

        self.qlinear = nn.Linear(qdim, qdim)
        self.mlinear = nn.Linear(mdim, qdim)
        self.olinear = nn.Linear(qdim, odim)

    def forward(self, query, memory, qmmask=None):
        """Pool `memory` per query position via dot-product attention.

        Args:
            query (torch.Tensor): [b, t1, qdim]
            memory (torch.Tensor): [b, t2, mdim]
            qmmask (torch.BoolTensor, optional): [b, t1, t2]; True marks
                attendable (query, memory) pairs. Defaults to None.
        Returns:
            torch.Tensor: [b, t1, odim]
        """
        proj_q = self.qlinear(query)
        proj_m = self.mlinear(memory)

        # Raw attention scores: [b, t1, t2].
        scores = torch.matmul(proj_q, proj_m.transpose(1, 2))
        if qmmask is not None:
            # Disallowed pairs get -inf so softmax assigns them zero weight.
            scores = scores.masked_fill(~qmmask, -float('inf'))

        weights = torch.softmax(scores, dim=-1)
        pooled = torch.matmul(weights, proj_m)

        return self.olinear(pooled)


class MultiHeadAttentionBasedSampling(nn.Module):
    """Multi-head dot-product attention pooling of `memory` driven by `query`.

    NOTE(review): the per-head width is `nhead * (qdim // nhead)` (roughly
    qdim), making the total projection width about qdim * nhead. Possibly the
    intent was `qdim // nhead` per head — confirm before changing, since that
    would alter every parameter shape.
    """

    def __init__(self, qdim, mdim, odim, nhead=4):
        super().__init__()

        self.nhead = nhead
        self.hdim = nhead * (qdim // nhead)

        self.qlinear = nn.Linear(qdim, self.hdim * self.nhead)
        self.mlinear = nn.Linear(mdim, self.hdim * self.nhead)
        self.olinear = nn.Linear(self.hdim * self.nhead, odim)

    def forward(self, query, memory, qmmask=None):
        """Pool `memory` per query position with nhead attention heads.

        Args:
            query (torch.Tensor): [b, t1, qdim]
            memory (torch.Tensor): [b, t2, mdim]
            qmmask (torch.BoolTensor, optional): [b, t1, t2]. Defaults to None.
        Returns:
            torch.Tensor: [b, t1, odim]
        """
        batch = query.size(0)
        # Split projections into heads: [b, nhead, t, hdim].
        heads_q = self.qlinear(query).reshape(batch, -1, self.nhead, self.hdim).transpose(1, 2)
        heads_m = self.mlinear(memory).reshape(batch, -1, self.nhead, self.hdim).transpose(1, 2)

        # Per-head scores: [b, nhead, t1, t2].
        scores = torch.matmul(heads_q, heads_m.transpose(-2, -1))
        if qmmask is not None:
            # Broadcast the same mask across all heads.
            scores = scores.masked_fill(~qmmask.unsqueeze(1), -float('inf'))

        weights = torch.softmax(scores, dim=-1)
        # Pool, then merge heads back into one vector per query position.
        pooled = torch.matmul(weights, heads_m).transpose(1, 2)
        pooled = pooled.reshape(batch, -1, self.hdim * self.nhead)

        return self.olinear(pooled)


class LengthMeanSampling(nn.Module):
    """Mean-pool `memory` frames selected by a boolean mask, per query position.

    Args:
        qdim (int): input width of the optional output projection.
        odim (int): output width of the optional output projection.
        include_linear (bool): if True, apply Linear(qdim, odim) to the pooled
            vectors.
    """

    def __init__(self, qdim, odim, include_linear=False):
        super().__init__()

        self.include_linear = include_linear
        if self.include_linear:
            self.olinear = nn.Linear(qdim, odim)
            logger.info('[LengthMeanSampling] Including a output linear layer!')

    def forward(self, memory, mask):
        """Average the mask-selected memory frames for every query position.

        Args:
            memory ([torch.Tensor]): [b, t2, v]
            mask ([torch.BoolTensor]): [b, t1, t2]; True selects frames.
        Returns:
            torch.Tensor: [b, t1, v] (or [b, t1, odim] with include_linear);
            positions whose mask row is all-False come out as zeros.
        """
        # Number of selected frames per (batch, query) position: [b, t1].
        length = torch.sum(mask, dim=-1)
        # matmul(mask, memory) sums the selected frames directly, avoiding the
        # [b, t1, t2, v] tensor the previous repeat()-based version materialized.
        summed = torch.matmul(mask.to(memory.dtype), memory)
        # All-False rows would divide by zero; clamp to 1 (their sum is 0 anyway).
        safe_length = length.clamp(min=1)
        pooled = torch.div(summed, safe_length.unsqueeze(-1))
        return self.olinear(pooled) if self.include_linear else pooled


class LinearPoolingForSST(nn.Module):
    """Pool each window of frames into one frame with a learned linear map.

    With channel_depend=False one Linear(window, 1) is shared across channels;
    with channel_depend=True a single Linear(window * feat, feat) mixes
    channels and window positions together.
    """

    def __init__(self, window_kernel_size, feat_dim, channel_depend=False):
        super().__init__()
        logger.info('[LinearPoolingForSST] Apply LinearPoolingForSST!')
        self.channel_depend = channel_depend
        if self.channel_depend:
            in_features, out_features = window_kernel_size * feat_dim, feat_dim
        else:
            in_features, out_features = window_kernel_size, 1
        self.project_linear = nn.Linear(in_features, out_features, bias=False)
        if self.channel_depend: logger.info('[LinearPoolingForSST] Apply Channel-Depend for LinearPoolingForSST!')

    def forward(self, inputs, inputs_mask):
        """Project each window to a single frame.

        Args:
            inputs (torch.Tensor): [batch, n, window_kernel_size, feat_dim];
                zeroed IN PLACE at masked positions.
            inputs_mask (torch.BoolTensor): [batch, n, window_kernel_size]
        Returns:
            outputs: [batch, n, feat_dim]
            mask: [batch, 1, n], True where the window has any valid frame.
        """
        inputs.masked_fill_(~inputs_mask.unsqueeze(-1), 0.0)
        if self.channel_depend:
            batch, n_windows, window, feat = inputs.size()
            # Flatten (window, feat) so the linear mixes channels too.
            outputs = self.project_linear(inputs.reshape(batch, n_windows, window * feat))
        else:
            # Linear over the window axis, shared across channels.
            outputs = self.project_linear(inputs.transpose(2, 3)).squeeze(-1)

        window_valid = torch.sum(inputs_mask.int(), dim=-1).unsqueeze(1) > 0
        return outputs, window_valid

class LinearAndAttentionPoolingForSST(nn.Module):
    """Attention-pool each window of frames into one frame.

    A Linear(feat, 1) scores every frame; softmax over the window turns the
    scores into pooling weights.
    """

    def __init__(self, feat_dim, include_out_proj=True):
        super().__init__()
        logger.info('[LinearAndAttentionPoolingForSST] Apply LinearAndAttentionPoolingForSST!')
        self.EnergyLinear = nn.Linear(feat_dim, 1)
        self.include_out_proj = include_out_proj
        if self.include_out_proj:
            self.olinear = nn.Linear(feat_dim, feat_dim)
            logger.info('[LinearAndAttentionPoolingForSST] Apply OutputProjector for LinearAndAttentionPoolingForSST!')

    def forward(self, inputs, inputs_mask):
        """Pool each window by learned attention weights.

        Args:
            inputs (torch.Tensor): [batch, n, window_kernel_size, feat_dim]
            inputs_mask (torch.BoolTensor): [batch, n, window_kernel_size]
        Returns:
            out: [batch, n, feat_dim], zeroed where the whole window is masked.
            mask: [batch, 1, n]
        """
        # Scalar energy per frame: [b, n, w]; masked frames pushed to -10000
        # so softmax gives them ~zero weight.
        energy = self.EnergyLinear(inputs).squeeze(-1)
        energy = energy.masked_fill(~inputs_mask, -10000)
        weights = torch.softmax(energy, dim=-1)
        # [b, n, 1, w] x [b, n, w, v] -> [b, n, v].
        pooled = torch.matmul(weights.unsqueeze(2), inputs).squeeze(2)
        if self.include_out_proj:
            pooled = self.olinear(pooled)
        window_valid = torch.sum(inputs_mask.int(), dim=-1) > 0
        # Fully-masked windows attended uniformly over garbage; zero them out.
        pooled = pooled.masked_fill(~window_valid.unsqueeze(-1), 0.0)
        return pooled, window_valid.unsqueeze(1)

class MeanPoolingForSST(nn.Module):
    """Mean-pool each window of frames over its valid (unmasked) positions."""

    def __init__(self):
        super().__init__()

        logger.info('[MeanPoolingForSST] Apply MeanPoolingForSST!')

    def forward(self, inputs, inputs_mask):
        """Average the valid frames of every window.

        Args:
            inputs (torch.Tensor): [batch, n, window_kernel_size, feat_dim];
                zeroed IN PLACE at masked positions.
            inputs_mask (torch.BoolTensor): [batch, n, window_kernel_size]
        Returns:
            outputs: [batch, n, feat_dim]
            mask: [batch, 1, n], True where the window has any valid frame.
        """
        inputs.masked_fill_(~inputs_mask.unsqueeze(-1), 0.0)
        valid_count = torch.sum(inputs_mask.int(), dim=-1)
        window_valid = valid_count > 0
        summed = torch.sum(inputs, dim=2)
        # Empty windows divide by 1 instead of 0; their sum is already zero.
        divisor = valid_count.masked_fill(~window_valid, 1)

        return torch.div(summed, divisor.unsqueeze(-1)), window_valid.unsqueeze(1)


# class ConvLinearWeightedSampling(nn.Module):
#     def __init__(self, qdim, odim, kernel_size, channel, include_out_linear=False):
#         super().__init__()
#         assert kernel_size % 2 == 1

#         self.conv = nn.Conv1d(qdim, channel, kernel_size, padding=kernel_size//2, bias=True)
#         self.wlinear = nn.Linear(channel, 1)
#         self.include_out_linear = include_out_linear
#         if self.include_out_linear:
#             self.olinear = nn.Linear(channel, odim)

#     def forward(self, memory, mask):
#         """
#         Args:
#             memory ([torch.Tensor]): [b, t2, v]
#             mask ([torch.Tensor]): [b, t1, t2]
#         """
#         memory = self.conv(memory.transpose(1, 2)).transpose(1, 2)
#         length_mask = torch.sum(mask, dim=-1).unsqueeze(-1) == 0
#         energy = self.wlinear(memory).transpose(1, 2).repeat([1, mask.size(1), 1])
#         energy.masked_fill_(~mask, -float('inf'))
#         energy.masked_fill_(length_mask, 1)
#         weight = torch.softmax(energy, dim=-1) # [b, t1, t2]
#         weight.masked_fill_(length_mask, 0)
#         out = torch.matmul(weight, memory)
#         return self.olinear(out) if self.include_out_linear else out    




# if __name__ == '__main__':

# #     layer = MultiHeadAttentionBasedSampling(10, 6, 5, 2)
# #     q = torch.rand(1, 3, 10)
# #     m = torch.rand(1, 6, 6)
# #     qmmask = torch.Tensor([[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1]]]) > 0
# #     out = layer(q, m, qmmask)
# #     logger.info(out.size())
#     # layer = LengthMeanSampling()
#     # inputs = torch.rand(2, 6, 3)
#     # mask = torch.Tensor([[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1]], [[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]]) > 0
#     # out = layer(inputs, mask)
#     layer = ConvLinearWeightedSampling(3, 4, 3, 2)
#     inputs = torch.rand(2, 6, 3)
#     mask = torch.Tensor([[[1, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1]], [[1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0]]]) > 0
#     out = layer(inputs, mask)
#     logger.info(out.size())
#     logger.info(out)
