import oneflow as torch
import oneflow.nn.functional as F


def get_length_mask(lengths):
    """Build a boolean padding mask from per-sequence lengths.

    Args:
        lengths: integer tensor of shape [b] holding each sequence's
            valid length (assumed non-empty; ``max()`` would fail otherwise).

    Returns:
        Bool tensor of shape [b, max(lengths)] where entry (i, t) is
        True iff t < lengths[i].
    """
    batch_size = lengths.numel()
    # Hoisted: the original recomputed lengths.max() for both arange and
    # expand; an int is also safer than a 0-d tensor as a size argument.
    max_len = int(lengths.max())
    positions = torch.arange(0, max_len, device=lengths.device).type_as(lengths)
    return positions.unsqueeze(0).expand(batch_size, max_len).lt(lengths.unsqueeze(1))


def spike_expansion(spikes, mask=None, left_context=1, right_context=1):
    """Widen each CTC non-blank spike to cover its neighbouring frames.

    Args:
        spikes: [b, t] tensor of 0/1 spike indicators (cast to int internally).
        mask: optional [b, t] bool tensor; True positions are forced to 0
            before thresholding (presumably padding positions — confirm
            against caller's mask convention).
        left_context (int, optional): frames to the left of each spike to
            activate. Defaults to 1.
        right_context (int, optional): frames to the right of each spike to
            activate. Defaults to 1.

    Returns:
        Bool tensor [b, t], True wherever a spike or its context lands.
    """
    base = spikes.int()
    widened = base.clone()

    # Left context: a spike at frame i must light up frame i - shift, so we
    # add the spike train shifted left by `shift` frames. An empty range
    # makes the explicit `if left_context > 0` guard unnecessary.
    for shift in range(1, left_context + 1):
        widened[:, :-shift] += base[:, shift:]

    # Right context: symmetrically, frame i + shift picks up the spike at i.
    for shift in range(1, right_context + 1):
        widened[:, shift:] += base[:, :-shift]

    if mask is not None:
        widened.masked_fill_(mask, 0)

    return widened > 0


def sparse_enc_memory_based_on_spikes(memory_in, memory_in_mask, spikes, spikes_mask):
    """Gather only the spiked frames of the encoder memory into a dense batch.

    Each row keeps the frames where ``spikes_mask`` is set; rows with fewer
    spikes than the batch maximum are right-padded with zero frames so the
    result stays rectangular.

    Args:
        memory_in ([torch.FloatTensor]): [b, t1, v] encoder memory.
        memory_in_mask ([torch.IntTensor]): [b, t1] valid-frame mask
            (only its sum is used, for the sparsity ratio).
        spikes ([torch.IntTensor]): [b, t1] per-frame spike values.
        spikes_mask ([torch.IntTensor]): [b, t1] selection mask — must behave
            as bool for masked_select/cat below (confirm callers pass bool).
    Return:
        memory_out ([torch.FloatTensor]): [b, t2, v] selected (and zero-padded)
            frames, where t2 = max spike count over the batch.
        t_per: [b] number of selected frames per row (before padding).
        sparse_spikes: [b, t2] spike values at the selected positions.
        sparsity: float, selected frames / valid frames over the whole batch.
    """
    b = spikes.size(0)
    # Per-row spike counts and their batch-wide extremes.
    t_per = torch.sum(spikes_mask, dim=-1)
    t_max = torch.max(t_per)
    t_min = torch.min(t_per)

    sparsity = torch.sum(spikes_mask).item() / torch.sum(memory_in_mask).item()

    # Each row must contribute exactly t_max frames to masked_select so the
    # flat result reshapes to [b, t_max, v]. Append diff_len[i] extra True
    # positions per row, pointing into a zero-padded tail of the inputs.
    diff_len = t_max - t_per
    diff_mask = get_length_mask(diff_len)
    spikes_mask = torch.cat([spikes_mask, diff_mask], dim=1)

    # Tail length = widest per-row deficit = t_max - t_min; pad inputs to
    # match the extended mask. Padded frames are zeros, so the fill frames
    # selected via diff_mask are all-zero.
    pad_len = t_max.item() - t_min.item()
    memory_in = F.pad(memory_in, pad=[0, 0, 0, pad_len], value=0.0)
    spikes = F.pad(spikes, pad=[0, pad_len], value=0.0)
    assert memory_in.size(1) == spikes_mask.size(1)
    # masked_select flattens in row-major order; with exactly t_max picks per
    # row the reshape below is well-defined.
    memory_out = torch.masked_select(memory_in, spikes_mask.unsqueeze(-1)).reshape(b, t_max.item(), -1)
    sparse_spikes = torch.masked_select(spikes, spikes_mask).reshape(b, t_max.item())

    return memory_out, t_per, sparse_spikes, sparsity


# if __name__ == '__main__':
#     memory_in = torch.rand(2, 8, 6)
#     memory_in_mask = torch.LongTensor([[1, 1, 1, 1, 1, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]])
#     spikes = torch.FloatTensor([[0.1, 0.8, 0.7, 0.2, 0.9, 0.1, 0.1, 0.1], [0.8, 0.1, 0.9, 0.2, 0.9, 0.7, 0.1, 0.6]])
#     spikes_mask = spikes > 0.5
#     print(memory_in)
#     print(memory_in_mask)
#     print(spikes)
#     print(spikes_mask.int())
#     mo, tp, ss, s = sparse_enc_memory_based_on_spikes(memory_in, memory_in_mask > 0, spikes, spikes_mask)
#     print(mo)
#     print(tp)
#     print(ss)
#     print(s)