import math
import oneflow as torch
import oneflow.nn as nn
import oneflow.nn.functional as F


class SlidingWindow(nn.Module):
    """Slice a sequence into overlapping fixed-size chunks via ``nn.Unfold``.

    Each output chunk covers ``left_context`` past frames, the current frame,
    and ``right_context`` future frames (``chunk_size = left + right + 1``).
    With ``stride == 1`` the sequence is padded so that every input frame gets
    exactly one chunk; with ``stride > 1`` it is right-padded just enough for
    the last chunk to fit (see :meth:`get_padding_len`).
    """

    def __init__(self, model_size, left_context=5, right_context=0, stride=1):
        super(SlidingWindow, self).__init__()

        self.model_size = model_size
        self.left_context = left_context
        # Backward-compat alias: earlier revisions exposed this attribute
        # under the misspelled name, so external readers keep working.
        self.left_contxt = left_context
        self.right_context = right_context
        self.chunk_size = left_context + right_context + 1
        self.stride = stride
        # unfold1 extracts one (chunk_size, model_size) patch of the feature
        # plane per output position; unfold2 does the same on the (t, 1)
        # mask plane so frame validity travels with each chunk.
        self.unfold1 = nn.Unfold(kernel_size=(self.chunk_size, self.model_size), padding=0, stride=stride)
        self.unfold2 = nn.Unfold(kernel_size=(self.chunk_size, 1), padding=0, stride=stride)

    def forward(self, inputs, mask=None):
        """Chunk ``inputs`` (and optionally ``mask``) with the sliding window.

        Args:
            inputs: float tensor of shape ``[n, t, v]`` with ``v == model_size``.
            mask: optional ``[n, 1, t]`` tensor, nonzero for valid frames.

        Returns:
            Tuple ``(chunk_output, frame_mask, chunk_mask)``:
            - ``chunk_output``: ``[n, nb, chunk_size, model_size]``
            - ``frame_mask``: ``[n, nb, chunk_size]`` bool, or ``None``
            - ``chunk_mask``: ``[n, nb]`` bool (chunk has >=1 valid frame), or ``None``
        """
        b = inputs.size(0)

        if self.stride == 1:
            # Fix: pad with the FULL left context (was ``left_context // 2``),
            # matching the mask padding below.  The halved padding produced
            # fewer chunks from unfold1 than unfold2, so chunk_output and
            # frame_mask disagreed in their chunk dimension.
            inputs = F.pad(inputs, pad=(0, 0, self.left_context, self.right_context), value=0.0)
        else:
            padding_len = self.get_padding_len(inputs.size(1))
            inputs = F.pad(inputs, pad=(0, 0, 0, padding_len), value=0.0)

        inputs = inputs.unsqueeze(1)            # [n, 1, t_padded, v] — Unfold needs a channel dim
        output = self.unfold1(inputs)           # [n, chunk_size * v, nb]
        nb = output.size(2)
        output = output.transpose(1, 2)         # [n, nb, chunk_size * v]
        chunk_output = output.reshape(b, nb, self.chunk_size, self.model_size)

        if mask is not None:
            # Pad the mask the same way as the inputs; padded frames get 0
            # and therefore come out invalid after gt(0).
            if self.stride == 1:
                mask = F.pad(mask.float(), pad=(self.left_context, self.right_context), value=0)
            else:
                mask = F.pad(mask.float(), pad=(0, padding_len), value=0)
            mask = mask.unsqueeze(-1)           # [n, 1, t_padded, 1]
            frame_mask = self.unfold2(mask)     # [n, chunk_size, nb]
            frame_mask = frame_mask.transpose(1, 2).gt(0)
            # A chunk is valid as soon as it contains any valid frame.
            chunk_mask = torch.sum(frame_mask, dim=-1).gt(0)
        else:
            frame_mask = None
            chunk_mask = None

        return chunk_output, frame_mask, chunk_mask

    def get_padding_len(self, length):
        """Right padding (in frames) so strided chunks exactly cover ``length``."""
        max_num_chunks = math.ceil(max(length - self.chunk_size, 0) / self.stride + 1)
        return (max_num_chunks - 1) * self.stride + self.chunk_size - length


# if __name__ == '__main__':

#     window = SlidingWindow(model_size=4, left_context=3, right_context=0, stride=2)

#     length = 7
#     inputs = torch.rand(2, length, 4)
#     mask = torch.ones([2, 1, length], dtype=torch.uint8)

#     output, frame_mask, chunk_mask = window(inputs, mask)

    # print(inputs)
    # print('**')
    # print(output)
    # print(output.size())
    # print(frame_mask)
    # print(frame_mask.size())
    # print(chunk_mask)
    # print(chunk_mask.size())