import oneflow as torch

def get_length_mask(b, t, length, device='gpu:0'):
    """Build a boolean validity mask for variable-length sequences.

    Args:
        b (int): batch size.
        t (int): maximum sequence length (number of time steps).
        length (Tensor): per-example valid lengths, shape [b].
        device (str, optional): device the result is moved to. Defaults to 'gpu:0'.

    Returns:
        Bool tensor of shape [b, t] where entry (i, j) is True iff time step
        j is within the valid length of example i, i.e. each row holds exactly
        length[i] True values.
    """
    # 1-based positions 1..t, tiled to [b, t].
    positions = torch.arange(1, t + 1).unsqueeze(0).repeat([b, 1])
    lengths = length.unsqueeze(1).repeat([1, t])
    # Fix: use <= rather than < — with 1-based positions, `<` dropped the
    # last valid step, leaving only length-1 True entries per row.
    return (positions <= lengths).to(device)

def get_upper_triangular_mask(b, t1, t2, diagonal=0, device='gpu:0'):
    """Return a [b, t1, t2] bool mask that is True strictly below
    diagonal -diagonal (everything outside the kept upper triangle).

    Args:
        b (int): batch size.
        t1 (int): number of rows.
        t2 (int): number of columns.
        diagonal (int, optional): widens the kept upper region downward
            by `diagonal` sub-diagonals. Defaults to 0.
        device (str, optional): device to build the mask on. Defaults to 'gpu:0'.
    """
    full = torch.ones([b, t1, t2], device=device)
    kept_upper = torch.triu(full, diagonal=-diagonal) > 0
    return ~kept_upper

def get_lower_triangular_mask(b, t1, t2, diagonal=0, device='gpu:0'):
    """Return a [b, t1, t2] bool mask that is True strictly above
    diagonal `diagonal` (everything outside the kept lower triangle).

    Args:
        b (int): batch size.
        t1 (int): number of rows.
        t2 (int): number of columns.
        diagonal (int, optional): widens the kept lower region upward
            by `diagonal` super-diagonals. Defaults to 0.
        device (str, optional): device to build the mask on. Defaults to 'gpu:0'.
    """
    full = torch.ones([b, t1, t2], device=device)
    kept_lower = torch.tril(full, diagonal=diagonal) > 0
    return ~kept_lower

def get_upper_and_lower_triangular_mask(b, t1, t2, l=0, r=0, device='gpu:0'):
    """Return a [b, t1, t2] bool band mask: entry (i, j) is True when
    the offset j - i lies in [-(l + 1), r + 1].

    NOTE(review): the band is one diagonal wider on each side than l/r
    alone (diagonals -(l+1)..r+1, not -l..r) — presumably intentional
    extra context, but confirm against callers; the sibling triangular
    masks do not apply this +1.

    Args:
        b (int): batch size.
        t1 (int): number of rows.
        t2 (int): number of columns.
        l (int, optional): left (lower) context width. Defaults to 0.
        r (int, optional): right (upper) context width. Defaults to 0.
        device (str, optional): device to build the mask on. Defaults to 'gpu:0'.
    """
    base = torch.ones([b, t1, t2], device=device)
    # Clip below diagonal -(l+1), then clip above diagonal r+1.
    band = torch.triu(base, diagonal=-(l + 1))
    band = torch.tril(band, diagonal=r + 1)
    return band > 0

# def FixedSpikeExtendMask(spikes, left_context=3, right_context=3):

# def AdaptiveSpikeExtendMask(spikes):


# if __name__ == '__main__':
    #print(get_upper_triangular_mask(2, 7, 7, 0, 'cpu'))
    #print(get_lower_triangular_mask(2, 7, 7, 0, 'cpu'))
    # print(get_upper_and_lower_triangular_mask(2, 7, 7, 2, 2, 'cpu'))
