import torch


def space_to_depth(tensor, scale_factor):
    """Rearrange spatial blocks into a leading shift dimension.

    Splits each ``scale_factor x scale_factor`` spatial block of the input
    and stacks the ``scale_factor**2`` shifted sub-grids along a new axis,
    so that ``out[n, kh * scale_factor + kw, c] == tensor[n, c, kh::scale_factor, kw::scale_factor]``.

    Args:
        tensor: input of shape ``[num, ch, height, width]``.
        scale_factor: block size; must evenly divide both height and width.

    Returns:
        Tensor of shape
        ``[num, scale_factor**2, ch, height // scale_factor, width // scale_factor]``.

    Raises:
        ValueError: if height or width is not divisible by ``scale_factor``.
    """
    num, ch, height, width = tensor.shape
    if height % scale_factor != 0 or width % scale_factor != 0:
        raise ValueError('height and width of tensor must be divisible by '
                         'scale_factor.')

    new_height = height // scale_factor
    new_width = width // scale_factor

    # Split each spatial dim into (block index, offset within block).
    tensor = tensor.reshape(
        [num, ch, new_height, scale_factor, new_width, scale_factor])
    # Move the two offset axes to the front (after batch) so they are
    # adjacent: [num, scale_factor, scale_factor, ch, new_height, new_width].
    # (Permuting to [0, 1, 3, 5, 2, 4] and then reshaping to
    # [num, s*s, ch, ...] would scramble channels and shifts, because the
    # ch axis would sit between the two scale axes being flattened.)
    tensor = tensor.permute([0, 3, 5, 1, 2, 4])
    # Flatten the two adjacent offset axes into one shift axis of size s*s.
    tensor = tensor.reshape([num, scale_factor * scale_factor, ch, new_height, new_width])
    return tensor


def space_to_depth2(in_tensor, down_scale):
    """Space-to-depth via ``unfold``: fold each ``down_scale x down_scale``
    spatial block into the channel dimension.

    Returns a tensor of shape
    ``[n, c * down_scale**2, h // down_scale, w // down_scale]`` where the
    output channels are ordered channel-major (all shifts of channel 0,
    then all shifts of channel 1, ...).
    """
    batch, channels, height, width = in_tensor.size()
    out_height = height // down_scale
    out_width = width // down_scale
    # unfold with kernel_size == stride extracts non-overlapping blocks,
    # producing [batch, channels * down_scale**2, out_height * out_width].
    patches = torch.nn.functional.unfold(
        in_tensor, kernel_size=down_scale, stride=down_scale)
    # Restore the spatial grid of block positions.
    return patches.reshape(batch, channels * down_scale * down_scale,
                           out_height, out_width)


def space_to_depth3(in_tensor, down_scale):
    """Space-to-depth via reshape/permute, equivalent to the ``unfold``
    variant: each ``down_scale x down_scale`` block moves into channels.

    Returns a tensor of shape
    ``[batch, ch * down_scale**2, height // down_scale, width // down_scale]``
    with channel-major ordering (shifts grouped per input channel).
    """
    batch, channels, height, width = in_tensor.size()
    grid_h = height // down_scale
    grid_w = width // down_scale

    # Split each spatial axis into (block index, offset within block):
    # [batch, channels, grid_h, down_scale, grid_w, down_scale].
    blocks = in_tensor.reshape(batch, channels, grid_h, down_scale, grid_w, down_scale)
    # Bring the two offset axes next to the channel axis, then flatten
    # (channels, offset_h, offset_w) into the output channel dimension.
    blocks = blocks.permute(0, 1, 3, 5, 2, 4).contiguous()
    return blocks.reshape(batch, channels * down_scale * down_scale, grid_h, grid_w)
