'''
https://github.com/sidhikabalachandar/oavnn/blob/2229ed34a634b6f7f27d327a68085873a891c6dd/model.py#L264
'''

import torch

def knn(x, k):
    """Return indices of the k nearest neighbors of every point.

    Uses the expansion |x_i - x_j|^2 = |x_i|^2 - 2 x_i.x_j + |x_j|^2; since
    only the ordering matters, the square root is skipped and the sign is
    flipped so that topk picks the smallest distances.

    Args:
        x: (batch, dims, num_points) tensor of point features.
        k: number of neighbors to return per point.

    Returns:
        (batch, num_points, k) int64 tensor of neighbor indices.
    """
    # Gram matrix of inner products x_i . x_j for every pair of points.
    gram = torch.matmul(x.transpose(2, 1), x)  # (B, N, N)

    # Squared norms |x_i|^2, kept as a (B, 1, N) row for broadcasting.
    sq_norm = torch.sum(x ** 2, dim=1, keepdim=True)

    # Negative squared distance: 2 x_i.x_j - |x_i|^2 - |x_j|^2.
    neg_sq_dist = -sq_norm + 2 * gram - sq_norm.transpose(2, 1)

    # Largest (least negative) entries correspond to the nearest points.
    return neg_sq_dist.topk(k=k, dim=-1).indices  # (batch_size, num_points, k)

def get_graph_feature(x, k=20, idx=None, x_coord=None, use_x_coord=False):
    """Assemble per-edge features (neighbor - center, center) for a kNN graph.

    Args:
        x: (B, E, 3, C) tensor — E channels of 3-vectors for each of C points.
        k: number of neighbors per point.
        idx: optional precomputed neighbor indices (B, C, k); computed with
            knn() when None.
        x_coord: raw point coordinates with the same layout as x; used only
            when use_x_coord is True.
        use_x_coord: if True, build a fixed graph from x_coord; otherwise a
            dynamic graph from the current features x.

    Returns:
        (B, 2*E, 3, C, k) tensor: neighbor offsets (feature - center)
        concatenated with the repeated center features along dim 1.
    """
    batch_size = x.size(0)
    num_points = x.size(3)
    x = x.view(batch_size, -1, num_points)
    if idx is None:
        if not use_x_coord:  # dynamic knn graph over the current features
            idx = knn(x, k=k)  # (batch_size, num_points, k)
        else:  # fixed knn graph over the input point coordinates
            x_coord = x_coord.view(batch_size, -1, num_points)
            # Ask for k+1 neighbors and drop the nearest (the point itself);
            # per the original note, knn(x_coord, k=k) directly gave nan loss.
            idx = knn(x_coord, k=k + 1)
            idx = idx[:, :, 1:]  # k nearest neighbors, excluding self
    # BUG FIX: take the device from the input tensor instead of hard-coding
    # 'cuda', so the function also works on CPU and respects the active GPU.
    device = x.device

    # Per-batch offsets so idx can address the flattened (B*C, dims) view.
    idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points

    idx = idx + idx_base

    idx = idx.view(-1)

    _, num_dims, _ = x.size()
    num_dims = num_dims // 3  # number of 3-vector channels (E)

    # (B, E*3, C) -> (B, C, E*3): make each point's features contiguous so the
    # flattened (B*C, E*3) view can be gathered with the offset indices.
    x = x.transpose(2, 1).contiguous()
    feature = x.view(batch_size * num_points, -1)[idx, :]
    feature = feature.view(batch_size, num_points, k, num_dims, 3)
    x = x.view(batch_size, num_points, 1, num_dims, 3).repeat(1, 1, k, 1, 1)

    # Edge feature: (neighbor - center) concatenated with the center itself.
    feature = torch.cat((feature - x, x), dim=3).permute(0, 3, 4, 1, 2).contiguous()

    return feature

def sq_dist_mat(source):
    """Pairwise squared Euclidean distances within each point cloud.

    Args:
        source: (B, C, 3) tensor of point coordinates.

    Returns:
        (B, C, C) tensor where entry (b, i, j) is |p_i - p_j|^2.
    """
    # Squared norms |p_i|^2 as a (B, C, 1) column for broadcasting.
    norms = torch.sum(source * source, dim=2, keepdim=True)
    # Inner products p_i . p_j for every pair of points.
    dots = torch.matmul(source, source.permute(0, 2, 1))  # B x C x C
    # |p_i|^2 - 2 p_i.p_j + |p_j|^2, broadcast over rows and columns.
    return norms - 2. * dots + norms.permute(0, 2, 1)


def compute_patches(source, sq_distance_mat, num_samples):
    """Gather, for each point, the coordinates of its num_samples nearest points.

    Args:
        source: (B, C, 3) tensor of point coordinates.
        sq_distance_mat: (B, C, C) pairwise squared distances (see sq_dist_mat).
        num_samples: patch size; must not exceed C. Note the nearest "neighbor"
            of a point is the point itself (distance 0).

    Returns:
        (B, C, num_samples, 3) tensor of neighbor coordinates per point.
    """
    batch_size = source.size()[0]
    num_points_source = source.size()[1]
    assert (num_samples <= num_points_source)

    # Nearest neighbors = largest entries of the negated distance matrix.
    # The distances themselves are not needed, only the indices.
    _, patches_idx = torch.topk(-sq_distance_mat, k=num_samples, dim=-1)  # B x C x k

    # BUG FIX: use the input tensor's own device instead of hard-coding
    # 'cuda', so the function also works on CPU and respects the active GPU.
    # Per-batch offsets let patches_idx address the flattened (B*C, 3) view.
    idx_base = torch.arange(0, batch_size, device=source.device).view(-1, 1, 1) * num_points_source
    patches_idx = patches_idx + idx_base
    patches_idx = patches_idx.view(-1)
    feature = source.reshape(batch_size * num_points_source, -1)[patches_idx, :]
    feature = feature.view(batch_size, num_points_source, num_samples, 3)
    return feature


def orientation_embd(x, patch_size, num_shells):
    """Per-point orientation features from cross products of shell means.

    Each point's patch of patch_size nearest neighbors is split into
    num_shells consecutive groups ("shells"); the mean offset of each shell
    from the center point is computed, and cross products are taken between
    every unordered pair of shell means.

    Args:
        x: (B, C, 3) tensor of point coordinates.
        patch_size: neighbors per patch; assumed divisible by num_shells.
        num_shells: number of shells per patch.

    Returns:
        (cross, embd), both of shape (B, C*N, 3) with
        N = num_shells * (num_shells - 1) / 2. NOTE(review): the two return
        values are currently the same tensor reshaped identically.
    """
    distances = sq_dist_mat(x)
    patches = compute_patches(x, distances, patch_size)  # B x C x K x 3
    b, c = patches.size()[0], patches.size()[1]
    # Split each patch into num_shells groups of K / num_shells neighbors.
    shells = torch.reshape(patches, (b, c, num_shells, -1, 3))
    # Offsets relative to the center point, then averaged within each shell.
    shells = shells - torch.unsqueeze(torch.unsqueeze(x, dim=-2), dim=-2)
    shell_means = torch.mean(shells, dim=-2)  # B x C x num_shells x 3

    # All shell pairs (i, j) with i < j, in the same row-major order the
    # original double loop produced; N = num_shells * (num_shells - 1) / 2.
    pairs = torch.triu_indices(num_shells, num_shells, offset=1)
    I, J = pairs[0], pairs[1]

    # Cross products between every pair of shell means: B x C x N x 3.
    cross = torch.cross(shell_means[:, :, I, :], shell_means[:, :, J, :], dim=-1)
    cross = torch.reshape(cross, (b, -1, 3))
    embd = torch.reshape(cross, (b, -1, 3))
    return cross, embd

if __name__ == '__main__':
    # Smoke test: build graph features plus the orientation embedding for a
    # random batch of point clouds. Requires a CUDA device.
    k = 40            # number of nearest neighbors to use
    num_points = 128  # number of points per cloud
    num_shells = 4    # number of shells for the orientation embedding
    use_x_coord = True


    x_in = torch.randn([8, 3, 128]).cuda()  # B x 3 x C
    batch_size = x_in.size(0)
    num_points = x_in.size(2)

    x = x_in.unsqueeze(1)  # B x E=1 x 3 x C
    x_coord = x            # graph is built from the raw coordinates

    # Edge features over the fixed coordinate-based graph: B x E=2 x 3 x C x K.
    x_graph = get_graph_feature(x, k=k, x_coord=x_coord, use_x_coord=use_x_coord)

    # Orientation embedding over patches of num_points // num_shells neighbors.
    rot_cross, rot_embed = orientation_embd(x_in.permute(0, 2, 1),
                                            num_points // num_shells,
                                            num_shells)

    # Collapse per-point cross products, then broadcast to every edge so the
    # result can be appended as one extra feature channel.
    rot_cross = torch.sum(rot_cross, dim=1)                          # B x 3
    rot_cross = rot_cross.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)   # B x 1 x 3 x 1 x 1
    rot_cross = rot_cross.repeat(1, 1, 1, num_points, k)             # B x 1 x 3 x C x K
    x_graph = torch.cat((x_graph, rot_cross), dim=1).contiguous()    # B x E=3 x 3 x C x K

    print("Done!")