'''
Description: assorted attention modules
Author: suyunzheng
Date: 2021-12-03 21:52:36
LastEditTime: 2021-12-03 22:21:42
LastEditors: maple
'''

from os import kill
from re import S
from time import sleep
import torchsparse
import torchsparse.nn as spnn
from torch import nn
from torchsparse import PointTensor
import torch
import numpy as np
from torchsparse.tensor import SparseTensor
from core.models.utils import initial_voxelize, point_to_voxel, voxel_to_point

import data_utils.s3dis.s3disDataLoader as s3dis
import data_utils.semantickitti.SemanticKittiDataLoader as kitti
class SelfAttention(nn.Module):
    """Experimental self-attention stub operating on a torchsparse SparseTensor.

    The forward pass wraps an element-wise product of the input's feature
    matrix with itself into a new SparseTensor carrying the same coordinates
    and stride. The full softmax(Q·Kᵀ)·V scheme is only sketched in the
    trailing comments and is not implemented yet.
    """

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Query, key and value are all aliases of the same input tensor.
        query, key, value = x, x, x

        # NOTE(review): query.F * key.F is an element-wise (Hadamard) product
        # of shape N*C, not the N*N score matrix described in the sketch
        # below — confirm the intended semantics before relying on this.
        scores = SparseTensor(query.F * key.F, query.C, query.s)

        return scores

        # Self-Attention sketch (not implemented):
        # batch = [N1*C, N2*C]
        # [N1*C, N2*C] ===> [N1*N1, N2*N2] ===> softmax ===> [N1*N1, N2*N2] \
        # [N1*C, N2*C]*[N1*N1, N2*N2] ===> [N1*C, N2*C]



def self_attention(sparse_tensor):
    """Functional self-attention over a SparseTensor — placeholder.

    Not implemented yet; accepts any value and returns ``None``, matching
    the original no-op stub.
    """
    return None


if __name__ == '__main__':
    # Smoke test: pull 100 batches from the SemanticKITTI loader and run
    # them through the (stub) SelfAttention module on GPU 0.
    dataloader = kitti.getDataLoader(voxel_size=0.05, split='test',
                                     batch_size=2, num_points=8000)
    # Build the module once, outside the loop — the original re-created it
    # (and re-copied it to the GPU) on every iteration.
    sa1 = SelfAttention().to('cuda:0')
    # `iter`/`input` renamed so the builtins are not shadowed.
    data_iter = iter(dataloader)
    for _ in range(100):
        # Python 3 iterators expose __next__, not .next(); the original
        # `iter.next()` raises AttributeError — use next() instead.
        feed_dict = next(data_iter)
        lidar_input = feed_dict['lidar'].to('cuda:0')
        out = sa1(lidar_input)
