'''
Description: 
Author: suyunzheng
Date: 2021-12-06 22:01:43
LastEditTime: 2021-12-07 20:50:41
LastEditors: maple
'''

from typing import List, Tuple
from numpy.lib import stride_tricks
from numpy.lib.twodim_base import diagflat
import torch
from torch.autograd.function import InplaceFunction
from torch.nn.modules import activation
import torchsparse.nn.functional as F
from torchsparse import PointTensor, SparseTensor
from torchsparse.nn.utils import get_kernel_offsets
from re import S
import torchsparse
import torchsparse.nn as spnn
from torch import nn
import numpy as np
from core.models.utils import initial_voxelize, point_to_voxel, voxel_to_point
from data_utils.s3dis.s3disDataLoader import getDataLoader



class BasicConvolutionBlock(nn.Module):
    """Sparse 3-D convolution, optionally followed by BatchNorm and ReLU.

    Args:
        inc: input channel count.
        outc: output channel count.
        ks: kernel size of the sparse convolution.
        stride: convolution stride (downsamples the voxel grid when > 1).
        dilation: convolution dilation.
        bn: append a BatchNorm layer when True.
        activation: append an in-place ReLU when True.
    """

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1, bn=True, activation=True):
        super().__init__()
        layers = nn.Sequential(
            spnn.Conv3d(inc, outc, kernel_size=ks, dilation=dilation, stride=stride))
        if bn:
            layers.add_module("BN1", spnn.BatchNorm(outc))
        if activation:
            layers.add_module('ReLU', spnn.ReLU(True))
        self.net = layers

    def forward(self, x):
        return self.net(x)

class BasicDeconvolutionBlock(nn.Module):
    """Transposed sparse 3-D convolution (upsampling), with optional BN/ReLU.

    Args:
        inc: input channel count.
        outc: output channel count.
        ks: kernel size of the transposed convolution.
        stride: upsampling factor.
        bn: append a BatchNorm layer when True.
        activation: append an in-place ReLU when True (off by default).
    """

    def __init__(self, inc, outc, ks=3, stride=1, bn=True, activation=False):
        super().__init__()
        body = nn.Sequential(
            spnn.Conv3d(inc, outc, kernel_size=ks, stride=stride, transposed=True))
        if bn:
            body.add_module("deBN1", spnn.BatchNorm(outc))
        if activation:
            body.add_module('ReLU', spnn.ReLU(True))
        self.net = body

    def forward(self, x):
        return self.net(x)


class BasicMLPBlock(nn.Module):
    """Per-voxel linear layer, implemented as a 1x1x1 sparse convolution.

    Args:
        dim_in: input feature dimension.
        dim_out: output feature dimension.
        bn: append a BatchNorm layer when True.
        bias: whether the 1x1x1 convolution uses a bias term.
    """

    def __init__(self, dim_in, dim_out, bn=False, bias=True):
        super().__init__()
        mlp = nn.Sequential(
            spnn.Conv3d(in_channels=dim_in,
                        out_channels=dim_out,
                        kernel_size=1,
                        stride=1,
                        transposed=False,
                        bias=bias))
        if bn:
            mlp.add_module("BN", spnn.BatchNorm(dim_out))
        self.linear = mlp

    def forward(self, x):
        return self.linear(x)


class ResidualBlock(nn.Module):
    """Two stacked sparse convolutions with a skip connection.

    The skip path is the identity when input/output channels match and
    stride is 1; otherwise it is a strided 1x1 projection so the two
    branches can be summed.
    """

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
        super().__init__()
        self.net = nn.Sequential(
            spnn.Conv3d(inc, outc, kernel_size=ks, dilation=dilation, stride=stride),
            spnn.BatchNorm(outc),
            spnn.ReLU(True),
            spnn.Conv3d(outc, outc, kernel_size=ks, dilation=dilation, stride=1),
            spnn.BatchNorm(outc),
        )

        # Identity skip when shapes line up, else project to match.
        if inc == outc and stride == 1:
            self.downsample = nn.Identity()
        else:
            self.downsample = nn.Sequential(
                spnn.Conv3d(inc, outc, kernel_size=1, dilation=1, stride=stride),
                spnn.BatchNorm(outc),
            )

        self.relu = spnn.ReLU(True)

    def forward(self, x):
        shortcut = self.downsample(x)
        return self.relu(self.net(x) + shortcut)


def sparse_voxel():
    """Voxelize (i.e. pool) a sparse tensor to a coarser grid. Not implemented."""
    raise NotImplementedError

def upsample():
    """Upsample a sparse tensor back to the previous resolution. Not implemented."""
    raise NotImplementedError

class MultiScaleProjection(nn.Module):
    """Compute four scale-aware projections of a sparse feature tensor.

    For each scale s in {2, 4, 8, 16}, the input is pooled by a strided
    sparse convolution (kernel size == stride == s) and immediately
    upsampled back to the input resolution. The difference between the
    upsampled features and the original features (a per-voxel "offset")
    is fed through a 1x1x1 MLP to produce a gating kernel, which is then
    multiplied element-wise with the input features.

    Returns a 4-tuple of SparseTensors, one projection per scale, all at
    the input's coordinates and stride.

    Note: the submodules are kept as individually named attributes
    (conv1..conv4, deconv1..deconv4, mlp1..mlp4) rather than ModuleLists
    so that existing checkpoints keep loading.
    """

    def __init__(self, dim: int):
        super().__init__()
        # kernel size and stride per scale branch
        self.scales = [2, 4, 8, 16]
        self.conv1 = BasicConvolutionBlock(dim, dim, ks=self.scales[0], stride=self.scales[0], dilation=1, bn=True, activation=False)
        self.deconv1 = BasicDeconvolutionBlock(dim, dim, ks=self.scales[0], stride=self.scales[0], bn=True, activation=False)

        self.conv2 = BasicConvolutionBlock(dim, dim, ks=self.scales[1], stride=self.scales[1], dilation=1, bn=True, activation=False)
        self.deconv2 = BasicDeconvolutionBlock(dim, dim, ks=self.scales[1], stride=self.scales[1], bn=True, activation=False)

        self.conv3 = BasicConvolutionBlock(dim, dim, ks=self.scales[2], stride=self.scales[2], dilation=1, bn=True, activation=False)
        self.deconv3 = BasicDeconvolutionBlock(dim, dim, ks=self.scales[2], stride=self.scales[2], bn=True, activation=False)

        self.conv4 = BasicConvolutionBlock(dim, dim, ks=self.scales[3], stride=self.scales[3], dilation=1, bn=True, activation=False)
        self.deconv4 = BasicDeconvolutionBlock(dim, dim, ks=self.scales[3], stride=self.scales[3], bn=True, activation=False)

        # One gating MLP per scale branch.
        self.mlp1 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)
        self.mlp2 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)
        self.mlp3 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)
        self.mlp4 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)

    def forward(self, x):
        # The original unrolled this loop four times; the branches are
        # identical up to which submodules they use, so iterate instead.
        branches = (
            (self.conv1, self.deconv1, self.mlp1),
            (self.conv2, self.deconv2, self.mlp2),
            (self.conv3, self.deconv3, self.mlp3),
            (self.conv4, self.deconv4, self.mlp4),
        )

        projections = []
        for conv, deconv, mlp in branches:
            # Pool to this branch's scale, then upsample back to x's grid.
            up = deconv(conv(x))

            # Per-voxel offset between the smoothed features and the input.
            # Use x.stride consistently (the original mixed x.stride and its
            # alias x.s for the same value).
            offset = SparseTensor(feats=up.F - x.F, coords=x.C, stride=x.stride)
            # Share x's coordinate/kernel maps so the following 1x1x1 sparse
            # conv reuses the already-built index structures.
            offset.cmaps = x.cmaps
            offset.kmaps = x.kmaps

            # Gating kernel from the offset, applied multiplicatively to x.
            kernel = mlp(offset)
            projections.append(
                SparseTensor(feats=kernel.F * x.F, coords=x.C, stride=x.stride))

        return tuple(projections)
    

class MultiScaleFusion(nn.Module):
    """Attention-style fusion of four same-resolution sparse projections.

    The four inputs are summed, four per-scale MLPs turn the sum into
    sigmoid attention scores, each input is gated by its score, and the
    gated features are summed and passed through an output MLP.
    """

    def __init__(self, dim):
        super().__init__()
        # One scoring MLP per input scale.
        self.mlp1 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)
        self.mlp2 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)
        self.mlp3 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)
        self.mlp4 = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)

        # Renamed from the misleading `self.relu`: this is a Sigmoid gate
        # producing attention scores in (0, 1). Sigmoid has no parameters,
        # so the rename does not affect saved state dicts.
        self.sigmoid = spnn.Sigmoid()
        self.mlp_out = BasicMLPBlock(dim_in=dim, dim_out=dim, bias=True)

    def forward(self, x: Tuple):
        # Sum the four scales' features; they share coordinates and stride.
        feats_sum = SparseTensor(
            feats=x[0].F + x[1].F + x[2].F + x[3].F,
            coords=x[0].C,
            stride=x[0].s,
        )

        # Per-scale attention scores in (0, 1).
        scores = [self.sigmoid(mlp(feats_sum))
                  for mlp in (self.mlp1, self.mlp2, self.mlp3, self.mlp4)]

        # Gate each scale by its score and sum the results. (The original
        # built four intermediate SparseTensors and `del`ed them; summing
        # the raw feature tensors is equivalent and simpler.)
        fused = SparseTensor(
            feats=sum(score.F * scale.F for score, scale in zip(scores, x)),
            coords=scores[0].C,
            stride=scores[0].s,
        )

        return self.mlp_out(fused)


        
class SparseGeometryFeatureEnhancement(nn.Module):
    """Multiscale projection followed by attention fusion, as one pipeline."""

    def __init__(self, dim):
        super().__init__()
        self.SGFE = nn.Sequential(
            MultiScaleProjection(dim=dim),
            MultiScaleFusion(dim=dim),
        )

    def forward(self, x):
        return self.SGFE(x)



if __name__ == '__main__':
    # Smoke test: run the enhancement module over a few S3DIS batches.
    # net = MultiScaleProjection(6).to('cuda:0')
    net = SparseGeometryFeatureEnhancement(dim=6).to('cuda:0')
    print(net)
    dataloader = getDataLoader(voxel_size=0.02, split="test", batch_size=2)

    # Use the iter()/next() builtins: Python-3 DataLoader iterators have no
    # .next() method, and the old name `iter` shadowed the builtin.
    data_iter = iter(dataloader)
    for _ in range(100):
        feed_dict = next(data_iter)
        # `inputs` (not `input`) to avoid shadowing the builtin.
        inputs = feed_dict['lidar'].to('cuda:0')
        target = feed_dict['targets'].to('cuda:0')

        # Seed the coordinate-map cache at the input stride so the first
        # sparse convolution can reuse the input coordinates.
        inputs.cmaps.setdefault(inputs.stride, inputs.coords)
        output = net(inputs)