from re import S
import torchsparse
import torchsparse.nn as spnn
from torch import nn
from torchsparse import PointTensor
import torch
import numpy as np
from core.models.utils import initial_voxelize, point_to_voxel, voxel_to_point

__all__ = ['SPVCNN', 'get_model']


class BasicConvolutionBlock(nn.Module):
    """Sparse conv -> batch norm -> ReLU, applied as a single block.

    Args:
        inc: number of input channels.
        outc: number of output channels.
        ks: kernel size of the sparse convolution.
        stride: convolution stride (stride > 1 downsamples the voxel grid).
        dilation: convolution dilation.
    """

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
        super().__init__()
        conv = spnn.Conv3d(inc,
                           outc,
                           kernel_size=ks,
                           dilation=dilation,
                           stride=stride)
        self.net = nn.Sequential(conv, spnn.BatchNorm(outc), spnn.ReLU(True))

    def forward(self, x):
        """Apply conv/BN/ReLU to a sparse tensor and return the result."""
        return self.net(x)


class BasicDeconvolutionBlock(nn.Module):
    """Transposed sparse conv -> batch norm -> ReLU (upsampling block).

    Args:
        inc: number of input channels.
        outc: number of output channels.
        ks: kernel size of the transposed sparse convolution.
        stride: upsampling stride.
    """

    def __init__(self, inc, outc, ks=3, stride=1):
        super().__init__()
        deconv = spnn.Conv3d(inc,
                             outc,
                             kernel_size=ks,
                             stride=stride,
                             transposed=True)
        self.net = nn.Sequential(deconv, spnn.BatchNorm(outc), spnn.ReLU(True))

    def forward(self, x):
        """Upsample a sparse tensor and return the activated result."""
        return self.net(x)


class ResidualBlock(nn.Module):
    """Sparse residual block: two convs plus a (possibly projected) skip.

    The skip path is the identity when the input/output channel counts match
    and no downsampling occurs; otherwise it is a strided 1x1 conv + BN
    projection so the two branches can be summed.

    Args:
        inc: number of input channels.
        outc: number of output channels.
        ks: kernel size of the main-path convolutions.
        stride: stride of the first convolution (and of the projection).
        dilation: dilation of the main-path convolutions.
    """

    def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
        super().__init__()
        main_path = [
            spnn.Conv3d(inc,
                        outc,
                        kernel_size=ks,
                        dilation=dilation,
                        stride=stride),
            spnn.BatchNorm(outc),
            spnn.ReLU(True),
            spnn.Conv3d(outc,
                        outc,
                        kernel_size=ks,
                        dilation=dilation,
                        stride=1),
            spnn.BatchNorm(outc),
        ]
        self.net = nn.Sequential(*main_path)

        needs_projection = inc != outc or stride != 1
        if needs_projection:
            self.downsample = nn.Sequential(
                spnn.Conv3d(inc, outc, kernel_size=1, dilation=1,
                            stride=stride),
                spnn.BatchNorm(outc),
            )
        else:
            self.downsample = nn.Identity()

        self.relu = spnn.ReLU(True)

    def forward(self, x):
        """Return relu(main(x) + skip(x))."""
        return self.relu(self.net(x) + self.downsample(x))


class SPVCNN(nn.Module):
    """Sparse Point-Voxel CNN for point-cloud semantic segmentation.

    A U-Net-style sparse-voxel encoder/decoder (4 down stages, 4 up stages
    with skip connections) interleaved with point-wise branches: features are
    repeatedly exchanged between the voxel representation (sparse convs) and
    the point representation (per-point MLPs in ``point_transforms``), then a
    linear classifier produces per-point logits.

    Keyword Args:
        num_classes (int): number of output classes (required).
        cr (float): channel multiplier applied to every stage. Default 1.0.
        init_dim (int): number of input feature channels. Default 4.
        pres: point resolution used by ``initial_voxelize``. Default 1.
        vres: voxel resolution used by ``initial_voxelize``. Default 1.
    """

    def __init__(self, **kwargs):
        super().__init__()

        cr = kwargs.get('cr', 1.0)
        # Per-stage output channel counts, scaled by the channel multiplier.
        cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
        cs = [int(cr * x) for x in cs]

        init_dim = kwargs.get('init_dim', 4)

        # BUG FIX: the original wrote `kwargs.get['pres', 1]`, subscripting
        # the bound method itself, which raises TypeError whenever both keys
        # are supplied. Use dict.get() with a default instead (the explicit
        # `in` check then becomes redundant).
        self.pres = kwargs.get('pres', 1)
        self.vres = kwargs.get('vres', 1)

        self.stem = nn.Sequential(
            spnn.Conv3d(init_dim, cs[0], kernel_size=3, stride=1),
            spnn.BatchNorm(cs[0]), spnn.ReLU(True),
            spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
            spnn.BatchNorm(cs[0]), spnn.ReLU(True))

        # Encoder: each stage halves the voxel resolution (stride-2 conv)
        # then refines with two residual blocks.
        self.stage1 = nn.Sequential(
            BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1),
        )

        self.stage2 = nn.Sequential(
            BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1),
        )

        self.stage3 = nn.Sequential(
            BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1),
        )

        self.stage4 = nn.Sequential(
            BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1),
            ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
            ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
        )

        # Decoder: each `upN` is [transposed-conv upsample, refinement after
        # concatenation with the matching encoder skip feature].
        self.up1 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[4], cs[5], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[5] + cs[3], cs[5], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[5], cs[5], ks=3, stride=1, dilation=1),
            )
        ])

        self.up2 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[5], cs[6], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[6] + cs[2], cs[6], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[6], cs[6], ks=3, stride=1, dilation=1),
            )
        ])

        self.up3 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[6], cs[7], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[7] + cs[1], cs[7], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[7], cs[7], ks=3, stride=1, dilation=1),
            )
        ])

        self.up4 = nn.ModuleList([
            BasicDeconvolutionBlock(cs[7], cs[8], ks=2, stride=2),
            nn.Sequential(
                ResidualBlock(cs[8] + cs[0], cs[8], ks=3, stride=1, dilation=1),
                ResidualBlock(cs[8], cs[8], ks=3, stride=1, dilation=1),
            )
        ])

        self.classifier = nn.Sequential(nn.Linear(cs[8], kwargs['num_classes']))

        # Point-branch MLPs used when lifting voxel features back to points:
        # stem->stage4, stage4->up2 output, up2 output->up4 output.
        self.point_transforms = nn.ModuleList([
            nn.Sequential(
                nn.Linear(cs[0], cs[4]),
                nn.BatchNorm1d(cs[4]),
                nn.ReLU(True),
            ),
            nn.Sequential(
                nn.Linear(cs[4], cs[6]),
                nn.BatchNorm1d(cs[6]),
                nn.ReLU(True),
            ),
            nn.Sequential(
                nn.Linear(cs[6], cs[8]),
                nn.BatchNorm1d(cs[8]),
                nn.ReLU(True),
            )
        ])

        self.weight_initialization()
        self.dropout = nn.Dropout(0.3, True)  # in-place dropout on point/voxel features

    def weight_initialization(self):
        """Initialize every BatchNorm1d to weight=1, bias=0."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run the network on a sparse input tensor.

        Args:
            x: a torchsparse SparseTensor with features ``x.F`` and integer
               coordinates ``x.C`` (last coordinate column is presumably the
               batch index — confirm against the dataloader).

        Returns:
            Tensor of per-point class logits, shape (num_points, num_classes).
        """
        # x: SparseTensor, z: PointTensor (point-wise view of the same cloud).
        z = PointTensor(x.F, x.C.float())

        x0 = initial_voxelize(z, self.pres, self.vres)  # voxelize the points

        x0 = self.stem(x0)                          # voxel-branch stem
        z0 = voxel_to_point(x0, z, nearest=False)   # lift voxel feats to points
        # (the original's redundant `z0.F = z0.F` no-op was removed)

        # Encoder over voxels.
        x1 = point_to_voxel(x0, z0)
        x1 = self.stage1(x1)
        x2 = self.stage2(x1)
        x3 = self.stage3(x2)
        x4 = self.stage4(x3)

        # Point branch 1: bottleneck voxel feats + MLP of stem point feats.
        z1 = voxel_to_point(x4, z0)
        z1.F = z1.F + self.point_transforms[0](z0.F)

        # Decoder with encoder skip connections.
        y1 = point_to_voxel(x4, z1)
        y1.F = self.dropout(y1.F)
        y1 = self.up1[0](y1)
        y1 = torchsparse.cat([y1, x3])
        y1 = self.up1[1](y1)

        y2 = self.up2[0](y1)
        y2 = torchsparse.cat([y2, x2])
        y2 = self.up2[1](y2)
        z2 = voxel_to_point(y2, z1)
        z2.F = z2.F + self.point_transforms[1](z1.F)

        y3 = point_to_voxel(y2, z2)
        y3.F = self.dropout(y3.F)
        y3 = self.up3[0](y3)
        y3 = torchsparse.cat([y3, x1])
        y3 = self.up3[1](y3)

        y4 = self.up4[0](y3)
        y4 = torchsparse.cat([y4, x0])
        y4 = self.up4[1](y4)
        z3 = voxel_to_point(y4, z2)
        z3.F = z3.F + self.point_transforms[2](z2.F)

        out = self.classifier(z3.F)  # per-point logits
        return out

def get_model(**kwargs):
    """Build an SPVCNN from keyword configuration (see SPVCNN for keys).

    BUG FIX: the original called ``SPVCNN(kwargs)``, passing the dict as a
    positional argument to a ``**kwargs``-only constructor, which raises
    TypeError. The dict must be unpacked with ``**``.
    """
    return SPVCNN(**kwargs)

if __name__ == '__main__':
    # Smoke test: load the S3DIS sparse dataset and run one forward pass per
    # batch on GPU, printing per-iteration timing.
    from data_utils.s3dis.s3disDataLoader import S3DIS_sparse_Dataset

    voxel_size = 0.03
    dataset = S3DIS_sparse_Dataset(
        root="/media/ubuntu/数据/suyunzheng_dataset/semantic-dataset-output/spvnas_s3dis",
        voxel_size=voxel_size, num_points=60000, split='train')

    import time, random
    manual_seed = 123
    random.seed(manual_seed)
    np.random.seed(manual_seed)
    torch.manual_seed(manual_seed)
    torch.cuda.manual_seed_all(manual_seed)

    def worker_init_fn(worker_id):
        # BUG FIX: the original called random.seed(manual_seed, worker_id);
        # random.seed's second positional argument is `version` (only 1 or 2
        # are valid), so worker ids >= 3 raised ValueError and every worker
        # got the same seed anyway. Derive a distinct seed per worker.
        random.seed(manual_seed + worker_id)

    train_loader = torch.utils.data.DataLoader(
        dataset, batch_size=2, shuffle=True,
        num_workers=8, pin_memory=True,
        worker_init_fn=worker_init_fn, collate_fn=dataset.collate_fn)
    print("===>len(dataset):{}".format(len(dataset)))
    print("===>len(train_loader):{}".format(len(train_loader)))

    # model
    net = SPVCNN(num_classes=13, init_dim=3, pres=1, vres=1).to('cuda:0')

    end = time.time()
    for i, element in enumerate(train_loader):
        # For point-cloud inspection, `lib.common.visualize.visualize_with_label`
        # can render element['lidar'].coords split by the batch-index column
        # against element['targets'].feats.
        print("===> time: {}/{}--{}".format(i + 1, len(train_loader), time.time() - end))
        # Renamed from `input` to avoid shadowing the builtin.
        batch_input = element['lidar'].to('cuda:0')
        target = element['targets'].to('cuda:0')
        output = net(batch_input)

        end = time.time()
