import torch
import torch.nn as nn
import spconv.pytorch as spconv
from spconv.pytorch import SparseConv3d, SubMConv3d





class VoxelizationLayer(nn.Module):
    """Voxelize point clouds: points falling into the same voxel are averaged.

    Replaces the original Python-dict accumulation loop with a vectorized
    scatter-mean (torch.unique + index_add_), which also handles the empty
    case gracefully (the dict version crashed on torch.stack([])).
    """

    def __init__(self, voxel_size, point_cloud_range):
        """
        Args:
            voxel_size: (3,) tensor — voxel edge lengths along (x, y, z).
            point_cloud_range: (6,) tensor — (x_min, y_min, z_min, x_max, y_max, z_max).
        """
        super(VoxelizationLayer, self).__init__()
        self.voxel_size = voxel_size
        self.point_cloud_range = point_cloud_range

    def forward(self, point_cloud_batch):
        """Voxelize each cloud in the batch.

        Args:
            point_cloud_batch: iterable of (N_i, C) tensors whose first three
                columns are x, y, z coordinates (C >= 3).

        Returns:
            (features, coords, grid_size): lists of (V_i, C) per-voxel mean
            features and (V_i, 3) int64 voxel indices in (x, y, z) order,
            plus the (3,) float grid size (voxels per axis).
        """
        grid_size = (self.point_cloud_range[3:] - self.point_cloud_range[:3]) / self.voxel_size
        batch_voxel_features = []
        batch_coord_indices = []

        for point_cloud in point_cloud_batch:
            coord_indices = torch.floor(
                (point_cloud[:, :3] - self.point_cloud_range[:3]) / self.voxel_size
            ).int()
            # Drop points that fall outside the configured range.
            valid_mask = ((coord_indices >= 0) & (coord_indices < grid_size)).all(dim=1)
            coord_indices = coord_indices[valid_mask]
            points = point_cloud[valid_mask]

            # Vectorized scatter-mean over voxels: `inverse` maps each point
            # to its row in `unique_coords`.
            unique_coords, inverse = torch.unique(coord_indices, dim=0, return_inverse=True)
            num_voxels = unique_coords.shape[0]
            feature_dim = points.shape[1]

            sums = torch.zeros(num_voxels, feature_dim, dtype=points.dtype, device=points.device)
            sums.index_add_(0, inverse, points)
            counts = torch.zeros(num_voxels, 1, dtype=points.dtype, device=points.device)
            counts.index_add_(
                0, inverse,
                torch.ones(points.shape[0], 1, dtype=points.dtype, device=points.device),
            )

            batch_voxel_features.append(sums / counts)
            # int64 to match the original torch.tensor(list) output dtype.
            batch_coord_indices.append(unique_coords.long())

        return batch_voxel_features, batch_coord_indices, grid_size


class SPConvNet(nn.Module):
    """Sparse 3-D convolutional middle extractor.

    Consumes per-sample voxel features + integer voxel coordinates, runs four
    sparse conv stages (each ending in a strided downsampling conv), then
    densifies and folds the depth axis into channels to yield a BEV map.
    """

    def __init__(self):
        super(SPConvNet, self).__init__()

        # spconv layers must be chained with spconv.SparseSequential, which
        # applies dense modules (nn.ReLU) to the .features of a
        # SparseConvTensor; plain nn.Sequential would hand the
        # SparseConvTensor itself to ReLU (and spconv.modules.SparseReLU is
        # not part of the spconv 2.x API).
        self.block1 = spconv.SparseSequential(
            SubMConv3d(4, 16, kernel_size=3, stride=1, padding=1, indice_key="subm0"),
            nn.ReLU(),
            SubMConv3d(16, 16, kernel_size=3, stride=1, padding=1, indice_key="subm0"),
            nn.ReLU(),
            SparseConv3d(16, 32, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
        )

        self.block2 = spconv.SparseSequential(
            SubMConv3d(32, 32, kernel_size=3, stride=1, padding=1, indice_key="subm1"),
            nn.ReLU(),
            SubMConv3d(32, 32, kernel_size=3, stride=1, padding=1, indice_key="subm1"),
            nn.ReLU(),
            SparseConv3d(32, 64, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
        )

        self.block3 = spconv.SparseSequential(
            SubMConv3d(64, 64, kernel_size=3, stride=1, padding=1, indice_key="subm2"),
            nn.ReLU(),
            SubMConv3d(64, 64, kernel_size=3, stride=1, padding=1, indice_key="subm2"),
            nn.ReLU(),
            SubMConv3d(64, 64, kernel_size=3, stride=1, padding=1, indice_key="subm2"),
            nn.ReLU(),
            SparseConv3d(64, 64, kernel_size=3, stride=2, padding=[0, 1, 1]),
            nn.ReLU(),
        )

        self.block4 = spconv.SparseSequential(
            SubMConv3d(64, 64, kernel_size=3, stride=1, padding=1, indice_key="subm3"),
            nn.ReLU(),
            SubMConv3d(64, 64, kernel_size=3, stride=1, padding=1, indice_key="subm3"),
            nn.ReLU(),
            SubMConv3d(64, 64, kernel_size=3, stride=1, padding=1, indice_key="subm3"),
            nn.ReLU(),
            # Collapse only the depth axis; keeps H and W untouched.
            SparseConv3d(64, 64, kernel_size=(3, 1, 1), stride=(2, 1, 1)),
            nn.ReLU(),
        )

    def forward(self, voxel_features, coord_indices, batch_size, input_shape):
        """Run the sparse stages and return a dense BEV feature map.

        Args:
            voxel_features: list of (V_i, 4) feature tensors, one per sample.
            coord_indices: list of (V_i, 3) integer voxel coords in (x, y, z) order.
            batch_size: number of samples in the batch.
            input_shape: (3,) tensor of grid extents in (x, y, z) order.

        Returns:
            (N, C*D, H, W) dense tensor with depth folded into channels.
        """
        all_features = torch.cat(list(voxel_features), dim=0)
        # Derive the target device from the data instead of relying on a
        # module-level `device` global.
        dev = all_features.device

        stacked_coords = []
        for batch_id, coords in enumerate(coord_indices):
            n = coords.shape[0]
            batch_col = torch.full((n, 1), batch_id, dtype=torch.int32, device=coords.device)
            # Voxelization emits (x, y, z) indices, but spatial_shape below is
            # the flipped grid (z, y, x) — flip the columns so every index
            # stays inside the spatial shape.
            stacked_coords.append(torch.cat([batch_col, coords.flip(dims=(1,)).int()], dim=1))
        all_coords = torch.cat(stacked_coords, dim=0)

        x = spconv.SparseConvTensor(
            features=all_features,
            indices=all_coords.to(dev),  # spconv requires int32 (batch, z, y, x) indices
            spatial_shape=[int(dim) for dim in input_shape.flip(dims=(0,))],
            batch_size=batch_size,
        )

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)

        # Densify, then fold depth into channels -> BEV pseudo-image.
        x = x.dense()
        n, c, d, h, w = x.shape
        return x.view(n, c * d, h, w)

class PointPillarsBackbone(nn.Module):
    """RPN-style 2-D backbone: three downsampling conv stages whose outputs
    are transposed-convolved back to the block1 resolution and concatenated
    along the channel axis (3 * 2 * input_channel output channels)."""

    def __init__(self, input_channel=64):
        super(PointPillarsBackbone, self).__init__()
        c = input_channel
        self.block1 = self._make_block(c, c, 4, stride=2)
        self.block2 = self._make_block(c, c * 2, 6, stride=2)
        self.block3 = self._make_block(c * 2, c * 4, 6, stride=2)

        # Each stage is upsampled to the block1 resolution with 2*c channels.
        self.deconv1 = nn.ConvTranspose2d(c, c * 2, kernel_size=1, stride=1, padding=0, output_padding=0)
        self.deconv2 = nn.ConvTranspose2d(c * 2, c * 2, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.deconv3 = nn.ConvTranspose2d(c * 4, c * 2, kernel_size=5, stride=4, padding=1, output_padding=1)

        self.bn_deconv1 = nn.BatchNorm2d(c * 2)
        self.bn_deconv2 = nn.BatchNorm2d(c * 2)
        self.bn_deconv3 = nn.BatchNorm2d(c * 2)

    def _make_block(self, in_channels, out_channels, num_layers, stride):
        """Build one stage: a strided conv unit followed by num_layers-1
        stride-1 conv units, each unit being Conv -> BN -> ReLU."""
        def conv_unit(cin, s):
            return [
                nn.Conv2d(cin, out_channels, kernel_size=3, stride=s, padding=1),
                nn.BatchNorm2d(out_channels),
                nn.ReLU(inplace=True),
            ]

        modules = conv_unit(in_channels, stride)
        for _ in range(num_layers - 1):
            modules += conv_unit(out_channels, 1)
        return nn.Sequential(*modules)

    def forward(self, x):
        stage1 = self.block1(x)
        stage2 = self.block2(stage1)
        stage3 = self.block3(stage2)

        upsampled = [
            self.bn_deconv1(self.deconv1(stage1)),
            self.bn_deconv2(self.deconv2(stage2)),
            self.bn_deconv3(self.deconv3(stage3)),
        ]
        return torch.cat(upsampled, dim=1)





class CIASSD(nn.Module):
    """End-to-end detector: voxelization -> sparse 3-D CNN -> 2-D backbone -> head.

    NOTE(review): `MultiTaskHead` is neither defined nor imported in this file,
    and `device` is a module-level global defined *after* this class body —
    both must exist before CIASSD() is constructed. Confirm they are provided
    elsewhere, otherwise construction raises NameError.
    """
    def __init__(self):
        super(CIASSD, self).__init__()
        # KITTI-style setup: 0.05 m voxels in x/y, 0.1 m in z, over
        # x in [0, 70.4], y in [-40, 40], z in [-3, 1].
        self.voxelization = VoxelizationLayer(
            voxel_size=torch.tensor([0.05, 0.05, 0.1]).to(device), 
            point_cloud_range=torch.tensor([0, -40, -3, 70.4, 40, 1]).to(device)
        )
        self.spconv_net = SPConvNet()
        self.backbone = PointPillarsBackbone(64)


        # NOTE(review): the backbone concatenates three (64*2)-channel maps,
        # i.e. 384 channels, but the head is built with in_channels=256 —
        # looks like a channel mismatch; verify against MultiTaskHead.
        self.detection_head = MultiTaskHead(in_channels=256, num_classes=10)

    def forward(self, point_cloud_batch):
        """Run detection on a batch of raw point clouds.

        Args:
            point_cloud_batch: batch of (N, 4) point clouds (x, y, z, intensity).

        Returns:
            Whatever `self.detection_head` produces for the BEV feature map.
        """
        voxel_features, coord_indices, grid_size = self.voxelization(point_cloud_batch)
        sparse_features = self.spconv_net(voxel_features, coord_indices, len(point_cloud_batch), grid_size)
    
        backbone_features = self.backbone(sparse_features)
        detections = self.detection_head(backbone_features)
        return detections


# Smoke test: build the model and push one synthetic batch through it.
# `device` stays module-level: the classes above read it as a global.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = CIASSD().to(device)

# Synthetic KITTI-like batch: 2 clouds x 30000 points with columns
# (x, y, z, intensity) spanning x in [0, 70.4], y in [-40, 40],
# z in [-3, 1], intensity in [0, 1].
scale = torch.tensor([70.4, 80, 4, 1])
shift = torch.tensor([0, 40, 3, 0])
point_cloud_batch = (torch.rand((2, 30000, 4), dtype=torch.float32) * scale - shift).to(device)

output = model(point_cloud_batch)
