import spconv as spconv
import torch
import torch.nn as nn

# Define input sizes
batch_size = 2
num_points = 10
in_channels = 64
out_channels = 64

# Generate one 4-D integer coordinate per point: (batch, z, y, x)
coords = torch.randint(0, 10, size=(num_points, 4), dtype=torch.int32)
coords[:, 0] = torch.randint(0, batch_size, size=(num_points,))  # assign batch index
# NOTE(review): random coords can produce duplicate (batch, z, y, x) tuples;
# spconv generally expects unique active sites per batch — confirm for real data.

# Generate one feature vector per point
features = torch.randn(num_points, in_channels)

# Spatial shape excludes the batch column (index 0). The +10 pads the grid
# well beyond the largest coordinate; anything >= max + 1 would suffice.
max_coords = coords.max(dim=0)[0] + 10
spatial_shape = tuple(max_coords[1:].tolist())
print(max_coords)
print(spatial_shape)

# Build the sparse input tensor from (features, coords)
input_tensor = spconv.SparseConvTensor(features, coords, spatial_shape, batch_size)

# 定义稀疏卷积模型
class SparseConvModel(torch.nn.Module):
    """Minimal model wrapping a single submanifold 3-D sparse convolution."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # One 3x3x3 submanifold conv, stride 1, no bias.
        self.conv = spconv.SubMConv3d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=1,
            dilation=1,
            bias=False,
        )

    def forward(self, x):
        """Apply the sparse convolution to a SparseConvTensor and return the result."""
        out = self.conv(x)
        return out

class ResSPConvBlock(nn.Module):
    """Residual block of two submanifold sparse convolutions.

    Builds a SparseConvTensor from raw features/coordinates, applies
    conv -> LayerNorm -> ReLU -> conv -> LayerNorm, adds the input features
    back (residual), and returns (activated dense features, sparse indices).

    The residual add requires in_channels == out_channels — TODO confirm
    callers always satisfy this.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, indice_key="key1"):
        super().__init__()
        # Shared indice_key lets both convs reuse the same precomputed index
        # pairs (valid because SubMConv3d keeps the active-site layout fixed).
        self.spconv1 = spconv.SubMConv3d(in_channels=in_channels, out_channels=out_channels,
                                         stride=1, kernel_size=kernel_size, indice_key=indice_key)
        self.spconv2 = spconv.SubMConv3d(in_channels=out_channels, out_channels=out_channels,
                                         stride=1, kernel_size=kernel_size, indice_key=indice_key)
        # NOTE(review): named "instance_norm" but these are LayerNorms over the
        # channel dimension; names kept unchanged for state_dict compatibility.
        self.instance_norm1 = nn.LayerNorm(out_channels)
        self.instance_norm2 = nn.LayerNorm(out_channels)

        self.relu = nn.ReLU()
        self.resnetblock = spconv.SparseSequential(
            self.spconv1,
            self.instance_norm1,
            self.relu,
            self.spconv2,
            self.instance_norm2,
        )

    def forward(self, features, coordinates, spatial_shape, batch_size):
        """Run the residual block.

        Returns:
            (torch.Tensor, torch.Tensor): activated per-point features and the
            sparse indices (unchanged by submanifold convolutions).
        """
        input_sparse = spconv.SparseConvTensor(features, coordinates, spatial_shape, batch_size)
        residual = input_sparse
        out = self.resnetblock(input_sparse)
        # BUGFIX: SparseConvTensor does not support `out + residual` directly.
        # Add the residual on the underlying feature tensors and rewrap; the
        # indices are identical since submanifold convs preserve active sites.
        out = out.replace_feature(out.features + residual.features)
        output = self.relu(out.features)
        return output, out.indices

class ConvFFN(nn.Module):
    """Feed-forward block with a sparse convolution on the hidden features.

    Pipeline: linear1 (d_model -> dim_feedforward) + ReLU, SubMConv3d over the
    hidden features, GELU + dropout, linear2 (dim_feedforward -> d_model),
    residual add, LayerNorm.
    """

    def __init__(self, d_model, dim_feedforward, mlp_dropout=0, kernel_size=3, indice_key="key1"):
        super().__init__()
        # The conv operates on the hidden (dim_feedforward-channel) features.
        self.spconv1 = spconv.SubMConv3d(in_channels=dim_feedforward, out_channels=dim_feedforward,
                                         kernel_size=kernel_size, indice_key=indice_key)
        # Feedforward projections around the sparse convolution.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(mlp_dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        self.d_model = d_model
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout2 = nn.Identity()
        self.gelu = nn.functional.gelu
        self.relu = nn.functional.relu

    def forward(self, src, voxel_coords, spatial_shape, batch_size):
        """Apply the conv-FFN to per-voxel features `src` and return the
        normalized residual output (same shape as `src`)."""
        src2 = self.relu(self.linear1(src)).float()
        # BUGFIX: build the sparse tensor from the projected features `src2`
        # (dim_feedforward channels, matching spconv1's in_channels). The
        # original passed raw `src` here, discarding linear1's output and
        # feeding d_model-channel features into a dim_feedforward-channel conv.
        input_sparse = spconv.SparseConvTensor(src2, voxel_coords, spatial_shape, batch_size)
        src2 = self.spconv1(input_sparse).features
        src2 = self.linear2(self.dropout(self.gelu(src2)))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src
    

# Instantiate the model — positional args map to ConvFFN(d_model=64, dim_feedforward=64)
model = ConvFFN(in_channels, out_channels)

# Forward pass on the raw features/coords defined at the top of the file
output_f = model(features, coords, spatial_shape, batch_size)

# Print the input sparse tensor's coordinates and feature shape
print("输入坐标:", input_tensor.indices)
print("输入特征形状:", input_tensor.features.shape)
# print("输出坐标:", output_c)
# print("输出特征形状:", output_f.shape)