import torch
import torch.nn as nn
import torch.nn.functional as F

# Module-level default device; used by the demo script and (in the original
# code) by a few hard-coded .to(device) calls inside modules.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class GRN(nn.Module):
    """Global Response Normalization (ConvNeXtV2 core component).

    Operates on channels-last tensors of shape (N, H, W, C). With the
    zero-initialized gamma/beta the layer starts out as an identity map.
    """

    def __init__(self, dim):
        super().__init__()
        # Learnable per-channel scale and shift, broadcast over (N, H, W).
        self.gamma = nn.Parameter(torch.zeros(1, 1, 1, dim))
        self.beta = nn.Parameter(torch.zeros(1, 1, 1, dim))

    def forward(self, x):
        # Per-channel L2 norm aggregated over the spatial axes (H, W).
        spatial_norm = x.norm(p=2, dim=(1, 2), keepdim=True)
        # Divisive normalization across channels; epsilon guards division by zero.
        scale = spatial_norm / (spatial_norm.mean(dim=-1, keepdim=True) + 1e-6)
        return self.gamma * (x * scale) + self.beta + x


class ConvNeXtV2Block(nn.Module):
    """ConvNeXtV2 basic block (replaces the original residual block).

    Structure: depthwise 7x7 conv -> LayerNorm -> pointwise expansion ->
    GRN -> pointwise projection, with a residual connection and optional
    per-channel layer scale.

    Args:
        dim: number of input/output channels.
        layer_scale_init_value: initial layer-scale gamma; a value <= 0
            disables layer scale entirely.
    """

    def __init__(self, dim, layer_scale_init_value=1e-6):
        super().__init__()
        # Depthwise convolution: groups == channels, one 7x7 filter per channel.
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.norm = nn.LayerNorm(dim, eps=1e-6)
        # Inverted-bottleneck MLP (4x expansion), applied channels-last.
        # NOTE(review): the reference ConvNeXtV2 block applies GELU between
        # pwconv1 and GRN; it is absent here — confirm this is intentional.
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.grn = GRN(4 * dim)
        self.pwconv2 = nn.Linear(4 * dim, dim)
        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones(dim)) if layer_scale_init_value > 0 else None

    def forward(self, x):
        # Keep the residual input (renamed from `input`, which shadows a builtin).
        residual = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)  # [N, C, H, W] -> [N, H, W, C]
        x = self.norm(x)
        x = self.pwconv1(x)
        x = self.grn(x)
        x = self.pwconv2(x)
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # [N, H, W, C] -> [N, C, H, W]
        return residual + x

class ChannelAttention(nn.Module):
    """CBAM-style channel attention.

    Squeezes the spatial dimensions with both average and max pooling,
    pushes each descriptor through a shared two-layer bottleneck MLP
    (implemented as 1x1 convolutions), sums the results, and returns a
    sigmoid gate of shape (N, C, 1, 1).
    """

    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)

        # Shared bottleneck MLP: C -> C/ratio -> C, no biases.
        self.fc1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        def excite(pooled):
            # Shared MLP applied to one pooled descriptor.
            return self.fc2(self.relu1(self.fc1(pooled)))

        gate = excite(self.avg_pool(x)) + excite(self.max_pool(x))
        return self.sigmoid(gate)


class SpatialAttention(nn.Module):
    """CBAM-style spatial attention.

    Builds a 2-channel map from the channel-wise mean and max of the input,
    convolves it down to one channel, and returns a sigmoid gate of shape
    (N, 1, H, W).
    """

    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()

        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        # "Same" padding for odd kernels: 3 for k=7, 1 for k=3.
        self.conv1 = nn.Conv2d(2, 1, kernel_size,
                               padding=(kernel_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        pooled = torch.cat(
            [x.mean(dim=1, keepdim=True),
             x.max(dim=1, keepdim=True).values],
            dim=1)
        return self.sigmoid(self.conv1(pooled))


class AttentionModule(nn.Module):
    """CBAM block: channel attention followed by spatial attention, applied
    multiplicatively to the input feature map.

    Fix: the original constructor called .to(device) on each submodule,
    hard-coding placement via a module-level global. Submodules are moved
    automatically when the parent module's .to()/.cuda() is called, so the
    explicit calls were removed to keep the module device-agnostic.
    """

    def __init__(self, in_planes):
        super(AttentionModule, self).__init__()
        self.ca = ChannelAttention(in_planes)
        self.sa = SpatialAttention()

    def forward(self, x):
        # Gate channels first, then spatial positions.
        x = self.ca(x) * x
        x = self.sa(x) * x
        return x

class TransformerEncoder(nn.Module):
    """Single post-norm transformer encoder layer for feature vectors.

    The input (batch, d_model) is treated as a length-1 sequence, run
    through multi-head self-attention and a position-wise feed-forward
    network (each with residual + LayerNorm), then returned with the same
    (batch, d_model) shape.
    """

    def __init__(self, d_model=1024, nhead=8, dim_feedforward=2048, dropout=0.1):
        super(TransformerEncoder, self).__init__()
        # Multi-head self-attention (expects (seq, batch, d_model) inputs).
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Position-wise feed-forward network.
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)
        # Post-sublayer normalization.
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, src):
        # (batch, d_model) -> (seq=1, batch, d_model).
        seq = src.unsqueeze(0)
        attn_out, _ = self.self_attn(seq, seq, seq)
        seq = self.norm1(seq + self.dropout1(attn_out))
        ff_out = self.linear2(self.dropout(torch.relu(self.linear1(seq))))
        seq = self.norm2(seq + self.dropout2(ff_out))
        # Drop the sequence axis again: back to (batch, d_model).
        return seq.squeeze(0)

class MultiStreamConvNeXtV2(nn.Module):
    """Improved multi-stream network (five-class task by default).

    Three input streams: an RGB image branch, a depth image branch, and a
    tabular (structured-data) branch. The two image branches are fused by
    channel concatenation, refined through ConvNeXtV2 blocks with CBAM
    attention, then concatenated with the tabular features for classification.

    Fix: the original forward() constructed new AttentionModule instances on
    every call, so their weights were re-randomized each forward pass and
    never registered as parameters (hence never trained). They are now
    created once in __init__ as proper submodules.
    """

    def __init__(self, in_channels=3, num_classes=5):
        super().__init__()

        # Branch 1 (raw image)
        self.stem1 = nn.Sequential(
            nn.Conv2d(in_channels, 32, 3, padding=1, bias=False),
            nn.MaxPool2d(2, 2)
        )
        self.block1 = nn.Sequential(
            ConvNeXtV2Block(32),
            ConvNeXtV2Block(32),
            nn.MaxPool2d(2, 2)
        )

        # Branch 2 (depth information)
        self.stem2 = nn.Sequential(
            nn.Conv2d(in_channels, 32, 7, padding=3, bias=False),
            nn.MaxPool2d(2, 2)
        )
        self.block2 = nn.Sequential(
            ConvNeXtV2Block(32),
            ConvNeXtV2Block(32),
            nn.MaxPool2d(2, 2)
        )

        # Branch 3 (structured/tabular data; 42 input features)
        self.fc_block = nn.Sequential(
            nn.Linear(42, 128),
            nn.BatchNorm1d(128),  # BatchNorm1d used instead of Dropout
            nn.GELU(),
            nn.Linear(128, 256),
            nn.BatchNorm1d(256),
            nn.GELU(),
            nn.Linear(256, 512),
            nn.BatchNorm1d(512)
        )

        # Classification head over concatenated image (512) + tabular (512) features.
        self.class_head = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.GELU(),
            nn.Linear(512, num_classes)
        )

        # Multi-level feature fusion blocks.
        self.fusion = nn.ModuleList([
            ConvNeXtV2Block(64),
            ConvNeXtV2Block(128),
            ConvNeXtV2Block(256)
        ])

        # Image-feature pooling head (ConvNeXtV2 design convention).
        self.before_add = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.LayerNorm(512),
        )

        # 1x1 convolutions to step up the channel count between fusion stages.
        self.channel_adjust = nn.ModuleDict({
            'layer1': nn.Conv2d(64, 128, 1),
            'layer2': nn.Conv2d(128, 256, 1),
            'layer3': nn.Conv2d(256, 512, 1)
        })

        # Channel + spatial attention, registered here so the parameters train.
        self.attn_low = AttentionModule(64)    # after branch fusion (64 ch)
        self.attn_high = AttentionModule(512)  # after the fusion pyramid (512 ch)

    def forward(self, x1, x2, x3):
        # Branch 1: RGB image.
        f1 = self.block1(self.stem1(x1))

        # Branch 2: depth image.
        f2 = self.block2(self.stem2(x2))

        # Fuse the branches by channel concatenation (32 + 32 = 64 channels).
        fused = torch.cat([f1, f2], dim=1)
        fused = self.attn_low(fused)

        # Multi-level fusion: block -> channel step-up -> spatial downsample.
        for i, block in enumerate(self.fusion):
            fused = block(fused)
            fused = self.channel_adjust[f'layer{i + 1}'](fused)
            fused = F.max_pool2d(fused, 2)

        fused = self.attn_high(fused)

        # Structured-data branch.
        tabular_feat = self.fc_block(x3)

        # Final classification over concatenated image + tabular features.
        before_img = self.before_add(fused)
        out = torch.cat([before_img, tabular_feat], dim=-1)
        return self.class_head(out)


from torchinfo import summary

if __name__ == "__main__":
    # Smoke test: build the model, run one forward pass, print a summary.
    # Fix: the model must be moved to `device` BEFORE the forward pass —
    # the original ran inference first with inputs already on `device`,
    # which crashes whenever CUDA is available.
    model = MultiStreamConvNeXtV2(in_channels=3, num_classes=4).to(device)

    rgb = torch.randn([2, 3, 256, 256], device=device)
    depth = torch.randn([2, 3, 256, 256], device=device)
    tabular = torch.randn([2, 42], device=device)
    output = model(rgb, depth, tabular)

    summary(
        model,
        input_size=[(2, 3, 256, 256), (2, 3, 256, 256), (2, 42)],  # one size per input branch
        depth=5,  # nesting depth to display
        col_names=["input_size", "output_size", "num_params"]
    )