from typing import List, Sequence

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor


def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    """Interleave channels across groups to mix information between them.

    Reshapes ``(B, C, L)`` to ``(B, groups, C // groups, L)``, swaps the
    group axis with the per-group channel axis, and flattens back to
    ``(B, C, L)``.  ``C`` must be divisible by ``groups``.
    """
    batch, channels, length = x.shape
    per_group = channels // groups
    grouped = x.reshape(batch, groups, per_group, length)
    # Swap group/channel axes so consecutive output channels come from
    # different groups, then flatten back to the original layout.
    shuffled = grouped.permute(0, 2, 1, 3).contiguous()
    return shuffled.reshape(batch, channels, length)


class EnhancedInvertedResidual(nn.Module):
    """Inverted-residual block (ShuffleNetV2-style) with channel attention.

    With ``stride == 1`` the input is split channel-wise: one half passes
    through untouched while the other half goes through the main branch.
    With ``stride == 2`` both branches downsample the full input.  The two
    halves are concatenated and channel-shuffled so they mix in the next
    block.

    Args:
        input_c: number of input channels.
        output_c: number of output channels; must be even.
        stride: 1 (keep sequence length) or 2 (halve it).
        name: optional label used in error messages.

    Raises:
        ValueError: if ``stride`` is not 1 or 2, ``output_c`` is odd, or
            ``stride == 1`` while ``input_c != output_c``.
    """

    def __init__(self, input_c: int, output_c: int, stride: int, name: str = ""):
        super().__init__()
        # Explicit raises instead of ``assert`` so validation survives
        # ``python -O`` (asserts are stripped under optimization).
        if stride not in (1, 2):
            raise ValueError(f"stride must be 1 or 2, got {stride}")
        if output_c % 2 != 0:
            raise ValueError(f"output_c must be even, got {output_c}")
        self.stride = stride
        self.name = name
        self.branch_features = output_c // 2
        # The channel split in forward() requires input_c == output_c when
        # stride == 1.  (The original assertion message incorrectly claimed
        # "half of output_c"; the check itself always enforced equality.)
        if self.stride == 1 and input_c != self.branch_features << 1:
            raise ValueError(
                f"when stride=1, input_c must equal output_c, "
                f"got input_c={input_c}, output_c={output_c}"
            )

        # Branch 1 only transforms when downsampling; with stride=1 the
        # shortcut half of the split bypasses it entirely.
        if self.stride == 2:
            self.branch1 = nn.Sequential(
                self.depthwise_conv(input_c, input_c, kernel_s=3, stride=stride, padding=1),
                nn.BatchNorm1d(input_c),
                nn.Conv1d(input_c, self.branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm1d(self.branch_features),
                nn.ReLU(inplace=True),
            )
        else:
            self.branch1 = nn.Identity()  # stride=1: plain pass-through

        # Branch 2: main feature path — pointwise conv, depthwise conv, BN,
        # attention re-weighting, then a final pointwise conv.
        self.branch2_conv1 = nn.Sequential(
            nn.Conv1d(input_c if stride > 1 else self.branch_features,
                      self.branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(self.branch_features),
            nn.ReLU(inplace=True),
        )
        self.branch2_dwconv = self.depthwise_conv(
            self.branch_features, self.branch_features, kernel_s=3, stride=stride, padding=1
        )
        self.branch2_bn = nn.BatchNorm1d(self.branch_features)

        # Squeeze-and-excitation style channel attention: global average
        # pool to per-channel statistics (B, C, 1), bottleneck to C // 4,
        # expand back, sigmoid to per-channel weights in (0, 1).
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Conv1d(self.branch_features, self.branch_features // 4, kernel_size=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(self.branch_features // 4, self.branch_features, kernel_size=1, stride=1),
            nn.Sigmoid(),
        )

        self.branch2_conv2 = nn.Sequential(
            nn.Conv1d(self.branch_features, self.branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(self.branch_features),
            nn.ReLU(inplace=True),
        )

    @staticmethod
    def depthwise_conv(input_c: int, output_c: int, kernel_s: int, stride: int = 1,
                       padding: int = 0, bias: bool = False) -> nn.Conv1d:
        """Depthwise conv (groups == in_channels): local features at low cost."""
        return nn.Conv1d(in_channels=input_c, out_channels=output_c, kernel_size=kernel_s,
                         stride=stride, padding=padding, bias=bias, groups=input_c)

    def _run_branch2(self, x: Tensor) -> Tensor:
        """Run the main branch, including channel-attention re-weighting.

        Shared by both forward paths; the original duplicated this pipeline
        verbatim in each branch of forward().
        """
        out = self.branch2_conv1(x)
        out = self.branch2_dwconv(out)
        out = self.branch2_bn(out)
        out = out * self.attention(out)  # apply per-channel weights
        return self.branch2_conv2(out)

    def forward(self, x: Tensor) -> Tensor:
        if self.stride == 1:
            # Split channels: x1 is the identity shortcut, x2 the main path.
            x1, x2 = x.chunk(2, dim=1)
            out = torch.cat((x1, self._run_branch2(x2)), dim=1)
        else:
            branch1_out = self.branch1(x)
            branch2_out = self._run_branch2(x)
            # Sanity check (raised, not asserted, so it survives -O): both
            # branches must downsample to the same sequence length.
            if branch1_out.shape[2:] != branch2_out.shape[2:]:
                raise RuntimeError(
                    f"block {self.name}: branch output shapes do not match: "
                    f"{branch1_out.shape} vs {branch2_out.shape}"
                )
            out = torch.cat((branch1_out, branch2_out), dim=1)

        # Shuffle so the next block's channel split mixes both branches.
        return channel_shuffle(out, 2)


class ShuffleNetEnhanced(nn.Module):
    """ShuffleNet-style classifier with channel attention (no Transformer).

    Expects input of shape ``(batch, 2, seq_len)`` — two CSI channels
    (amplitude + phase) over a subcarrier/time axis — and returns class
    logits of shape ``(batch, num_classes)``.

    Args:
        stages_repeats: number of blocks in each of the 3 stages.
        stages_out_channels: output channels of the stem followed by the
            3 stages (4 entries total).
        num_classes: number of device classes to predict.

    Raises:
        ValueError: if the stage configuration sequences have the wrong
            lengths.
    """

    def __init__(self,
                 stages_repeats: Sequence[int] = (2, 3, 2),
                 stages_out_channels: Sequence[int] = (24, 96, 192, 384),
                 num_classes: int = 10):
        super().__init__()
        # Tuple defaults replace the original mutable list defaults — a
        # classic Python pitfall (the same list object is shared across
        # calls).  Callers passing lists are unaffected.
        if len(stages_repeats) != 3:
            raise ValueError(
                f"expected 3 stage repeat counts, got {len(stages_repeats)}")
        if len(stages_out_channels) != 4:
            raise ValueError(
                f"expected 4 stage channel counts, got {len(stages_out_channels)}")
        self.stage_out_channels = list(stages_out_channels)

        # 1. Stem convolution (input CSI features: 2 channels = amplitude
        #    + phase), stride 2 halves the sequence length.
        input_channels = 2
        self.conv1 = nn.Sequential(
            nn.Conv1d(input_channels, stages_out_channels[0], kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(stages_out_channels[0]),
            nn.ReLU(inplace=True),
        )
        input_channels = stages_out_channels[0]

        # 2. Max pooling: further downsampling of the sequence axis.
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # 3. Three stages of enhanced inverted-residual blocks.  The first
        #    block of each stage downsamples (stride=2); the remaining
        #    blocks refine features at stride=1.
        stage_names = ["stage2", "stage3", "stage4"]
        for name, repeats, output_c in zip(stage_names, stages_repeats, stages_out_channels[1:]):
            layers = [EnhancedInvertedResidual(
                input_channels, output_c, stride=2, name=f"{name}_layer0"
            )]
            for i in range(1, repeats):
                layers.append(EnhancedInvertedResidual(
                    output_c, output_c, stride=1, name=f"{name}_layer{i}"
                ))
            setattr(self, name, nn.Sequential(*layers))
            input_channels = output_c  # next stage consumes this stage's output

        # 4. Global average pool squeezes the sequence axis to length 1.
        self.global_pool = nn.AdaptiveAvgPool1d(1)

        # 5. Lightweight classification head.
        self.classifier = nn.Sequential(
            nn.Linear(input_channels, input_channels // 2),  # reduce params
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),  # regularization against overfitting
            nn.Linear(input_channels // 2, num_classes),  # class logits
        )

    def forward(self, x: Tensor) -> Tensor:
        """Map ``(B, 2, seq_len)`` CSI input to ``(B, num_classes)`` logits."""
        # Stem conv + pooling.
        x = self.conv1(x)     # (B, C0, L1)
        x = self.maxpool(x)   # (B, C0, L2)

        # Multi-stage feature extraction.
        x = self.stage2(x)    # (B, C1, L3)
        x = self.stage3(x)    # (B, C2, L4)
        x = self.stage4(x)    # (B, C3, L5)

        # Collapse the sequence axis and classify.
        x = self.global_pool(x)  # (B, C3, 1)
        x = x.flatten(1)         # (B, C3)
        return self.classifier(x)


# 设备分类特化模型（默认10类设备）
def shufflenet_enhanced(num_classes=10):
    """Build the device-classification model (defaults to 10 classes)."""
    config = {
        "stages_repeats": [2, 3, 2],              # blocks per stage
        "stages_out_channels": [24, 96, 192, 384],  # channels per stage
        "num_classes": num_classes,
    }
    return ShuffleNetEnhanced(**config)