from typing import List, Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor


# Channel shuffle and enhanced inverted residual block reused from mody_1
def channel_shuffle(x: Tensor, groups: int) -> Tensor:
    """Interleave channels across ``groups`` (ShuffleNetV2-style shuffle).

    Splits the channel axis into ``groups`` blocks, swaps the block and
    per-block axes, and flattens back, so information mixes between the
    grouped-convolution branches. Input/output shape: (batch, channels, length).
    """
    batch, channels, length = x.shape
    per_group = channels // groups
    shuffled = (
        x.reshape(batch, groups, per_group, length)
        .permute(0, 2, 1, 3)   # swap group axis with the per-group axis
        .contiguous()
        .reshape(batch, channels, length)
    )
    return shuffled


class EnhancedInvertedResidual(nn.Module):
    """ShuffleNetV2-style inverted residual block for 1-D inputs, augmented
    with a squeeze-and-excitation channel attention on the main branch.

    stride == 1: the input is split channel-wise; one half passes through
    untouched, the other goes through the conv branch, then the halves are
    concatenated and shuffled.
    stride == 2: both a downsampling shortcut (branch1) and the conv branch
    process the full input, halving the sequence length.

    Args:
        input_c: number of input channels.
        output_c: number of output channels (must be even).
        stride: 1 (channel-split residual) or 2 (downsampling).
        name: optional label used in error messages.

    Raises:
        ValueError: if ``output_c`` is odd, or ``stride == 1`` but
            ``input_c != output_c``.
    """

    def __init__(self, input_c: int, output_c: int, stride: int, name: str = ""):
        super().__init__()
        self.stride = stride
        self.name = name
        # Explicit raises instead of assert: asserts are stripped under -O.
        if output_c % 2 != 0:
            raise ValueError(f"output channel count must be even, got {output_c}")
        self.branch_features = output_c // 2
        # With stride == 1 the input is split in half, so each half must match
        # the per-branch width, i.e. input_c must equal output_c.
        if self.stride == 1 and input_c != (self.branch_features << 1):
            raise ValueError(
                f"for stride=1 the input channel count must equal the output "
                f"channel count, got input_c={input_c}, output_c={output_c}"
            )

        if self.stride == 2:
            # Downsampling shortcut: depthwise 3-tap conv + pointwise projection.
            self.branch1 = nn.Sequential(
                self.depthwise_conv(input_c, input_c, kernel_s=3, stride=stride, padding=1),
                nn.BatchNorm1d(input_c),
                nn.Conv1d(input_c, self.branch_features, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm1d(self.branch_features),
                nn.ReLU(inplace=True)
            )
        else:
            self.branch1 = nn.Identity()

        # Main branch: 1x1 conv -> depthwise conv -> BN -> attention -> 1x1 conv.
        # At stride == 1 it sees only half the channels (after the split).
        self.branch2_conv1 = nn.Sequential(
            nn.Conv1d(input_c if stride > 1 else self.branch_features,
                      self.branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(self.branch_features),
            nn.ReLU(inplace=True)
        )
        self.branch2_dwconv = self.depthwise_conv(
            self.branch_features, self.branch_features, kernel_s=3, stride=stride, padding=1
        )
        self.branch2_bn = nn.BatchNorm1d(self.branch_features)

        # Squeeze-and-excitation style gating. max(1, ...) keeps the bottleneck
        # non-empty when branch_features < 4 (nn.Conv1d rejects 0 channels).
        reduced_c = max(1, self.branch_features // 4)
        self.attention = nn.Sequential(
            nn.AdaptiveAvgPool1d(1),
            nn.Conv1d(self.branch_features, reduced_c, kernel_size=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv1d(reduced_c, self.branch_features, kernel_size=1, stride=1),
            nn.Sigmoid()
        )

        self.branch2_conv2 = nn.Sequential(
            nn.Conv1d(self.branch_features, self.branch_features, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(self.branch_features),
            nn.ReLU(inplace=True)
        )

    @staticmethod
    def depthwise_conv(input_c: int, output_c: int, kernel_s: int, stride: int = 1,
                       padding: int = 0, bias: bool = False) -> nn.Conv1d:
        """Build a depthwise 1-D convolution (groups == input channels)."""
        return nn.Conv1d(in_channels=input_c, out_channels=output_c, kernel_size=kernel_s,
                         stride=stride, padding=padding, bias=bias, groups=input_c)

    def forward(self, x: Tensor) -> Tensor:
        """Run the block; output has ``output_c`` channels, length / stride."""
        if self.stride == 1:
            # Channel split: x1 is the identity half, x2 goes through the branch.
            x1, x2 = x.chunk(2, dim=1)
            x2 = self.branch2_conv1(x2)
            x2 = self.branch2_dwconv(x2)
            x2 = self.branch2_bn(x2)
            x2 = x2 * self.attention(x2)  # channel-wise gating
            out = torch.cat((x1, self.branch2_conv2(x2)), dim=1)
        else:
            branch1_out = self.branch1(x)
            branch2_out = self.branch2_conv1(x)
            branch2_out = self.branch2_dwconv(branch2_out)
            branch2_out = self.branch2_bn(branch2_out)
            branch2_out = branch2_out * self.attention(branch2_out)
            branch2_out = self.branch2_conv2(branch2_out)
            # Both branches use the same stride/padding, so lengths must agree.
            if branch1_out.shape[2:] != branch2_out.shape[2:]:
                raise RuntimeError(
                    f"block {self.name}: branch output shapes do not match: "
                    f"{branch1_out.shape} vs {branch2_out.shape}"
                )
            out = torch.cat((branch1_out, branch2_out), dim=1)

        # Shuffle so the identity and processed halves mix in later blocks.
        out = channel_shuffle(out, 2)
        return out


# Temporal attention module (reused from the original model)
class TemporalAttention(nn.Module):
    """Additive attention pooling over the time axis.

    A small MLP scores each time step; scores are softmax-normalized across
    time and used to take a weighted sum of the inputs, collapsing
    (batch, time, hidden) down to (batch, hidden).
    """

    def __init__(self, hidden_size: int):
        super().__init__()
        # Two-layer scorer: hidden -> hidden/2 -> one scalar per time step.
        self.attention = nn.Sequential(
            nn.Linear(hidden_size, hidden_size // 2),
            nn.Tanh(),
            nn.Linear(hidden_size // 2, 1)
        )

    def forward(self, x: Tensor) -> Tensor:
        """Pool (B, T, H) to (B, H) with learned per-step weights."""
        scores = self.attention(x)          # (B, T, 1)
        weights = F.softmax(scores, dim=1)  # normalize across the time axis
        pooled = (x * weights).sum(dim=1)   # broadcasted weighted sum
        return pooled


# Improved ShuffleNet-LSTM-Attention model
class ShuffleNetLSTMAttention(nn.Module):
    """ShuffleNet-style 1-D CNN front end + BiLSTM + temporal attention head.

    Pipeline: conv stem -> maxpool -> three shuffle stages -> 1x1 conv ->
    transpose to (B, seq, feat) -> BiLSTM -> attention pooling -> dropout ->
    linear classifier. Input shape is (batch, 2, length), matching the
    original model.

    Args:
        stages_repeats: number of blocks per shuffle stage (3 entries);
            defaults to [2, 3, 2] (the lightweight mody_1 configuration).
        stages_out_channels: channels for the stem plus each stage (4 entries);
            defaults to [24, 96, 192, 384], sized to feed the LSTM.
        lstm_hidden_size: hidden size per LSTM direction.
        lstm_num_layers: number of stacked LSTM layers.
        num_classes: size of the classifier output.
        dropout_rate: dropout before the classifier (also used as inter-layer
            LSTM dropout when lstm_num_layers > 1).

    Raises:
        ValueError: if the stage configuration lists have the wrong lengths.
    """

    def __init__(self,
                 stages_repeats: Optional[List[int]] = None,
                 stages_out_channels: Optional[List[int]] = None,
                 lstm_hidden_size: int = 128,
                 lstm_num_layers: int = 2,
                 num_classes: int = 10,
                 dropout_rate: float = 0.5):
        super().__init__()
        # None-sentinel defaults instead of mutable list defaults, which would
        # be shared across all instances.
        if stages_repeats is None:
            stages_repeats = [2, 3, 2]
        if stages_out_channels is None:
            stages_out_channels = [24, 96, 192, 384]
        if len(stages_repeats) != 3:
            raise ValueError(f"expected 3 entries in stages_repeats, got {len(stages_repeats)}")
        if len(stages_out_channels) != 4:
            raise ValueError(f"expected 4 entries in stages_out_channels, got {len(stages_out_channels)}")

        self.stage_out_channels = stages_out_channels
        self.lstm_hidden_size = lstm_hidden_size
        self.lstm_num_layers = lstm_num_layers

        # -------------------------- 1. ShuffleNet feature extractor --------------------------
        input_channels = 2  # two input channels, same as the original model
        # Initial convolution stem (halves the sequence length).
        self.conv1 = nn.Sequential(
            nn.Conv1d(input_channels, stages_out_channels[0], kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm1d(stages_out_channels[0]),
            nn.ReLU(inplace=True)
        )
        input_channels = stages_out_channels[0]

        # Pooling layer (keeps the downsampling rhythm).
        self.maxpool = nn.MaxPool1d(kernel_size=3, stride=2, padding=1)

        # Multi-stage ShuffleNet feature extraction: each stage opens with a
        # stride-2 block, followed by stride-1 blocks.
        stage_names = ["stage2", "stage3", "stage4"]
        for name, repeats, output_c in zip(stage_names, stages_repeats, stages_out_channels[1:]):
            layers = [EnhancedInvertedResidual(
                input_channels, output_c, stride=2, name=f"{name}_layer0"
            )]
            for i in range(1, repeats):
                layers.append(EnhancedInvertedResidual(
                    output_c, output_c, stride=1, name=f"{name}_layer{i}"
                ))
            setattr(self, name, nn.Sequential(*layers))
            input_channels = output_c

        # Final 1x1 convolution (prepares the feature dimension for the LSTM).
        self.conv5 = nn.Sequential(
            nn.Conv1d(input_channels, stages_out_channels[-1], kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm1d(stages_out_channels[-1]),
            nn.ReLU(inplace=True)
        )
        # LSTM input dimension == final ShuffleNet output channels.
        self.lstm_input_size = stages_out_channels[-1]

        # -------------------------- 2. BiLSTM temporal modeling --------------------------
        self.bilstm = nn.LSTM(
            input_size=self.lstm_input_size,
            hidden_size=lstm_hidden_size,
            num_layers=lstm_num_layers,
            bidirectional=True,
            batch_first=True,
            # nn.LSTM rejects dropout with a single layer; disable it there.
            dropout=dropout_rate if lstm_num_layers > 1 else 0
        )

        # -------------------------- 3. Attention and classification --------------------------
        # Bidirectional LSTM doubles the feature dimension.
        self.temporal_attention = TemporalAttention(lstm_hidden_size * 2)
        self.dropout = nn.Dropout(dropout_rate)
        self.fc = nn.Linear(lstm_hidden_size * 2, num_classes)

    def forward(self, x: Tensor) -> Tensor:
        """Classify a batch of shape (B, 2, L); returns (B, num_classes)."""
        # -------------------------- 1. ShuffleNet feature extraction --------------------------
        x = self.conv1(x)          # (B, C0, L/2)
        x = self.maxpool(x)        # (B, C0, L/4)
        x = self.stage2(x)         # (B, C1, L/8)
        x = self.stage3(x)         # (B, C2, L/16)
        x = self.stage4(x)         # (B, C3, L/32)
        x_shuffle = self.conv5(x)  # (B, C3, L/32)

        # Transpose to the LSTM layout: (B, seq_len, feature_dim).
        x_lstm_input = x_shuffle.permute(0, 2, 1).contiguous()

        # -------------------------- 2. BiLSTM temporal modeling --------------------------
        # nn.LSTM initializes hidden/cell states to zeros when none are passed,
        # so no explicit h0/c0 tensors are needed.
        x_lstm, _ = self.bilstm(x_lstm_input)  # (B, L/32, 2 * hidden)

        # -------------------------- 3. Attention pooling and classification --------------------------
        x_attn = self.temporal_attention(x_lstm)  # (B, 2 * hidden)
        x_out = self.fc(self.dropout(x_attn))     # (B, num_classes)
        return x_out


# Device-classification specialized model (interface kept consistent with the original model)
def shufflenet_lstm_attention_improved(num_classes=10):
    """Build the device-classification configuration of the improved model.

    Only ``num_classes`` is exposed; every other hyperparameter is pinned so
    the factory stays interface-compatible with the original model.
    """
    config = {
        "stages_repeats": [2, 3, 2],
        # Output channels kept at the same order of magnitude as the original
        # CNN's [24, 48, 96].
        "stages_out_channels": [24, 96, 192, 384],
        "lstm_hidden_size": 128,
        "lstm_num_layers": 2,
        "dropout_rate": 0.5,
    }
    return ShuffleNetLSTMAttention(num_classes=num_classes, **config)