import torch
import torch.nn as nn


class CNN_LSTM_Attention(nn.Module):
    """1-D CNN feature extractor + bidirectional LSTM + additive attention
    pooling, followed by a linear classifier.

    Args:
        configs: object exposing ``input_channels``, ``mid_channels``,
            ``stride`` and ``num_classes`` attributes.
        hparams: unused here; kept for interface compatibility with callers.
    """

    def __init__(self, configs, hparams):
        super(CNN_LSTM_Attention, self).__init__()
        # Only the first of the original candidate filter sizes [5, 9, 11]
        # was ever used, so keep a single named constant.
        kernel_size = 5
        self.conv = nn.Conv1d(
            configs.input_channels,
            configs.mid_channels,
            kernel_size=kernel_size,
            stride=configs.stride,
            bias=False,
            padding=kernel_size // 2,  # "same"-style padding for odd kernels
        )
        self.lstm = nn.LSTM(configs.mid_channels, configs.mid_channels,
                            batch_first=True, bidirectional=True)
        # A bidirectional LSTM emits 2 * mid_channels features per time step.
        # (The original hard-coded 64, which only worked for mid_channels == 32.)
        lstm_out_dim = 2 * configs.mid_channels
        self.attn = nn.Linear(lstm_out_dim, 1)
        self.fc = nn.Linear(lstm_out_dim, configs.num_classes)

    def forward(self, x):
        """Classify a batch of sequences.

        Args:
            x: tensor of shape (batch, input_channels, seq_len).

        Returns:
            Logits of shape (batch, num_classes).
        """
        # CNN feature extraction
        x = torch.relu(self.conv(x))
        x = x.permute(0, 2, 1)  # LSTM expects (batch, seq_len, features)

        # Bidirectional LSTM over the sequence
        lstm_out, _ = self.lstm(x)  # (batch, seq_len, 2 * mid_channels)

        # Additive attention: score each step, softmax over time, weighted sum
        attn_weights = self.attn(lstm_out).squeeze(2)      # (batch, seq_len)
        attn_weights = torch.softmax(attn_weights, dim=1)  # normalize over time
        weighted = lstm_out * attn_weights.unsqueeze(2)    # weight each step
        pooled = weighted.sum(dim=1)                       # (batch, 2 * mid_channels)

        # Final classification layer
        return self.fc(pooled)
