import torch
import torch.nn as nn
import torch.nn.functional as F

class AttentionLayer(nn.Module):
    """Additive attention pooling over the time dimension.

    Each timestep's hidden vector is projected to a scalar score; the
    scores are softmax-normalized along the sequence axis and used to
    form a weighted sum of the inputs.
    """

    def __init__(self, hidden_size):
        super(AttentionLayer, self).__init__()
        # One scalar relevance score per timestep.
        self.attention = nn.Linear(hidden_size, 1)

    def forward(self, x):
        """Pool ``x`` of shape (batch, seq_len, hidden) into (batch, hidden).

        Returns:
            context: (batch, hidden) attention-weighted sum of timesteps.
            weights: (batch, seq_len, 1) softmax attention weights.
        """
        scores = self.attention(x)          # (batch, seq_len, 1)
        weights = F.softmax(scores, dim=1)  # normalize across timesteps
        context = (weights * x).sum(dim=1)  # (batch, hidden)
        return context, weights

class HARCNNLSTMAttention(nn.Module):
    """CNN-LSTM hybrid model with attention for human activity recognition.

    The flat feature vector is split into ``channels`` parallel 1-D signals,
    passed through two Conv1d+BatchNorm+MaxPool stages, fed to a
    bidirectional LSTM, pooled over time with additive attention, and
    classified by a small fully-connected head.

    Args:
        input_size: Length of the flat input feature vector. Must be
            divisible by ``channels``.
        hidden_size: LSTM hidden size per direction.
        num_layers: Number of stacked LSTM layers.
        num_classes: Number of output activity classes.
        dropout: Dropout probability used between stacked LSTM layers
            (only when ``num_layers > 1``) and in the classifier head.
        channels: Number of input channels the flat vector is reshaped
            into (default 3, e.g. tri-axial accelerometer data).

    Raises:
        ValueError: If ``input_size`` is not divisible by ``channels``.
    """

    def __init__(self, input_size=561, hidden_size=128, num_layers=2,
                 num_classes=6, dropout=0.5, channels=3):
        super(HARCNNLSTMAttention, self).__init__()

        # Fail fast: a non-divisible input_size would otherwise surface as
        # an obscure view/shape error in forward().
        if input_size % channels != 0:
            raise ValueError(
                f"input_size ({input_size}) must be divisible by "
                f"channels ({channels})"
            )
        self.channels = channels
        # Length of each channel's 1-D signal before pooling (kept as an
        # attribute for introspection; forward() infers it via view(..., -1)).
        self.seq_length = input_size // self.channels

        # CNN feature extractor: two conv/BN/pool stages, 3-wide kernels
        # with padding=1 so only the pooling halves the sequence length.
        self.conv1 = nn.Conv1d(self.channels, 64, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = nn.Conv1d(64, 128, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm1d(128)
        self.pool = nn.MaxPool1d(kernel_size=2, stride=2)

        # Temporal model over the CNN feature sequence.
        self.lstm = nn.LSTM(
            input_size=128,  # channel count produced by conv2
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
            # nn.LSTM warns if dropout is set with a single layer.
            dropout=dropout if num_layers > 1 else 0,
            bidirectional=True
        )

        # Attention pooling; *2 because the LSTM is bidirectional.
        self.attention = AttentionLayer(hidden_size * 2)

        # Classifier head. NOTE(review): BatchNorm1d here requires
        # batch_size > 1 in training mode.
        self.fc = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.BatchNorm1d(hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_size, num_classes)
        )

    def forward(self, x):
        """Classify a batch of flat feature vectors.

        Args:
            x: Tensor of shape (batch_size, input_size).

        Returns:
            Logits of shape (batch_size, num_classes).
        """
        batch_size = x.size(0)

        # Reshape flat features to (batch, channels, seq_length) for Conv1d.
        x = x.view(batch_size, self.channels, -1)

        # CNN feature extraction; each pool halves the temporal length.
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.pool(x)
        x = F.relu(self.bn2(self.conv2(x)))
        x = self.pool(x)

        # (batch, channels, seq_len) -> (batch, seq_len, features) for LSTM.
        x = x.permute(0, 2, 1)

        # Temporal modeling.
        x, _ = self.lstm(x)

        # Attention-weighted pooling over timesteps (weights discarded).
        x, _ = self.attention(x)

        # Classification head produces logits.
        return self.fc(x)