import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention(nn.Module):
    """Scaled dot-product self-attention over a sequence.

    Projects the input into query/key/value spaces with learned linear
    maps, then computes ``softmax(Q K^T / sqrt(d)) V``.

    Args:
        hidden_size: Dimensionality ``d`` of the input features.
    """
    def __init__(self, hidden_size):
        super(SelfAttention, self).__init__()
        self.hidden_size = hidden_size
        self.query = nn.Linear(hidden_size, hidden_size)
        self.key = nn.Linear(hidden_size, hidden_size)
        self.value = nn.Linear(hidden_size, hidden_size)

    def forward(self, x, mask=None):
        """Apply self-attention to a batch of sequences.

        Args:
            x: Input tensor of shape ``(batch_size, seq_len, hidden_size)``.
            mask: Optional tensor broadcastable to
                ``(batch_size, seq_len, seq_len)``; positions where it is 0
                receive (near-)zero attention weight. ``None`` (the default)
                disables masking and reproduces the original behavior.

        Returns:
            Tuple ``(context, attention_weights)`` with shapes
            ``(batch_size, seq_len, hidden_size)`` and
            ``(batch_size, seq_len, seq_len)``.
        """
        # Compute query, key and value projections.
        q = self.query(x)  # (batch_size, seq_len, hidden_size)
        k = self.key(x)    # (batch_size, seq_len, hidden_size)
        v = self.value(x)  # (batch_size, seq_len, hidden_size)

        # Attention logits, scaled by sqrt(d) so softmax stays in a
        # well-conditioned range.
        scores = torch.matmul(q, k.transpose(-2, -1)) / (self.hidden_size ** 0.5)  # (batch_size, seq_len, seq_len)

        if mask is not None:
            # A large negative logit becomes ~0 weight after softmax.
            scores = scores.masked_fill(mask == 0, float('-inf'))

        # Normalize logits into attention weights.
        attention_weights = F.softmax(scores, dim=-1)  # (batch_size, seq_len, seq_len)

        # Weighted sum of values.
        context = torch.matmul(attention_weights, v)  # (batch_size, seq_len, hidden_size)

        return context, attention_weights

class HARAttention(nn.Module):
    """Attention-based human activity recognition (HAR) model.

    Pipeline: MLP feature extractor -> reshape the flat feature vector
    into a pseudo-sequence of ``seq_length`` steps -> self-attention ->
    bidirectional LSTM -> linear classifier on the last timestep.

    Args:
        input_size: Size of the flat input feature vector.
        hidden_size: Width of the extracted feature vector (rounded down
            to a multiple of ``seq_length`` before reshaping).
        num_layers: Number of stacked LSTM layers.
        num_classes: Number of activity classes to predict.
        dropout: Dropout probability used throughout the model.
        seq_length: Number of pseudo-timesteps the feature vector is
            split into (default 10, matching the original behavior).

    Raises:
        ValueError: If ``hidden_size < seq_length`` (per-step feature
            size would be zero).
    """
    def __init__(self, input_size=561, hidden_size=128, num_layers=2,
                 num_classes=6, dropout=0.5, seq_length=10):
        super(HARAttention, self).__init__()

        # Feature extraction MLP: input -> 2h -> h.
        self.feature_extractor = nn.Sequential(
            nn.Linear(input_size, hidden_size * 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_size * 2, hidden_size),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Split the flat feature vector into `seq_length` pseudo-timesteps.
        self.seq_length = seq_length
        self.feature_size = hidden_size // self.seq_length
        if self.feature_size == 0:
            raise ValueError(
                f"hidden_size ({hidden_size}) must be >= seq_length ({seq_length})"
            )
        # Round down to a multiple of seq_length so the reshape is exact;
        # leftover features are truncated in forward().
        self.hidden_size = self.feature_size * self.seq_length

        # Self-attention over the pseudo-sequence.
        self.self_attention = SelfAttention(self.feature_size)

        # Bidirectional LSTM over the attended sequence.
        self.lstm = nn.LSTM(
            input_size=self.feature_size,
            hidden_size=self.feature_size,
            num_layers=num_layers,
            batch_first=True,
            # PyTorch only applies inter-layer dropout with >1 layer.
            dropout=dropout if num_layers > 1 else 0,
            bidirectional=True
        )

        # Classification head. Dropout goes BEFORE the final projection:
        # the original order (Linear then Dropout) randomly zeroed class
        # logits during training, which corrupts the scores being learned.
        # Eval-mode behavior is unchanged (dropout is identity in eval).
        self.classifier = nn.Sequential(
            nn.Dropout(dropout),
            nn.Linear(self.feature_size * 2, num_classes)  # *2: bidirectional LSTM
        )

    def forward(self, x):
        """Classify a batch of flat feature vectors.

        Args:
            x: Tensor of shape ``(batch_size, input_size)``.

        Returns:
            Logits of shape ``(batch_size, num_classes)``.
        """
        batch_size = x.size(0)

        # Feature extraction.
        x = self.feature_extractor(x)  # (batch_size, hidden_size)

        # The extractor emits `hidden_size` features but only the first
        # feature_size * seq_length are used; truncate the remainder.
        # (NOTE: no padding branch — by construction the extractor output
        # is always >= self.hidden_size, so truncation suffices.)
        if x.size(1) != self.hidden_size:
            x = x[:, :self.hidden_size]

        # Reshape the flat features into a pseudo-sequence.
        x = x.view(batch_size, self.seq_length, self.feature_size)  # (batch_size, seq_len, feature_size)

        # Self-attention over the pseudo-sequence.
        x, _ = self.self_attention(x)  # (batch_size, seq_len, feature_size)

        # Bidirectional LSTM.
        x, _ = self.lstm(x)  # (batch_size, seq_len, feature_size*2)

        # Use the last timestep's output (full context for the forward
        # direction; the backward direction has seen only one step here,
        # matching the original design).
        x = x[:, -1, :]  # (batch_size, feature_size*2)

        # Classify.
        x = self.classifier(x)  # (batch_size, num_classes)

        return x