import torch
from loguru import logger
from torch import nn


class EnhancedAttention(nn.Module):
    """Enhanced multi-head self-attention with mean pooling.

    Projects the input into per-head query/key/value spaces, computes
    scaled dot-product attention, merges the heads back together, and
    mean-pools the attended sequence into one vector per batch element.
    """

    def __init__(self, hidden_dim, num_heads=4):
        """
        Args:
            hidden_dim: total feature dimension; must be divisible by ``num_heads``.
            num_heads: number of attention heads.

        Raises:
            ValueError: if ``hidden_dim`` is not divisible by ``num_heads``.
        """
        super().__init__()
        if hidden_dim % num_heads != 0:
            # Silent truncation of head_dim would make the view() in forward()
            # fail (or silently drop features) — fail fast with a clear message.
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by num_heads ({num_heads})"
            )
        self.num_heads = num_heads
        self.head_dim = hidden_dim // num_heads

        self.query = nn.Linear(hidden_dim, hidden_dim)
        self.key = nn.Linear(hidden_dim, hidden_dim)
        self.value = nn.Linear(hidden_dim, hidden_dim)
        # Plain float scale. The original stored a CPU FloatTensor attribute,
        # which is not a registered buffer (it never moves with .to(device))
        # and therefore needed a per-call .to(x.device) transfer in forward().
        # A Python scalar has no device/dtype and avoids the issue entirely.
        self.scale = float(self.head_dim) ** 0.5

    def forward(self, x):
        """Apply multi-head attention and mean-pool over the sequence.

        Args:
            x: tensor of shape (batch, seq_len, hidden_dim).

        Returns:
            Tensor of shape (batch, hidden_dim).
        """
        batch_size = x.size(0)
        Q = self.query(x)
        K = self.key(x)
        V = self.value(x)

        # Split into heads: (batch, num_heads, seq_len, head_dim)
        Q = Q.view(batch_size, -1, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        K = K.view(batch_size, -1, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
        V = V.view(batch_size, -1, self.num_heads, self.head_dim).permute(0, 2, 1, 3)

        # Scaled dot-product attention scores over the sequence dimension.
        scores = torch.matmul(Q, K.transpose(-2, -1)) / self.scale
        attn_weights = torch.softmax(scores, dim=-1)

        # Aggregate context and merge heads back to (batch, seq_len, hidden_dim).
        context = torch.matmul(attn_weights, V)
        context = (
            context.permute(0, 2, 1, 3)
            .contiguous()
            .view(batch_size, -1, self.num_heads * self.head_dim)
        )
        # Mean-pool over the sequence dimension -> (batch, hidden_dim).
        return torch.mean(context, dim=1)


class DDoSDetector(nn.Module):
    """Binary DDoS classifier: CNN feature extractor -> bidirectional GRU ->
    multi-head attention pooling -> MLP head with sigmoid output."""

    def __init__(self, input_dim: int):
        super().__init__()
        logger.debug("使用CNN+双向GRU+增强型多头注意力机制+分类器构建DDoS检测模型")

        # Two-stage 1D convolutional front end over the temporal axis,
        # narrowing channels 128 -> 64 with GELU activations and dropout.
        conv_stack = [
            nn.Conv1d(input_dim, 128, 5, padding=2),
            nn.BatchNorm1d(128),
            nn.GELU(),
            nn.Dropout(0.5),
            nn.Conv1d(128, 64, 3, padding=1),
            nn.BatchNorm1d(64),
            nn.GELU(),
            nn.Dropout(0.3),
        ]
        self.cnn = nn.Sequential(*conv_stack)

        # Bidirectional recurrent encoder: 64 units per direction, so the
        # per-step output is 128-dimensional.
        self.gru = nn.GRU(64, 64, bidirectional=True, batch_first=True)

        # Attention pooling over the GRU outputs (128-dim features).
        self.attention = EnhancedAttention(128)

        # Two-layer MLP head producing a single logit.
        self.classifier = nn.Sequential(
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Dropout(0.2),
            nn.Linear(64, 1),
        )

    def forward(self, x):
        """Score a batch of sequences.

        Args:
            x: input tensor; assumed (batch, seq_len, input_dim) given the
               permutes around the Conv1d stack — TODO confirm with callers.

        Returns:
            Sigmoid probabilities of shape (batch, 1).
        """
        # Conv1d expects channels-first, GRU expects channels-last.
        features = self.cnn(x.permute(0, 2, 1))
        encoded, _ = self.gru(features.permute(0, 2, 1))
        pooled = self.attention(encoded)
        return torch.sigmoid(self.classifier(pooled))
