import torch
import torch.nn as nn
import math

from no_attention.config import Config


class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention.

    Args:
        hidden_dim: model width; must be evenly divisible by ``n_heads``.
        n_heads: number of attention heads.

    Raises:
        ValueError: if ``hidden_dim`` is not divisible by ``n_heads``
            (otherwise the head split/merge views fail later with an
            obscure shape error).
    """

    def __init__(self, hidden_dim, n_heads):
        super().__init__()
        # Fail fast: a non-divisible pair would silently truncate head_dim
        # and break the final .view(..., hidden_dim) in forward().
        if hidden_dim % n_heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by n_heads ({n_heads})"
            )
        self.hidden_dim = hidden_dim
        self.n_heads = n_heads
        self.head_dim = hidden_dim // n_heads

        self.q_proj = nn.Linear(hidden_dim, hidden_dim)
        self.k_proj = nn.Linear(hidden_dim, hidden_dim)
        self.v_proj = nn.Linear(hidden_dim, hidden_dim)
        self.out_proj = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x, attn_mask=None):
        """Apply self-attention over a batch of sequences.

        Args:
            x: input tensor of shape (batch, seq_len, hidden_dim).
            attn_mask: optional additive mask broadcastable to
                (batch, n_heads, seq_len, seq_len); use ``-inf`` entries to
                block attention to a position. Defaults to None (no masking),
                which preserves the original unmasked behavior.

        Returns:
            Tensor of shape (batch, seq_len, hidden_dim).
        """
        batch_size, seq_len, _ = x.size()

        # Project and split into heads: (batch, n_heads, seq_len, head_dim)
        Q = self.q_proj(x).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)
        K = self.k_proj(x).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)
        V = self.v_proj(x).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention; mask is added before softmax so
        # -inf entries get zero weight.
        scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.head_dim)
        if attn_mask is not None:
            scores = scores + attn_mask
        attn_weights = torch.softmax(scores, dim=-1)
        context = torch.matmul(attn_weights, V)

        # Merge heads back to (batch, seq_len, hidden_dim)
        context = context.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_dim)
        return self.out_proj(context)


class EnhancedModel(nn.Module):
    """Multi-task model: a shared BiLSTM encoder feeding three task heads.

    Tasks:
        * per-token subject detection (one logit per token),
        * sentence-level sentiment classification (2 classes),
        * sentence-level intent classification (``len(Config.INTENT_CLASSES)`` classes).
    """

    def __init__(self, vocab_size):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, Config.EMBED_DIM)

        # Shared bidirectional encoder. Each direction is HIDDEN_DIM//2 wide,
        # so the concatenated forward+backward output is exactly HIDDEN_DIM.
        self.encoder = nn.LSTM(
            input_size=Config.EMBED_DIM,
            hidden_size=Config.HIDDEN_DIM // 2,
            num_layers=2,
            bidirectional=True,
            batch_first=True,
        )

        # Task heads share one shape: HIDDEN_DIM -> HIDDEN_DIM//2 -> out.
        # Construction order is kept identical (subject, sentiment, intent)
        # so parameter initialization is reproducible.
        self.subject_head = self._build_head(1)
        self.sentiment_head = self._build_head(2)
        self.intent_head = self._build_head(len(Config.INTENT_CLASSES))

    @staticmethod
    def _build_head(out_features):
        """Two-layer MLP classifier head over the encoder output."""
        return nn.Sequential(
            nn.Linear(Config.HIDDEN_DIM, Config.HIDDEN_DIM // 2),
            nn.ReLU(),
            nn.Linear(Config.HIDDEN_DIM // 2, out_features),
        )

    def forward(self, x):
        """Run all three task heads on a batch of token-id sequences.

        Args:
            x: integer tensor of token ids, shape (batch, seq_len).

        Returns:
            Tuple of (subject_logits of shape (batch, seq_len),
            sentiment_logits of shape (batch, 2),
            intent_logits of shape (batch, n_intent_classes)).
        """
        token_repr, _ = self.encoder(self.embedding(x))

        # Per-token subject logits, squeezed from (batch, seq, 1) to (batch, seq).
        subject_logits = self.subject_head(token_repr).squeeze(-1)

        # Mean-pool over the time axis for the sentence-level tasks.
        sentence_repr = token_repr.mean(dim=1)
        sentiment_logits = self.sentiment_head(sentence_repr)
        intent_logits = self.intent_head(sentence_repr)

        return subject_logits, sentiment_logits, intent_logits