import torch
import torch.nn as nn
import math

from no_attention.config import Config


class MultiHeadAttention(nn.Module):
    """Scaled dot-product multi-head self-attention.

    Projects the input into per-head query/key/value spaces, computes
    softmax(Q K^T / sqrt(head_dim)) V independently per head, then merges
    the heads through a final linear projection. Input and output both
    have shape (batch, seq_len, hidden_dim).
    """

    def __init__(self, hidden_dim, n_heads):
        super().__init__()
        # Guard against silent truncation: hidden_dim // n_heads must be
        # exact, otherwise the merge .view() in forward() would fail (or
        # silently drop dimensions).
        if hidden_dim % n_heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by n_heads ({n_heads})"
            )
        self.hidden_dim = hidden_dim
        self.n_heads = n_heads
        self.head_dim = hidden_dim // n_heads

        self.q_proj = nn.Linear(hidden_dim, hidden_dim)
        self.k_proj = nn.Linear(hidden_dim, hidden_dim)
        self.v_proj = nn.Linear(hidden_dim, hidden_dim)
        self.out_proj = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x, attn_mask=None):
        """Apply self-attention over x.

        Args:
            x: tensor of shape (batch, seq_len, hidden_dim).
            attn_mask: optional boolean tensor broadcastable to
                (batch, n_heads, seq_len, seq_len); positions where it is
                True are excluded from attention. Defaults to None
                (no masking — identical to the previous behavior).

        Returns:
            Tensor of shape (batch, seq_len, hidden_dim).
        """
        batch_size, seq_len, _ = x.size()

        # Project, then split into heads: (batch, n_heads, seq, head_dim).
        Q = self.q_proj(x).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)
        K = self.k_proj(x).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)
        V = self.v_proj(x).view(batch_size, seq_len, self.n_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention.
        scores = torch.matmul(Q, K.transpose(-2, -1)) / math.sqrt(self.head_dim)
        if attn_mask is not None:
            # -inf before softmax zeroes out the masked positions' weights.
            scores = scores.masked_fill(attn_mask, float("-inf"))
        attn_weights = torch.softmax(scores, dim=-1)
        context = torch.matmul(attn_weights, V)

        # Merge heads back to (batch, seq_len, hidden_dim).
        context = context.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_dim)
        return self.out_proj(context)


class EnhancedModel(nn.Module):
    """Joint subject-tagging and sentiment-classification model.

    Pipeline: embedding -> 2-layer bidirectional LSTM -> multi-head
    self-attention. Per-token subject logits are produced from the
    attention output; a sentence-level sentiment logit pair is produced
    from a pooled view of the first LSTM timestep.
    """

    def __init__(self, vocab_size):
        super().__init__()
        # padding_idx=0 keeps the pad token's embedding (and gradient) zero.
        self.embedding = nn.Embedding(vocab_size, Config.EMBED_DIM, padding_idx=0)

        # Two stacked bidirectional LSTM layers. Inter-layer dropout now
        # uses the shared Config.DROPOUT rate — the previous hard-coded
        # 0.3 was inconsistent with every other dropout in this model.
        self.lstm = nn.LSTM(
            Config.EMBED_DIM,
            Config.HIDDEN_DIM,
            num_layers=2,
            bidirectional=True,
            batch_first=True,
            dropout=Config.DROPOUT if Config.DROPOUT > 0 else 0,
        )

        # Self-attention over the LSTM outputs. batch_first=True accepts
        # (batch, seq, feat) directly, removing the transposes that the
        # seq-first default required in forward().
        self.attention = nn.MultiheadAttention(
            embed_dim=Config.HIDDEN_DIM * 2,
            num_heads=Config.N_HEADS,
            dropout=Config.DROPOUT,
            batch_first=True,
        )

        # Per-token subject tagging head: one binary logit per position.
        self.subject_classifier = nn.Sequential(
            nn.Linear(Config.HIDDEN_DIM * 2, Config.HIDDEN_DIM),
            nn.LayerNorm(Config.HIDDEN_DIM),
            nn.ReLU(),
            nn.Dropout(Config.DROPOUT),
            nn.Linear(Config.HIDDEN_DIM, 1),
        )

        # Sentence-level sentiment head (2-way classification).
        self.pooler = nn.Sequential(
            nn.Linear(Config.HIDDEN_DIM * 2, Config.HIDDEN_DIM),
            nn.Tanh(),
        )
        self.sentiment_classifier = nn.Sequential(
            nn.Dropout(Config.DROPOUT),
            nn.Linear(Config.HIDDEN_DIM, 2),
        )

    def forward(self, x):
        """Run the model on a batch of token-id sequences.

        Args:
            x: LongTensor of shape (batch, seq_len); index 0 is padding.

        Returns:
            subject_logits: tensor of shape (batch, seq_len) — per-token
                subject scores (pre-sigmoid).
            sentiment_logits: tensor of shape (batch, 2) — sentence-level
                sentiment scores (pre-softmax).
        """
        embedded = self.embedding(x)

        # lstm_out: (batch, seq_len, 2 * HIDDEN_DIM) — forward/backward concat.
        lstm_out, _ = self.lstm(embedded)

        # Self-attention over the sequence (query = key = value).
        attn_output, _ = self.attention(lstm_out, lstm_out, lstm_out)

        # Per-token subject logits from the attended representation.
        subject_logits = self.subject_classifier(attn_output).squeeze(-1)

        # Sentence representation: first timestep of the raw LSTM output.
        # NOTE(review): this pools lstm_out, not attn_output — presumably
        # intentional (the backward direction's state at t=0 summarizes the
        # whole sequence), but confirm against training code.
        pooled = self.pooler(lstm_out[:, 0, :])
        sentiment_logits = self.sentiment_classifier(pooled)

        return subject_logits, sentiment_logits