import torch
from torch import nn
from .self_attention import MultiHeadAttention
from .misc import PositionWiseFFN, PositionalEncoding
class EncoderBlock(nn.Module):
    """Transformer encoder block: multi-head self-attention followed by a
    position-wise FFN, each sub-layer wrapped in a residual connection and
    LayerNorm (post-norm style).
    """

    def __init__(self, hidden_dim, num_heads):
        super(EncoderBlock, self).__init__()
        self.attention = MultiHeadAttention(num_hiddens=hidden_dim, num_heads=num_heads)
        self.ffn = PositionWiseFFN(input_dim=hidden_dim)
        # BUG FIX: the original shared one LayerNorm instance for both
        # sub-layers, tying their learned scale/shift parameters. Each
        # residual connection gets its own LayerNorm, as in the standard
        # Transformer architecture.
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.norm2 = nn.LayerNorm(hidden_dim)

    def forward(self, x, valid_len):
        """Run one encoder block.

        Args:
            x: input activations; last dimension must be hidden_dim
               (assumes (batch, seq_len, hidden_dim) — TODO confirm against
               MultiHeadAttention's contract).
            valid_len: per-sequence valid lengths, forwarded to the attention
               for masking.

        Returns:
            Tensor with the same shape as ``x``.
        """
        # Self-attention sub-layer: residual add, then normalize.
        attn = self.norm1(self.attention(x, x, x, valid_len) + x)
        # FFN sub-layer: residual add, then normalize.
        return self.norm2(self.ffn(attn) + attn)
    
class DecoderBlock(nn.Module):
    """Transformer decoder block: one multi-head attention sub-layer (the
    caller supplies separate query/key/value, so it is used for
    cross-attention over the encoder output) followed by a position-wise FFN,
    each with a residual connection and LayerNorm (post-norm style).

    NOTE(review): unlike the canonical Transformer decoder, there is no
    separate masked self-attention sub-layer here — confirm this is intended.
    """

    def __init__(self, hidden_dim, num_heads):
        super(DecoderBlock, self).__init__()
        self.attention = MultiHeadAttention(num_hiddens=hidden_dim, num_heads=num_heads)
        self.ffn = PositionWiseFFN(input_dim=hidden_dim)
        # BUG FIX: the original shared one LayerNorm instance for both
        # sub-layers, tying their learned scale/shift parameters. Each
        # residual connection gets its own LayerNorm.
        self.norm1 = nn.LayerNorm(hidden_dim)
        self.norm2 = nn.LayerNorm(hidden_dim)

    def forward(self, x, k, v, valid_len):
        """Run one decoder block.

        Args:
            x: query-side activations; last dimension must be hidden_dim.
            k, v: key/value activations (typically the encoder output).
            valid_len: mask information forwarded to the attention.

        Returns:
            Tensor with the same shape as ``x``.
        """
        # Attention sub-layer: residual add on the query side, then normalize.
        attn = self.norm1(self.attention(x, k, v, valid_len) + x)
        # FFN sub-layer: residual add, then normalize.
        return self.norm2(self.ffn(attn) + attn)
    
class NaiveTransformerTranslater(nn.Module):
    """Minimal encoder-decoder Transformer for sequence-to-sequence
    translation: positional encoding, a stack of encoder blocks, a stack of
    decoder blocks cross-attending to the encoder output, and a linear
    classification head over the vocabulary.
    """

    def __init__(self, n_encoder, n_decoder, hidden_dim, num_heads, vocab_size):
        super(NaiveTransformerTranslater, self).__init__()
        self.pos = PositionalEncoding(hidden_dim)
        self.encoder_stack = nn.Sequential()
        for i in range(n_encoder):
            self.encoder_stack.add_module(f'enc {i}', EncoderBlock(hidden_dim, num_heads))
        self.decoder_stack = nn.Sequential()
        for i in range(n_decoder):
            self.decoder_stack.add_module(f"dec {i}", DecoderBlock(hidden_dim, num_heads))
        # Final fully-connected classification head over the vocabulary.
        # LazyLinear infers its input dimension on first use.
        self.classify = nn.LazyLinear(vocab_size)

    def forward(self, x, label, x_valid_len, y_valid_len):
        """Run the full encode-decode pass.

        Args:
            x: source-side activations, assumed (batch, seq_len, hidden_dim)
               — TODO confirm; an embedding layer is not applied here.
            label: target-side activations (teacher forcing input).
            x_valid_len: valid lengths of the source sequences.
            y_valid_len: valid lengths of the target sequences.

        Returns:
            Logits from the classification head, one vocab-sized vector per
            target position.
        """
        device = x.device
        max_len = x.size(1)
        # Positions 1..max_len, one entry per source position.
        seq_range = torch.arange(1, max_len + 1, device=device).float()
        # Mask of shape (batch, max_len): True where the position is within
        # the target's valid length.
        # NOTE(review): despite its name this is a length/padding mask, not a
        # triangular causal mask, and it is built from x's max_len — confirm
        # against MultiHeadAttention's expected mask format.
        causal_mask = seq_range.unsqueeze(0).expand(len(y_valid_len), max_len) <= y_valid_len.unsqueeze(1)

        # BUG FIX: the original passed the raw `x` / `label` into every block
        # (`kv = block(x, ...)`), which discarded the positional encoding and
        # all but the last block's output. Thread the running activation
        # through each stack instead.
        kv = self.pos(x)
        for block in self.encoder_stack:
            kv = block(kv, x_valid_len)

        out = self.pos(label)
        for block in self.decoder_stack:
            out = block(out, kv, kv, causal_mask)
        return self.classify(out)
        
        
        
        