import torch
from torch import nn
import math
from sshcode.Transformer.Decoder import Decoder, get_subsequent_mask
from sshcode.Transformer.Encoder import Encoder, get_padding_mask
from sshcode.Transformer.output import Generator

class Transformer(nn.Module):
    """Encoder-decoder Transformer.

    Embeds and encodes the source sequence, decodes the target sequence
    against the encoder memory, and projects decoder states to
    target-vocabulary logits via the generator head.
    """

    def __init__(self, src_vocab_size, tgt_vocab_size, d_model, n_head, d_ff, N=6, dropout=0.1):
        super().__init__()
        # Encoder/Decoder stacks share width (d_model), heads, FFN size and depth N.
        self.encoder = Encoder(src_vocab_size, d_model, n_head, d_ff, N, dropout)
        self.decoder = Decoder(tgt_vocab_size, d_model, n_head, d_ff, N, dropout)
        # Final projection from d_model to target-vocabulary scores.
        self.generator = Generator(d_model, tgt_vocab_size)

    def forward(self, src_x, src_mask, tgt_x, tgt_mask):
        """Run a full encode-decode pass and return the generator output."""
        enc_memory = self.encoder(src_x, src_mask)
        dec_states = self.decoder(tgt_x, tgt_mask, enc_memory, src_mask)
        return self.generator(dec_states)

def make_model(src_vocab_size, tgt_vocab_size, d_model, n_head, d_ff, N=6, dropout=0.1):
    """Construct a Transformer and Xavier-initialize its weight matrices.

    Only parameters with more than one dimension (weight matrices) are
    re-initialized; 1-D parameters such as biases and LayerNorm gains
    keep their module defaults.
    """
    model = Transformer(src_vocab_size, tgt_vocab_size, d_model, n_head, d_ff, N, dropout)
    # Xavier-uniform initialization, matrices only.
    for weight in (p for p in model.parameters() if p.dim() > 1):
        nn.init.xavier_uniform_(weight)
    return model

# --- Demo: build a small Transformer and run one forward pass -------------
src_vocab_size = 6
tgt_vocab_size = 8
d_model = 512
n_head = 8
d_ff = 2048
N = 6
dropout = 0.1

model = make_model(src_vocab_size, tgt_vocab_size, d_model, n_head, d_ff, N, dropout)
# To inspect the total parameter count:
#   print(sum(p.numel() for p in model.parameters()))

# Encoder input: two source sequences; 0 is treated as the padding index.
src_inputs = torch.tensor([
    [1, 2, 3],
    [4, 5, 0]
])
src_mask = get_padding_mask(src_inputs, 0)

# Decoder input: two target sequences; 0 is treated as the padding index.
tgt_inputs = torch.tensor([
    [1, 2, 3, 4],
    [4, 5, 0, 0]
])
tgt_pad_mask = get_padding_mask(tgt_inputs, 0)
# Derive the causal-mask size from the data instead of hard-coding 4,
# so the demo still works if tgt_inputs changes length.
subsequent_mask = get_subsequent_mask(tgt_inputs.size(1))
# Combine padding and causal masks, then normalize to boolean.
tgt_mask = (tgt_pad_mask | subsequent_mask) != 0

predict = model(src_inputs, src_mask, tgt_inputs, tgt_mask)
print(predict)
print(predict.shape)