import torch
from torch import nn
import torch.nn.functional as F
import math
#token embedding
#将输入的词汇表索引转换为指定维度的编码
class TokenEmbedding(nn.Embedding):
    """Lookup table mapping vocabulary indices to d_model-dimensional vectors.

    Index 1 is treated as the padding token: its embedding row is zeroed
    at init and receives no gradient updates.
    """

    def __init__(self, vocab_size, d_model):  # vocab size, embedding dim
        super(TokenEmbedding, self).__init__(vocab_size, d_model, padding_idx=1)


#positional embedding
#位置编码
class positionalembedding(nn.Module):
    """Sinusoidal positional encoding (fixed, non-learned).

    Precomputes a (max_len, d_model) table where even columns hold
    sin(pos / 10000^(2i/d_model)) and odd columns the matching cos;
    forward returns the first seq_len rows for the given batch.
    """

    def __init__(self, d_model, max_len, device):
        super(positionalembedding, self).__init__()
        # Fixed table of shape (max_len, d_model); no gradients needed.
        table = torch.zeros(max_len, d_model, device=device)
        table.requires_grad = False
        # Column vector of positions: [[0.], [1.], ..., [max_len-1.]]
        position = torch.arange(0, max_len, device=device).float().unsqueeze(dim=1)
        # Even feature indices 2i = 0, 2, 4, ... (up to d_model).
        two_i = torch.arange(0, d_model, step=2, device=device).float()
        angle = position / (10000 ** (two_i / d_model))
        table[:, 0::2] = torch.sin(angle)  # even columns
        table[:, 1::2] = torch.cos(angle)  # odd columns
        self.encoding = table

    def forward(self, x):
        # x: (batch, seq_len) token indices; only the sequence length is used.
        _, seq_len = x.size()
        return self.encoding[:seq_len, :]


#编码
class transformerembedding(nn.Module):
    """Token embedding plus sinusoidal positional encoding, with dropout.

    Args:
        vocab_size: size of the vocabulary.
        d_model: embedding dimension.
        max_len: maximum sequence length for the positional table.
        drop_prob: dropout probability applied to the summed embedding.
        device: device on which the positional table is created.
    """

    def __init__(self, vocab_size, d_model, max_len, drop_prob, device):
        super(transformerembedding, self).__init__()
        self.tok_emb = TokenEmbedding(vocab_size, d_model)            # learned token embedding
        self.pos_emb = positionalembedding(d_model, max_len, device)  # fixed positional encoding
        self.drop_out = nn.Dropout(p=drop_prob)                       # regularization

    def forward(self, x):
        # x: (batch, seq_len) token indices.
        # BUG FIX: the original returned self.tok_emb + self.pos_emb — adding
        # the module objects themselves instead of calling them on x.
        tok_emb = self.tok_emb(x)   # (batch, seq_len, d_model)
        pos_emb = self.pos_emb(x)   # (seq_len, d_model), broadcasts over batch
        return self.drop_out(tok_emb + pos_emb)

#多头注意力
class mutiheadattention(nn.Module):
    """Multi-head scaled dot-product attention.

    Projects q/k/v with learned linear maps, splits into n_head heads,
    applies softmax(QK^T / sqrt(d_k)) V per head, then recombines with an
    output projection. d_model must be divisible by n_head.
    """

    def __init__(self, d_model, n_head):
        # BUG FIX: original wrote super(mutiheadattention.self) — attribute
        # access instead of the (cls, instance) argument pair.
        super(mutiheadattention, self).__init__()
        self.n_head = n_head
        self.d_model = d_model
        self.w_q = nn.Linear(d_model, d_model)
        self.w_k = nn.Linear(d_model, d_model)
        self.w_v = nn.Linear(d_model, d_model)
        self.w_combine = nn.Linear(d_model, d_model)  # output projection
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, q, k, v, mask=None):
        # q/k/v: (batch, time, d_model). mask (optional): broadcastable to
        # (batch, n_head, time, time); positions where mask == 0 are hidden.
        batch, time, dimension = q.shape
        n_d = self.d_model // self.n_head  # per-head dimension
        q, k, v = self.w_q(q), self.w_k(k), self.w_v(v)
        # Split heads: (batch, time, n_head, n_d) -> (batch, n_head, time, n_d)
        q = q.view(batch, time, self.n_head, n_d).permute(0, 2, 1, 3)
        k = k.view(batch, time, self.n_head, n_d).permute(0, 2, 1, 3)
        v = v.view(batch, time, self.n_head, n_d).permute(0, 2, 1, 3)
        # BUG FIX: transpose(2.3) passed a float; must be transpose(2, 3).
        score = q @ k.transpose(2, 3) / math.sqrt(n_d)
        # BUG FIX: the default was mask=False, which made `mask is not None`
        # always true and masked every position; default is now None.
        if mask is not None:
            score = score.masked_fill(mask == 0, -10000)
        score = self.softmax(score) @ v
        # Merge heads back. BUG FIX: the original viewed to (batch, dimension),
        # dropping the time axis entirely.
        score = score.permute(0, 2, 1, 3).contiguous().view(batch, time, dimension)
        return self.w_combine(score)
    
#layernorm
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable affine."""

    def __init__(self, d_model, eps=1e-12):
        super(LayerNorm, self).__init__()
        # BUG FIX: torch.one / torch.zero do not exist; use ones / zeros.
        self.gamma = nn.Parameter(torch.ones(d_model))  # learnable scale
        self.beta = nn.Parameter(torch.zeros(d_model))  # learnable shift
        self.eps = eps  # small constant to avoid division by zero

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)                 # per-position mean
        var = x.var(-1, unbiased=False, keepdim=True)   # population variance
        out = (x - mean) / torch.sqrt(var + self.eps)   # normalize
        return self.gamma * out + self.beta             # scale and shift

#全连接层
class positionwisefeedforward(nn.Module):
    """Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, hidden, dropout=0.1):
        super(positionwisefeedforward, self).__init__()
        self.fc1 = nn.Linear(d_model, hidden)  # expand d_model -> hidden
        self.fc2 = nn.Linear(hidden, d_model)  # project back hidden -> d_model
        self.dropout = nn.Dropout(dropout)     # regularization

    def forward(self, x):
        x = self.fc1(x)
        x = F.relu(x)  # max(0, x)
        # BUG FIX: original line was `x.self.dropout(x)` — an AttributeError at
        # runtime; the dropout result must be assigned back to x.
        x = self.dropout(x)
        x = self.fc2(x)
        return x

class encoderlayer(nn.Module):
    """One Transformer encoder layer: self-attention then feed-forward,
    each wrapped in dropout -> residual add -> layer norm (post-norm)."""

    def __init__(self, d_model, ffn_hidden, n_head, dropout=0.1):
        super(encoderlayer, self).__init__()
        self.attention = mutiheadattention(d_model, n_head)           # self-attention
        self.norm1 = LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.ffn = positionwisefeedforward(d_model, ffn_hidden, dropout)
        self.norm2 = LayerNorm(d_model)
        self.dropout2 = nn.Dropout(dropout)

    def forward(self, x, mask=None):
        # Self-attention sub-layer with residual connection.
        residual = x
        attn_out = self.attention(x, x, x, mask)
        x = self.norm1(self.dropout1(attn_out) + residual)
        # Feed-forward sub-layer with residual connection.
        residual = x
        ffn_out = self.ffn(x)
        x = self.norm2(self.dropout2(ffn_out) + residual)
        return x
        
class encoder(nn.Module):
    """Transformer encoder: embedding followed by a stack of encoder layers.

    Args:
        enc_voc_size: source vocabulary size.
        max_len: maximum sequence length.
        d_model: model dimension.
        ffn_hidden: hidden size of the feed-forward sub-layers.
        n_head: number of attention heads.
        n_layer: number of stacked encoder layers.
        dropout: dropout probability.
        device: device for the positional-encoding table.
    """

    # BUG FIX: the original put non-default `device` after `dropout=0.1`,
    # which is a SyntaxError; giving device a default keeps positional
    # callers working unchanged.
    def __init__(self, enc_voc_size, max_len, d_model, ffn_hidden, n_head,
                 n_layer, dropout=0.1, device=None):
        super(encoder, self).__init__()
        # BUG FIX: the original call mixed positional-after-keyword arguments
        # (SyntaxError) and swapped max_len/d_model; transformerembedding
        # expects (vocab_size, d_model, max_len, drop_prob, device). It also
        # hard-coded drop_prob=0.1 instead of using the dropout parameter.
        self.embedding = transformerembedding(enc_voc_size, d_model, max_len,
                                              dropout, device)
        # BUG FIX: `device` was passed where encoderlayer expects `dropout`.
        self.layers = nn.ModuleList(
            [
                encoderlayer(d_model, ffn_hidden, n_head, dropout)
                for _ in range(n_layer)
            ]
        )

    def forward(self, x, s_mask):
        # x: (batch, seq_len) source token ids; s_mask: source padding mask.
        x = self.embedding(x)
        for layer in self.layers:
            x = layer(x, s_mask)
        return x
    
class DecoderLayer(nn.Module):
    """One Transformer decoder layer: masked self-attention, cross-attention
    over the encoder output, then feed-forward — each with dropout,
    residual add, and layer norm (post-norm)."""

    def __init__(self, d_model, ffn_hidden, n_head, drop_prob):
        super(DecoderLayer, self).__init__()
        self.attention1 = mutiheadattention(d_model, n_head)       # masked self-attention
        self.norm1 = LayerNorm(d_model)
        self.dropout1 = nn.Dropout(drop_prob)
        self.cross_attention = mutiheadattention(d_model, n_head)  # attends to encoder output
        self.norm2 = LayerNorm(d_model)
        self.dropout2 = nn.Dropout(drop_prob)
        self.ffn = positionwisefeedforward(d_model, ffn_hidden, drop_prob)
        self.norm3 = LayerNorm(d_model)
        self.dropout3 = nn.Dropout(drop_prob)

    def forward(self, dec, enc, t_mask, s_mask):
        # Masked self-attention over the decoder input.
        _x = dec
        x = self.attention1(dec, dec, dec, t_mask)
        x = self.dropout1(x)
        x = self.norm1(x + _x)
        # Cross-attention: queries from decoder, keys/values from encoder.
        _x = x
        x = self.cross_attention(x, enc, enc, s_mask)
        x = self.dropout2(x)
        x = self.norm2(x + _x)
        # Feed-forward sub-layer.
        # BUG FIX: the original never saved the residual here and called
        # norm3(x) without `+ _x`, dropping the third residual connection.
        _x = x
        x = self.ffn(x)
        x = self.dropout3(x)
        x = self.norm3(x + _x)
        return x
    
class Decoder(nn.Module):
    """Transformer decoder: embedding, a stack of decoder layers, and a
    final linear projection to target-vocabulary logits."""

    def __init__(self, dec_voc_size, max_len, d_model, ffn_hidden, n_head,
                 n_layer, dropout, device):
        super(Decoder, self).__init__()
        # BUG FIX: the original passed (dec_voc_size, d_model, max_len,
        # n_head, dropout) — n_head landed in drop_prob and dropout in
        # device; transformerembedding expects (vocab_size, d_model,
        # max_len, drop_prob, device).
        self.embedding = transformerembedding(dec_voc_size, d_model, max_len,
                                              dropout, device)
        self.layers = nn.ModuleList(
            [
                DecoderLayer(d_model, ffn_hidden, n_head, dropout)
                for _ in range(n_layer)
            ]
        )
        self.fc = nn.Linear(d_model, dec_voc_size)  # logits over target vocab

    def forward(self, dec, enc, t_mask, s_mask):
        # BUG FIX: the original embedded `enc` (the encoder hidden states)
        # instead of the target token ids `dec`.
        dec = self.embedding(dec)
        for layer in self.layers:
            dec = layer(dec, enc, t_mask, s_mask)
        return self.fc(dec)

class Transformer(nn.Module):
    """Full encoder-decoder Transformer with padding and causal masking.

    Builds pad masks from the configured pad indices, a causal
    (lower-triangular) mask for the target, runs the encoder, then
    decodes the target against the encoder output.
    """

    def __init__(self,
                 src_pad_ix,
                 trg_pad_idx,
                 enc_voc_size,
                 dec_voc_size,
                 d_model,
                 max_len,
                 n_heads,
                 ffn_hidden,
                 n_layers,
                 drop_prop,
                 device
                 ):
        super(Transformer, self).__init__()
        self.encoder = encoder(
            enc_voc_size,
            max_len,
            d_model,
            ffn_hidden,
            n_heads,
            n_layers,
            drop_prop,
            device
        )
        self.decoder = Decoder(dec_voc_size, max_len, d_model, ffn_hidden,
                               n_heads, n_layers, drop_prop, device)
        self.src_pad_idx = src_pad_ix
        self.trg_pad_idx = trg_pad_idx
        self.device = device

    def make_pad_mask(self, q, k, pad_idx_q, pad_idx_k):
        """Return a bool mask of shape (batch, 1, len_q, len_k) that is True
        where neither the query nor the key position is padding."""
        len_q, len_k = q.size(1), k.size(1)
        # BUG FIX: `respeat` is not a tensor method; the calls below use repeat.
        q = q.ne(pad_idx_q).unsqueeze(1).unsqueeze(3)  # (batch, 1, len_q, 1)
        q = q.repeat(1, 1, 1, len_k)
        k = k.ne(pad_idx_k).unsqueeze(1).unsqueeze(2)  # (batch, 1, 1, len_k)
        k = k.repeat(1, 1, len_q, 1)
        mask = q & k
        return mask

    def make_casual_mask(self, q, k):
        """Return a (len_q, len_k) lower-triangular bool mask that blocks
        attention to future positions."""
        # BUG FIX: len_q/len_k were never defined and torch.trill does not
        # exist (the function is torch.tril).
        len_q, len_k = q.size(1), k.size(1)
        mask = torch.tril(torch.ones(len_q, len_k)).type(torch.BoolTensor).to(self.device)
        return mask

    def forward(self, src, trg):
        src_mask = self.make_pad_mask(src, src, self.src_pad_idx, self.src_pad_idx)
        # BUG FIX: the target mask must also block future positions — combine
        # the pad mask with the causal mask (broadcasts over batch/head dims).
        trg_mask = self.make_pad_mask(trg, trg, self.trg_pad_idx, self.trg_pad_idx) \
            & self.make_casual_mask(trg, trg)
        enc = self.encoder(src, src_mask)
        # BUG FIX: the decoder must receive the encoder OUTPUT `enc`,
        # not the raw source token ids `src`.
        out = self.decoder(trg, enc, trg_mask, src_mask)
        return out


