





class PositionEncoder(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Adds a fixed position-dependent signal to token embeddings:
        PE[pos, 2i]   = sin(pos / 10000^(2i / d_model))
        PE[pos, 2i+1] = cos(pos / 10000^(2i / d_model))

    Args:
        d_model: embedding dimension of the model.
        max_seq_len: maximum sequence length the table supports (default 80).
    """

    def __init__(self, d_model, max_seq_len=80):
        super().__init__()
        # BUG FIX: was `self.model`, but forward() reads `self.d_model`.
        self.d_model = d_model

        # Build the constant PE table from pos and i.
        pe = torch.zeros(max_seq_len, d_model)
        for pos in range(max_seq_len):
            for i in range(0, d_model, 2):
                freq = 10000 ** (i / d_model)
                pe[pos, i] = math.sin(pos / freq)
                # BUG FIX: the original never filled the cosine (odd) columns,
                # leaving them zero. Guard i+1 so an odd d_model still works.
                if i + 1 < d_model:
                    pe[pos, i + 1] = math.cos(pos / freq)

        pe = pe.unsqueeze(0)  # (1, max_seq_len, d_model) for broadcasting over batch
        # Buffers are saved with the module but are not trainable parameters.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Scale embeddings by sqrt(d_model) and add positional encodings.

        Args:
            x: embeddings of shape (batch, seq_len, d_model); seq_len must
               not exceed max_seq_len.

        Returns:
            Tensor of the same shape with positions encoded.
        """
        # Make the embedding magnitudes comparatively larger than the PE signal.
        x = x * math.sqrt(self.d_model)
        # BUG FIX: was `seg_len = ...` but `seq_len` was read -> NameError.
        seq_len = x.size(1)
        # Buffers already have requires_grad=False; the deprecated
        # torch.autograd.Variable wrapper is unnecessary.
        x = x + self.pe[:, :seq_len]
        return x



