import math

import torch
import torch.nn as nn
from torch.autograd import Variable


class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., "Attention Is All You Need").

    Precomputes PE[pos, 2i]   = sin(pos / 10000^(2i/d)) and
                PE[pos, 2i+1] = cos(pos / 10000^(2i/d)),
    then adds the first `seq_len` rows to the input embeddings in `forward`.
    """

    def __init__(self, feature_dim, dropout, max_len=5000):
        """
        Args:
            feature_dim: embedding dimension (assumed even — the odd/even
                interleave below requires sin and cos halves of equal size).
            dropout: dropout probability applied after adding the encoding.
            max_len: maximum sequence length supported by the precomputed table.
        """
        super(PositionalEncoding, self).__init__()

        self.dropout = nn.Dropout(p=dropout)
        positional_encoding = torch.zeros(max_len, feature_dim)
        # (max_len, 1) column of positions 0..max_len-1, broadcast against div_term.
        position = torch.arange(0, max_len).unsqueeze(1)

        # div_term[i] = 10000^(2i / feature_dim); dividing position by it yields
        # the standard wavelength progression from 2*pi up to 10000*2*pi.
        div_term = torch.pow(10000.0, torch.arange(0, feature_dim, 2) / feature_dim)
        positional_encoding[:, 0::2] = torch.sin(position / div_term)  # even dims
        positional_encoding[:, 1::2] = torch.cos(position / div_term)  # odd dims

        # Add a leading batch dimension so the table broadcasts against
        # (batch, seq_len, feature_dim) inputs.
        positional_encoding = positional_encoding.unsqueeze(0)

        # Register as a buffer: saved/loaded with the model's state_dict and
        # moved by .to()/.cuda(), but not a trainable parameter.
        self.register_buffer('positional_encoding', positional_encoding)

    def forward(self, x):
        """Add positional encodings to `x` of shape (batch, seq_len, feature_dim).

        Buffers never require grad, so the slice is added directly — no need
        for the deprecated `torch.autograd.Variable` wrapper.
        """
        x = x + self.positional_encoding[:, :x.size(1)]
        return self.dropout(x)


if __name__ == '__main__':
    # Quick sanity check of nn.Dropout: with p=0.6, roughly 60% of entries are
    # zeroed and survivors are scaled by 1/(1-p). (Renamed from `input` to
    # avoid shadowing the builtin.)
    drop = nn.Dropout(p=0.6)
    sample = torch.randn(1, 10)
    dropped = drop(sample)
    print(sample)
    print(dropped)

    import matplotlib.pyplot as plt
    import numpy as np

    batch_size = 1
    seq_len = 100
    feature_dim = 20
    dropout = 0.0  # disable dropout so the raw encoding is visible

    positional_encoding = PositionalEncoding(feature_dim=feature_dim, dropout=dropout)
    # Zero input: the output is exactly the positional encoding itself.
    x = torch.zeros(batch_size, seq_len, feature_dim)
    y = positional_encoding(x)

    # Plot the encoding value of a few feature dimensions as a function of
    # position (x-axis runs over the full sequence length).
    dims = (4, 5, 6, 7, 8, 9)
    plt.plot(np.arange(seq_len), y[0, :, dims].data.numpy())
    plt.legend(["dim %d" % d for d in dims])
    plt.show()
