import torch
import torch.nn as nn
import math
import matplotlib.pyplot as plt
import numpy as np

# Word-embedding layer
class Embeddings(nn.Module):
    """Map token ids to d_model-dimensional vectors, scaled by sqrt(d_model).

    The sqrt(d_model) scaling follows the Transformer convention so that
    embeddings and positional encodings have comparable magnitudes.
    """

    def __init__(self, vocab_size, d_model):
        super().__init__()
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=d_model)

    def forward(self, x):
        # x: integer token ids; returns the scaled embedding lookup.
        scale = math.sqrt(self.d_model)
        return self.embedding(x) * scale

def dm01_Embeddings():
    """Demo: push a [2, 4] batch of token ids through the embedding layer."""
    # Sample batch of token ids, shape [2, 4]
    token_ids = torch.tensor([[100, 2, 421, 508], [491, 998, 1, 221]])  # [2,4]

    embed_layer = Embeddings(vocab_size=1000, d_model=512)
    embedded = embed_layer(token_ids)

    print(embedded)
    print(embedded.shape)

# Sinusoidal positional-encoding layer
class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position information to token embeddings.

    Args:
        d_model: embedding dimension (even or odd — see fix below).
        dropout: dropout probability applied after adding the encodings.
        max_len: maximum sequence length supported by the precomputed table.
    """

    def __init__(self, d_model, dropout=0.1, max_len=60):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.max_len = max_len
        self.d_model = d_model

        # Precomputed encoding table, one row per position: [max_len, d_model]
        pe = torch.zeros(max_len, d_model)

        # Column vector of positions 0..max_len-1: [max_len, 1]
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)

        # Frequency terms 1/10000^(2i/d_model): [ceil(d_model/2)]
        div_term = torch.exp(torch.arange(0, self.d_model, 2) * -(math.log(10000.0) / self.d_model))

        # Outer product gives the angle for every (position, frequency) pair:
        # [max_len, 1] * [ceil(d_model/2)] -> [max_len, ceil(d_model/2)]
        position_value = position * div_term

        # Even dimensions get sin, odd dimensions get cos.
        pe[:, 0::2] = torch.sin(position_value)
        # FIX: for odd d_model the odd-index slice has only d_model//2 columns,
        # one fewer than position_value — slice to match so odd d_model
        # no longer raises a shape-mismatch error (no-op for even d_model).
        pe[:, 1::2] = torch.cos(position_value[:, : d_model // 2])

        # [max_len, d_model] -> [1, max_len, d_model] so it broadcasts over batch
        pe = pe.unsqueeze(0)
        # Buffer: moves with the module (.to/.cuda) and is saved in state_dict,
        # but is not a trainable parameter.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encodings to x of shape [batch, seq_len, d_model]."""
        # Only the first seq_len rows of the table are needed.
        x = x + self.pe[:, :x.size(1)]
        x = self.dropout(x)

        return x

def dm02_PositionalEncoding():
    """Demo: embed a [2, 4] batch of token ids, then add positional encodings."""
    token_ids = torch.tensor([[100, 2, 421, 508], [491, 998, 1, 221]])  # [2,4]

    embed_layer = Embeddings(vocab_size=1000, d_model=512)
    embedded = embed_layer(token_ids)  # [2,4,512]
    print(embedded)
    print(embedded.shape)

    pe_layer = PositionalEncoding(d_model=512, max_len=60, dropout=0.1)
    print('positional_encoding:\n', pe_layer)

    encoded = pe_layer(embedded)
    print(encoded)
    print(encoded.shape)

def dm03_plot_PE():
    """Demo: plot the positional-encoding curves for dimensions 4-7."""
    pe_layer = PositionalEncoding(d_model=20, max_len=100, dropout=0.1)

    plt.figure(figsize=(40, 20))
    # pe buffer shape: [1, max_len, d_model] = [1, 100, 20]
    plt.plot(np.arange(100), pe_layer.pe[0, :, 4:8])
    plt.legend(['dim %d' % p for p in [4, 5, 6, 7]])
    plt.show()

# Entry point: runs the embedding demo; uncomment the other calls to try
# the positional-encoding demo or the encoding-curve plot.
if __name__ == '__main__':
    dm01_Embeddings()
    # dm02_PositionalEncoding()
    # dm03_plot_PE()