import torch
import torch.nn as nn
from torch.autograd import Variable
from tqdm import tqdm

# Sinusoidal positional encoding for word embeddings (Vaswani et al., 2017 style).
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, sequence, base=10000):
        """
        Args:
            d_model: embedding dimension of each position vector.
            sequence: number of positions to encode.
            base: wavelength base of the sinusoids (10000 in the original paper).
        """
        super(PositionalEncoding, self).__init__()
        self.d_model = d_model
        self.sequence = sequence
        self.base = base

    def forward(self):
        """Return a (sequence, d_model) float tensor of positional encodings."""
        pe = torch.zeros(self.sequence, self.d_model, dtype=torch.float)
        # One frequency per sin/cos column pair. Using ceil(d_model/2) pairs
        # fixes the odd-d_model case: there the last pair has a sin column but
        # no matching cos column (the original d_model//2 sizing crashed on the
        # even-column assignment below).
        n_pairs = (self.d_model + 1) // 2
        exp_value = torch.arange(n_pairs, dtype=torch.float) / (self.d_model / 2)

        # alpha[k] = base^(-2k / d_model): frequency decays with pair index.
        alpha = 1 / (self.base ** exp_value)
        # out[p, k] = p * alpha[k] — the angle for position p, pair k.
        out = torch.arange(self.sequence, dtype=torch.float)[:, None] * alpha[None, :]
        # Even columns take sin, odd columns take cos; the cos side is sliced
        # so odd d_model (one fewer odd column) is handled.
        pe[:, 0::2] = torch.sin(out)
        pe[:, 1::2] = torch.cos(out[:, :self.d_model // 2])
        return pe

class n_gram(nn.Module):
    """N-gram language model: embed the context words, concatenate their
    embeddings, and classify the next word over the vocabulary.

    Args:
        vocb_size: vocabulary size (number of output classes).
        context_size: number of context words per example.
        n_dim: embedding dimension per word.
    """
    def __init__(self, vocb_size, context_size, n_dim):
        super(n_gram, self).__init__()
        # Kept so forward() can flatten without hard-coding the batch size.
        self.context_size = context_size
        self.n_dim = n_dim
        # Word-embedding lookup table.
        self.embed = nn.Embedding(vocb_size, n_dim)
        # MLP classifier over the concatenated context embeddings.
        self.classifier = nn.Sequential(
            nn.Linear(n_dim * context_size, 128),
            nn.ReLU(True),
            nn.Linear(128, vocb_size)
        )

    def forward(self, x):
        """Compute next-word logits.

        Args:
            x: LongTensor of word indices, shape (context_size,) for a single
               example or (batch, context_size) for a batch.
        Returns:
            Logits of shape (batch, vocb_size); batch is 1 for a 1-D input.
        """
        x = self.embed(x)
        # Flatten each example's context embeddings into one vector. Inferring
        # the batch dimension with -1 supports both 1-D and batched inputs
        # (the original view(1, -1) only handled a single example).
        x = x.view(-1, self.context_size * self.n_dim)
        return self.classifier(x)

def cosine_similarity(x1, x2):
    """Cosine similarity between two tensors, treated as flat vectors."""
    dot = torch.sum(x1 * x2)
    norm_product = torch.norm(x1) * torch.norm(x2)
    return dot / norm_product
if __name__ == '__main__':
    # Toy corpus.
    sentence = """
    it is a test sentence,
    and this is another test sentence,
    what is the meaning of life,
    I don't know,
    you can ask google,
    but I don't have a google.
    """.split()
    # Build trigrams: ((w_i, w_{i+1}), w_{i+2}).
    trigram = [((sentence[i], sentence[i+1]), sentence[i+2]) for i in range(len(sentence)-2)]
    # Vocabulary index maps. idx2vocb is built by inverting vocb directly,
    # instead of re-enumerating the dict and relying on insertion order
    # matching the original enumeration.
    vocb = {word: i for i, word in enumerate(set(sentence))}
    idx2vocb = {i: word for word, i in vocb.items()}

    # Model: 2-word context, 512-dim embeddings.
    net = n_gram(len(vocb), 2, 512)
    net.train()
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)
    for epoch in tqdm(range(100)):
        for word, label in trigram:
            # torch.autograd.Variable is deprecated since torch 0.4;
            # plain tensors participate in autograd directly.
            word = torch.LongTensor([vocb[w] for w in word])
            label = torch.LongTensor([vocb[label]])
            out = net(word)
            loss = criterion(out, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

    net.eval()  # switch to evaluation mode
    word, label = trigram[10]  # pick the 10th trigram as a probe
    print(word, label)
    word = torch.LongTensor([vocb[w] for w in word])
    with torch.no_grad():  # inference only — no gradient bookkeeping needed
        out = net(word)

    # Predicted next word = argmax over the vocabulary logits.
    print(idx2vocb[int(out.argmax(dim=1))])
    print(net.embed.weight.shape)
    embeding = net.embed.weight  # learned embedding table, (vocab, 512)

    # Positional encoding sized to the embedding table.
    pe = PositionalEncoding(512, embeding.shape[0]).forward()

    final_embeding = embeding + pe
    # Compare word-pair similarity before and after adding positional info.
    print(cosine_similarity(embeding[0], embeding[14]))
    print(cosine_similarity(final_embeding[0], final_embeding[14]))
    print(vocb)
    
    
    
        
        
        
        
        
        