import torch
import torch.nn as nn
import torch.optim as optim # 随机梯度下降优化器
import matplotlib.pyplot as plt
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Toy corpus: five short sentences used to build the vocabulary.
sentences = ["Kage is Teacher",
             "Mazong is Boss",
             "Niuzong is Boss",
             "Xiaobing is Student",
             "Xiaoxue is Student", ]

# Join every sentence into one string, then split on whitespace
# to obtain the full token stream.
words = ' '.join(sentences).split()

# Vocabulary: the distinct tokens (set() removes duplicates).
word_list = list(set(words))

# Forward and reverse lookup tables between tokens and integer ids,
# built in a single pass over the vocabulary.
word_to_idx = {}
idx_to_word = {}
for index, token in enumerate(word_list):
    word_to_idx[token] = index
    idx_to_word[index] = token

# Vocabulary size.
voc_size = len(word_list)

# Show what was built.
print("词汇表: ", word_list)

print("词汇到索引的字典: ", word_to_idx)
print("索引到词汇的字典: ", idx_to_word)
print("词汇表大小: ", voc_size)


# 生成Skip-Gram训练数据
def create_skipgram_dataset(sentences, window_size=2):
    """Build (context_word, center_word) training pairs for Skip-Gram.

    For every token in every sentence, each token within ``window_size``
    positions (on either side) is paired with it as a context word.

    Args:
        sentences: iterable of whitespace-separated sentence strings.
        window_size: number of neighbors taken on each side of the center.

    Returns:
        list of (context_word, center_word) string tuples.
    """
    data = []
    for sentence in sentences:
        tokens = sentence.split()
        for center_pos, center_word in enumerate(tokens):
            # Clamp the context window to the sentence boundaries.
            lo = max(center_pos - window_size, 0)
            hi = min(center_pos + window_size + 1, len(tokens))
            for ctx_pos in range(lo, hi):
                # Skip the center POSITION itself. (The old code compared
                # words by value — `neighbor != word` — which wrongly dropped
                # pairs whenever the same token appeared twice in a window.)
                if ctx_pos != center_pos:
                    data.append((tokens[ctx_pos], center_word))
    return data


# 使用函数创建Skip-Gram训练数据
# Build the Skip-Gram training pairs from the toy corpus
skipgram_data = create_skipgram_dataset(sentences)
# Print a sample of the (still un-encoded) Skip-Gram pairs
print("Skip-Gram 数据样例(未编码): ", skipgram_data[:10])


def one_hot_encoding(word, word_to_idx):
    """Return a float one-hot vector of length ``len(word_to_idx)``.

    The position given by ``word_to_idx[word]`` holds 1.0; all others 0.0.
    """
    index = torch.tensor(word_to_idx[word])
    # one_hot yields an int64 vector; cast to float to match the rest
    # of the pipeline (float32, same as torch.zeros would produce).
    return torch.nn.functional.one_hot(index, num_classes=len(word_to_idx)).float()


# Demonstrate one-hot encoding on a single example word.
word_example = "Teacher"
print("One-Hot 编码前的单词: ", word_example)
print("One-Hot 编码后的向量: ", one_hot_encoding(word_example, word_to_idx))
# Show a few training pairs encoded as (one-hot context vector, center-word index).
print("Skip-Gram 样例数据(已编码): ", [(one_hot_encoding(context, word_to_idx), word_to_idx[target]) for
                                context, target in skipgram_data[:3]])


# class SkipGram(nn.Module):
#     def __init__(self, voc_size, embedding_size):
#         super(SkipGram, self).__init__()
#         # 从词汇表大小到嵌入层大小(维度)的线性层(权重矩阵)
#         self.input_to_hidden = nn.Linear(voc_size, embedding_size, bias=False)
#         # 从嵌入层大小(维度)到词汇表大小的线性层(权重矩阵)
#         self.hidden_to_output = nn.Linear(embedding_size, voc_size, bias=False)
#
#     def forward(self, X):  # 前向传播的方式， X形状为 (batch_size, voc_size)
#         # 通过隐藏层，hiddel形状为(batch_size, embedding_size)
#         hidden = self.input_to_hidden(X)
#         # 通过输出层，output_layer形状为(batch_size, voc_size)
#         output = self.hidden_to_output(hidden)
#         return output

class SkipGram(nn.Module):
    """Skip-Gram model: an embedding lookup followed by a bias-free linear
    projection back onto the vocabulary (one logit per word)."""

    def __init__(self, voc_size, embedding_size):
        super(SkipGram, self).__init__()
        # Embedding table mapping a word index to its dense vector.
        self.input_to_hidden = nn.Embedding(voc_size, embedding_size)
        # Projection from embedding space back to vocabulary logits.
        self.hidden_to_output = nn.Linear(embedding_size, voc_size, bias=False)

    def forward(self, X):
        # X holds word indices of shape (batch_size,); the embedding lookup
        # produces (batch_size, embedding_size).
        embedded = self.input_to_hidden(X)
        # Logits over the vocabulary, shape (batch_size, voc_size).
        return self.hidden_to_output(embedded)

# Embedding dimension (2-D so the learned vectors are easy to visualize)
embeding_size = 2
skipgram_model = SkipGram(voc_size, embeding_size)
print("Skip-Gram类: ", skipgram_model)

# Learning rate for SGD
learning_rate = 0.001
# Number of training epochs
epochs = 1000
# Cross-entropy loss over the vocabulary logits
criterion = nn.CrossEntropyLoss()

# Plain stochastic gradient descent over all model parameters
optimizer = optim.SGD(skipgram_model.parameters(), lr=learning_rate)

# Training loop.
loss_values = []  # average loss recorded once every 100 epochs (for plotting)

# Encode every (context, center) word pair as index tensors ONCE, up front.
# The original loop rebuilt the exact same tensors on every one of the
# `epochs` passes; hoisting the encoding changes nothing about the
# optimization order or results, only removes redundant work.
encoded_pairs = [
    (torch.tensor([word_to_idx[target]], dtype=torch.long),    # center-word index (model input)
     torch.tensor([word_to_idx[context]], dtype=torch.long))   # context-word index (target class)
    for context, target in skipgram_data
]

for epoch in range(epochs):
    # Accumulated loss over this epoch
    loss_sum = 0
    for X, y_true in encoded_pairs:
        # Predict logits over the vocabulary from the center word
        y_pred = skipgram_model(X)
        # Cross-entropy against the true context-word index
        loss = criterion(y_pred, y_true)
        loss_sum += loss.item()
        # Standard step: clear gradients, backpropagate, update parameters
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    if (epoch + 1) % 100 == 0:
        print(f"Epoch: {epoch + 1}, Loss:{loss_sum/len(skipgram_data)}")
        loss_values.append(loss_sum / len(skipgram_data))


# Plot the training-loss curve.
# Use the SimHei font so the Chinese title/labels render correctly
plt.rcParams["font.family"] = ["SimHei"]
# Same font for the sans-serif fallback
plt.rcParams["font.sans-serif"] = ["SimHei"]
# Render the minus sign correctly when using a non-ASCII font
plt.rcParams["axes.unicode_minus"] = False
# One loss value was recorded every 100 epochs, so the x axis counts
# hundreds of epochs: 1 .. epochs//100
plt.plot(range(1, epochs//100+1), loss_values)

plt.title("训练损失曲线")
plt.xlabel("轮次")
plt.ylabel("损失")
# Display the figure
plt.show()

