import math

from transformers import AutoTokenizer
import random
import torch
import torch.nn as nn

# import jieba
# sentence = "澳大利亚煤炭储量非常丰富,但是缺少农地"
# vocabulary = list(jieba.cut(sentence))
# print(vocabulary)
# TODO: negative sampling and hierarchical softmax are not implemented;
# this script trains a toy 3-dimensional embedding with a full softmax.
LearningRate = 0.2  # SGD step size used by train_step
Epoch = 40  # number of passes over the full set of training pairs
# Downloads the tokenizer on first run; requires network access.
tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
# tok = AutoTokenizer.from_pretrained(
#     r"C:\Users\arthur.li1\.cache\huggingface\hub\models--xlm-roberta-base\snapshots\e73636d4f797dec63c3081bb6ed5c7b0bb3f2089"
# )
vocabulary = tok.tokenize("澳大利亚煤炭储量非常丰富,但是缺少农地")
# print(vocabulary)

# Map each subword token to its position in `vocabulary`.
# NOTE(review): if the tokenizer emits duplicate tokens, only the last
# occurrence's index is kept.
WordToIndex = {}

for i, word in enumerate(vocabulary):
    WordToIndex[word] = i
# print(WordToIndex)

# One random 3-dimensional vector per token.
WordVector = []
for i, word in enumerate(vocabulary):
    vec = [
        random.uniform(0.1, 0.9),
        random.uniform(0.1, 0.9),
        random.uniform(0.1, 0.9),
    ]
    WordVector.append(vec)
# print(WordVector)
# Copy into a torch.Tensor. Training updates `tensor` in place;
# the `WordVector` list keeps its initial random values.
tensor = torch.tensor(WordVector)

# print(tensor)


def index_to_vector(index):
    """Look up the embedding row for the word at position `index`."""
    embedding_row = tensor[index]
    return embedding_row


def vector_to_index(vector):
    """Return the row index in `tensor` whose values equal `vector`.

    Bug fix: the original called `tensor.index(vector)`, but torch.Tensor
    has no `.index()` method (that is a list method), so any call raised
    AttributeError. Search the rows explicitly instead.

    Args:
        vector: a sequence or 1-D tensor with the same length as a row.

    Returns:
        The index of the first matching row.

    Raises:
        ValueError: if no row of `tensor` matches `vector` exactly.
    """
    target = torch.as_tensor(vector, dtype=tensor.dtype)
    for i, row in enumerate(tensor):
        if torch.equal(row, target):
            return i
    raise ValueError("vector not found in embedding table")


def softmax(scores):
    """Normalize raw scores into a probability distribution summing to 1.

    Subtracting the maximum score before exponentiating keeps math.exp
    from overflowing on large inputs without changing the result.
    """
    peak = max(scores)
    exps = [math.exp(s - peak) for s in scores]
    total = sum(exps)
    return [e / total for e in exps]


def outputLayer(paramVec):
    """Score `paramVec` against every embedding row by dot product.

    Each score is a similarity between the input vector and one
    vocabulary word's vector in the module-level `tensor`.
    """
    scoreVec = []
    for word_vec in tensor:
        dot = 0
        for dim in range(len(paramVec)):
            dot += paramVec[dim] * word_vec[dim]
        scoreVec.append(dot)
    return scoreVec


def hiddenLayer(index):
    """Forward pass: score the center word against every vocabulary word.

    Bug fix: the original read the center vector from the `WordVector`
    Python list, which keeps its initial random values forever — training
    only updates `tensor` in place (see train_step). Reading from `tensor`
    makes the forward pass use the trained center vector.

    Args:
        index: row index of the center word.

    Returns:
        A list of dot-product scores, one per vocabulary word.
    """
    center_vec = tensor[index]  # trained, up-to-date embedding
    return outputLayer(center_vec)


def inputLayer(index):
    # NOTE(review): this function appears unused and looks suspect:
    # it runs a forward pass and discards the result, then returns the
    # vector for the hard-coded token "澳大利亚" instead of `index`.
    # "澳大利亚" is only a valid WordToIndex key if the tokenizer emitted
    # exactly that string (XLM-R usually prefixes "▁") — TODO confirm
    # before relying on this; a KeyError is possible here.
    hiddenLayer(index)
    return index_to_vector(WordToIndex["澳大利亚"])


# One SGD step for a single (center, context) skip-gram pair.
def train_step(center_index, context_index, learning_rate):
    """Forward pass + softmax-cross-entropy gradient + in-place SGD update.

    Updates every row of the module-level `tensor` in place.

    Args:
        center_index: row index of the center word's embedding.
        context_index: row index of the true context word (the label).
        learning_rate: SGD step size.

    Returns:
        The softmax probability distribution over the vocabulary.
    """
    # 1. Forward pass.
    # Score the center word against every vocabulary word.
    scores = hiddenLayer(center_index)
    # Normalize scores into probabilities.
    probabilities = softmax(scores)

    # 2. Loss gradient.
    # One-hot target: only the context word has probability 1.
    true_distribution = [0] * len(vocabulary)
    true_distribution[context_index] = 1

    # Gradient w.r.t. each output vector: (p_i - y_i) * center_vec,
    # where p_i is the predicted probability and y_i the one-hot target.
    center_vec = tensor[center_index]
    gradients = []
    for i in range(len(vocabulary)):
        # For the true context word (i == context_index): error = p - 1 < 0,
        # so the update pulls that word's vector toward the center vector.
        # For every other word: error = p - 0 > 0, so the update pushes its
        # vector away from the center vector.
        error = probabilities[i] - true_distribution[i]
        gradient = [error * component for component in center_vec]
        gradients.append(gradient)

    # 3. SGD update. All gradients were materialized above, so mutating
    # the center row here cannot contaminate later rows' gradients.
    # NOTE(review): input and output embeddings share `tensor`; the center
    # word's input vector receives no gradient of its own in this scheme.
    for i in range(len(vocabulary)):
        for j in range(len(tensor[i])):
            tensor[i][j] -= learning_rate * gradients[i][j]

    return probabilities


# Build (center index, context index) training samples and run training.
def index():
    """Generate skip-gram pairs, train for `Epoch` passes, and print the
    vocabulary sorted by embedding dimension 0 before and after training."""

    def _dump(header):
        # Print every word with its vector, ordered by dimension 0.
        order = sorted(range(len(tensor)), key=lambda k: tensor[k][0])
        print(header)
        for k in order:
            print(vocabulary[k], tensor[k].tolist())

    _dump("按第0维排序前begin：")

    window_size = 2  # context words taken from each side of the center
    training_pairs = [
        (center, ctx)
        for center in range(len(vocabulary))
        for ctx in range(max(0, center - window_size),
                         min(len(vocabulary), center + window_size + 1))
        if ctx != center  # never pair a word with itself
    ]
    print(training_pairs)

    for _epoch in range(Epoch):
        random.shuffle(training_pairs)  # fresh sample order each epoch
        running_loss = 0.0
        for center, ctx in training_pairs:
            probs = train_step(center, ctx, LearningRate)
            # Cross-entropy loss; the epsilon guards against log(0).
            running_loss += -math.log(probs[ctx] + 1e-10)
        # print(f"Epoch {_epoch}, Loss: {running_loss}")

    _dump("按第0维排序后end：")


# NOTE(review): runs the full training at import time; consider wrapping in
# an `if __name__ == "__main__":` guard so importing this module is cheap.
index()
