import math
import torch
from torch import nn
from d2l import torch as d2l
from my_dataset import data_iter, vocab

"""
centers shape: torch.Size([16, 1]) batch_size = 16  words length = 1
contexts_negatives shape: torch.Size([16, 48]) batch_size = 16  words length = 48
masks shape: torch.Size([16, 48]) batch_size = 16  masks length = 48
labels shape: torch.Size([16, 48]) batch_size = 16  label length = 48
"""
# 增加一个嵌入层
embed = nn.Embedding(num_embeddings=20, embedding_dim=4)


# print(f'Parameter embedding_weight ({embed.weight.shape}, '
#       f'dtype={embed.weight.dtype})')
# # 模拟对中心词进行embedding
# x = torch.randint(0, 20, (16, 1))
# print(x.shape)
# print(embed(x).shape)
# # 模拟对上下文词进行embedding
# x = torch.randint(0, 20, (16, 48))
# print(x.shape)
# print(embed(x).shape)

# 14.4.1.2. 定义前向传播
def skip_gram(center, contexts_and_negatives, embed_v, embed_u):
    v = embed_v(center)
    u = embed_u(contexts_and_negatives)
    # print("v shape", v.shape)
    # print("u shape", u.shape, u.permute(0, 2, 1).shape)
    # v shape torch.Size([16, 1, 100]) torch.Size([16, 100, 48])
    pred = torch.bmm(v, u.permute(0, 2, 1))
    # print("pred shape", pred.shape)
    # 计算中心词和边缘词之间的关联关系 [16, 1, 42]
    return pred


"""
v shape torch.Size([16, 1, 4])
u shape torch.Size([16, 48, 4])
pred shape torch.Size([16, 1, 48])
"""


# skip_gram(torch.ones((16, 1), dtype=torch.long),
#           torch.ones((16, 48), dtype=torch.long), embed, embed)


# 14.4.2.1. 二元交叉熵损失
class SigmoidBCELoss(nn.Module):
    # 带掩码的二元交叉熵损失
    def __init__(self):
        super().__init__()

    def forward(self, inputs, target, mask=None):
        out = nn.functional.binary_cross_entropy_with_logits(
            inputs, target, weight=mask, reduction="none")
        return out.mean(dim=1)


def sigmd(x):
    """Return -log(sigmoid(x)) = log(1 + exp(-x)) (the softplus of -x).

    This is the per-element binary cross-entropy for a positive label;
    ``sigmd(-x)`` gives the value for a negative label.  Computed in a
    numerically stable form: the naive ``-log(1 / (1 + exp(-x)))``
    overflows in ``exp`` for large negative x.
    """
    return max(-x, 0.0) + math.log1p(math.exp(-abs(x)))


# print("###################################################")
loss = SigmoidBCELoss()
pred = torch.tensor([[1.1, -2.2, 3.3, -4.4]] * 2)
label = torch.tensor([[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]])
mask = torch.tensor([[1, 1, 1, 1], [1, 1, 0, 0]])
# print(loss(pred, label, mask) * mask.shape[1] / mask.sum(axis=1))

# 手工计算二元交叉熵损失
# print(f'{(sigmd(1.1) + sigmd(2.2) + sigmd(-3.3) + sigmd(4.4)) / 4:.4f}')
# print(f'{(sigmd(-1.1) + sigmd(-2.2)) / 2:.4f}')
# print("###################################################")

# 14.4.2.2. Initialize the model parameters
embed_size = 3
# net[0] embeds center words, net[1] embeds context/negative words;
# both tables cover the whole vocabulary.
net = nn.Sequential(nn.Embedding(num_embeddings=len(vocab),
                                 embedding_dim=embed_size),
                    nn.Embedding(num_embeddings=len(vocab),
                                 embedding_dim=embed_size))


# 14.4.2.3. 定义训练阶段代码
def train(net, data_iter, lr, num_epochs, device=d2l.try_gpu()):
    """Train the skip-gram model with negative sampling.

    ``net`` is (center-embedding, context-embedding).  Each batch from
    ``data_iter`` is (centers, contexts_negatives, masks, labels); the
    masks zero out padding so it does not contribute to the loss.
    """

    def init_weights(module):
        # Xavier-initialize every embedding table in the network.
        if type(module) == nn.Embedding:
            nn.init.xavier_uniform_(module.weight)

    net.apply(init_weights)
    net = net.to(device)
    optimizer = torch.optim.Adam(net.parameters(), lr=lr)
    animator = d2l.Animator(xlabel='epoch', ylabel='loss',
                            xlim=[1, num_epochs])
    # Accumulates (normalized loss sum, count); progress reporting is
    # currently disabled, but the accumulator is kept for parity.
    metric = d2l.Accumulator(2)
    for epoch in range(num_epochs):
        timer, num_batches = d2l.Timer(), len(data_iter)
        for i, batch in enumerate(data_iter):
            optimizer.zero_grad()
            center, context_negative, mask, label = (
                tensor.to(device) for tensor in batch)
            # Scores of each center word against its contexts/negatives,
            # shape (batch, 1, num_contexts_negatives).
            pred = skip_gram(center, context_negative, net[0], net[1])
            # Flatten scores to the label shape, then take the masked
            # BCE per row, rescaled so padding does not dilute the mean.
            batch_loss = (
                loss(pred.reshape(label.shape).float(), label.float(), mask)
                / mask.sum(axis=1) * mask.shape[1])
            batch_loss.sum().backward()
            optimizer.step()


# Hyperparameters tuned for the small toy corpus: conservative Adam LR,
# many epochs.
lr, num_epochs = 0.003, 200
train(net, data_iter, lr, num_epochs)

import numpy as np


def get_similar_tokens(query_token, k, net):
    """Return the token most similar to ``query_token`` by cosine similarity.

    Similarity is measured in the center-word embedding space (``net[0]``),
    following eq. 14.1.1: dot(W_i, x) / (|W_i| * |x|).  The best-ranked
    hit is always the query token itself, so it is skipped and only the
    single next-best token is returned; ``k`` only controls how many
    candidates are ranked internally.
    """
    # W is the full center-word embedding table, one row per token.
    W = net[0].weight.data
    print("词嵌入信息", W.shape)
    # x is the embedding row of the query token.
    x = W[vocab[query_token]]
    print("尺度1", torch.sum(W * W, dim=1).shape)
    print("尺度2", torch.sum(x * x).shape)
    # Cosine similarity of every row of W against x.
    # torch.sum(W * W, dim=1) is |W_i|^2 per row, torch.sum(x * x) is
    # |x|^2; their product under the sqrt gives the product of the two
    # vector lengths, and 1e-9 guards against division by zero.
    dots = torch.mv(W, x)
    norms = torch.sqrt(torch.sum(W * W, dim=1) * torch.sum(x * x) + 1e-9)
    cos = dots / norms
    # Rank k+1 candidates; index 0 is the query token itself.
    topk = torch.topk(cos, k=k + 1)[1].cpu().numpy().astype('int32')
    return vocab.to_tokens(topk[1:][0])


from chinese_split_word_ictclas import sentences


def get_similar_sentences(query_tokens, k, net):
    """Print the k+1 sentences most similar to ``query_tokens``.

    For each query token, every sentence is scored by the mean cosine
    similarity between the token's embedding and the embeddings of the
    sentence's words (center-word space, ``net[0]``); the scores are
    summed over all query tokens and the top-ranked sentences printed.
    NOTE(review): unlike ``get_similar_tokens`` no entry is skipped, so
    k+1 sentences are printed; the query is not itself a sentence here.
    """
    # W is the full center-word embedding table, one row per token.
    W = net[0].weight.data
    # Hoist the per-sentence work out of the query-token loop: the
    # stacked word vectors and their squared row norms do not depend on
    # the query token, so compute them once instead of once per token.
    sentence_vecs = []
    for sentence in sentences:
        WS = torch.stack([W[vocab[word]] for word in sentence], dim=0)
        sentence_vecs.append((WS, torch.sum(WS * WS, dim=1)))

    cos_sum = torch.zeros([len(sentences)])
    for query_token in query_tokens:
        x = W[vocab[query_token]]
        x_sq = torch.sum(x * x)
        scores = []
        for WS, WS_sq in sentence_vecs:
            # Cosine similarity of x against each word of the sentence,
            # then averaged; 1e-9 avoids division by zero.
            cos_2_v = torch.mv(WS, x) / torch.sqrt(WS_sq * x_sq + 1e-9)
            scores.append(torch.mean(cos_2_v))
        cos_sum += torch.stack(scores, dim=0)

    topk = torch.topk(cos_sum, k=k + 1)[1].cpu().numpy().astype('int32')
    print("topk", topk)
    for i in topk:
        print(sentences[i])


# a = get_similar_tokens('每秒', 3, net)
# print(a)

# a = get_similar_sentences(['物理', '暴击率', '提高', '30%'], 3, net)
# print(a)
# a = get_similar_sentences(['法术', '提升'], 3, net)
# print(a)
"""
公式 14.2.3 解释
对于任何一句话来说，我们将计算联合概率
假设一句话有T个词，我们将按照每一个词为中心词计算每个词做为中心词的联合概率，然后再将他们乘起来，因此外层是T个概率乘积。
在计算每一个词作为中心词的概率的时候，设定m为窗口，获取左边和右边的各m个词。计算中心词出现同时各个周围的词出现的概率。
最后计算所有这些中心词计算出来的联合概率的乘积。

Word2Vec的主要问题是无法解决多义词的问题。（在没有多义词的场景中使用）
"""

# Visualization: scatter the 3-D embedding of every vocabulary token.
embed = net[0]
# W holds the trained center-word embedding vectors, one row per token.
W = embed.weight.data
print(W.shape)

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import font_manager

# To scan this machine for Chinese fonts: fc-list :lang=zh
# Configure Matplotlib so Chinese labels render correctly.
plt.rcParams['font.sans-serif'] = ['WenQuanYi Micro Hei']
plt.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly
# One plot coordinate per embedding dimension (embed_size == 3).
x, y, z = W.T[0], W.T[1], W.T[2]

# New figure with a single 3D subplot.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# A random RGB color per vocabulary entry.
colors = np.random.rand(W.shape[0], 3)
ax.set_title('带有不同颜色和文字序号的3D散点图示例')
scatter = ax.scatter(x, y, z, c=colors, marker='o')
ax.set_xlabel('X轴')
ax.set_ylabel('Y轴')
ax.set_zlabel('Z轴')
# Label each point with its token text.
for idx in range(W.shape[0]):
    ax.text(x[idx], y[idx], z[idx], vocab.to_tokens([idx])[0], color='black')
# Color bar for the scatter, then show the figure.
fig.colorbar(scatter)
plt.show()
