import torch
import torch.nn as nn

# Report the runtime environment before doing any work.
print("PyTorch版本:", torch.__version__)
print("CUDA可用:", torch.cuda.is_available())

# Step 1: prepare the data -- build the vocabulary and the embedding table.
vocabulary = ["澳大利亚", "煤炭", "储量", "非常", "丰富", "但是", "缺少", "农地"]
vocab_size = len(vocabulary)
print("词汇表:", vocabulary)

# Map each word to its integer index (its row in the embedding table).
word_to_id = dict(zip(vocabulary, range(vocab_size)))
print("词到索引的映射:", word_to_id)

# One trainable 3-dimensional vector per word, randomly initialized.
embedding_dim = 3
embedding_table = torch.randn(vocab_size, embedding_dim, requires_grad=True)
print("初始化的嵌入表形状:", embedding_table.shape)

# Hyperparameters for the hand-rolled gradient-descent loop below.
learning_rate = 0.1
epochs = 50

# Build (center, context) training pairs: each word is paired with every
# neighbour at most `window_size` positions away. The vocabulary list is
# treated as a single sentence for this toy example.
window_size = 2
training_pairs = [
    (center, context)
    for center in range(len(vocabulary))
    for context in range(
        max(0, center - window_size),
        min(len(vocabulary), center + window_size + 1),
    )
    if context != center
]

print(f"创建了 {len(training_pairs)} 个训练对")

for epoch in range(epochs):
    total_loss = 0
    for center_id, context_id in training_pairs:
        # Forward pass: similarity is the dot product of the two word vectors.
        center_vec = embedding_table[center_id]
        context_vec = embedding_table[context_id]
        similarity = torch.dot(center_vec, context_vec)

        # Squared-error loss: push every (center, context) similarity toward 1.
        # (No negative sampling -- this toy objective drives all vectors
        # toward each other.)
        loss = (similarity - 1.0) ** 2
        total_loss += loss.item()

        # Backward pass. The gradient buffer is already clear at this point:
        # it is zeroed right after every update below (and is None before the
        # very first backward call), so no pre-backward zeroing is needed.
        loss.backward()

        # Manual SGD step, performed outside of autograd tracking.
        with torch.no_grad():
            embedding_table -= learning_rate * embedding_table.grad
            # Clear the gradient so it does not accumulate into the next pair.
            embedding_table.grad.zero_()

    if (epoch + 1) % 10 == 0:
        print(
            f"轮次 [{epoch+1}/{epochs}], 平均损失: {total_loss/len(training_pairs):.6f}"
        )

# After training, inspect the learned vector for every word.
print("\n训练完成后所有词的向量:")
for word, idx in word_to_id.items():
    print(f"{word}: {embedding_table[idx].detach().numpy()}")

# Look specifically at the similarity between "澳大利亚" and "煤炭".
australia_id = word_to_id["澳大利亚"]
coal_id = word_to_id["煤炭"]
final_similarity = torch.dot(
    embedding_table[australia_id], embedding_table[coal_id]
).item()
print(f"\n'澳大利亚'和'煤炭'的最终相似度: {final_similarity:.6f}")

# Print the full pairwise dot-product similarity matrix.
print("\n所有词对的相似度矩阵:")
header_cells = "".join(f"{word[:4]:8}" for word in vocabulary)
print("        " + header_cells)

for i, row_word in enumerate(vocabulary):
    row_cells = "".join(
        f"{torch.dot(embedding_table[i], embedding_table[j]).item():8.4f}"
        for j in range(len(vocabulary))
    )
    print(f"{row_word[:8]:8}" + row_cells)