import numpy as np
import torch
import itertools
import pandas as pd  # pandas: intended for prettier output (not used in the visible code)

# Toy single-head self-attention demo: hyperparameters.
embedding_dim = 4  # dimensionality of each word embedding
d_k = 3  # dimensionality of query/key vectors
d_v = 3  # dimensionality of value vectors
# Vocabulary order: "猫" (cat), "爱" (love), "吃" (eat), "鱼" (fish)
vocabulary = ["猫", "爱", "吃", "鱼"]
# Fixed (non-trainable) embedding table: one row per vocabulary word.
embedding_table = np.array(
    [
        [0.12, 0.85, -0.33, 0.56],  # "猫" (cat)
        [0.44, 0.21, 0.91, 0.13],  # "爱" (love)
        [0.67, 0.34, -0.58, 0.79],  # "吃" (eat)
        [0.25, 0.92, 0.47, -0.31],  # "鱼" (fish)
    ]
)
# W_Q = np.random.randn(embedding_dim, d_k)
# W_K = np.random.randn(embedding_dim, d_k)
# W_V = np.random.randn(embedding_dim, d_v)
# Trainable projection matrices for queries, keys and values (randomly initialized).
W_Q = torch.randn(embedding_dim, d_k, requires_grad=True)
W_K = torch.randn(embedding_dim, d_k, requires_grad=True)
W_V = torch.randn(embedding_dim, d_v, requires_grad=True)

# SGD optimizer over the three trainable projection matrices.
optimizer = torch.optim.SGD([W_Q, W_K, W_V], lr=0.09)


def softmax(x, axis):
    """Numerically stable softmax along the given axis.

    Accepts either a ``torch.Tensor`` or a ``numpy.ndarray``; the per-axis
    maximum is subtracted before exponentiation so large scores do not
    overflow ``exp``.
    """
    if isinstance(x, torch.Tensor):
        shifted = x - x.max(dim=axis, keepdim=True).values
        exp = shifted.exp()
        return exp / exp.sum(dim=axis, keepdim=True)
    shifted = x - np.max(x, axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=axis, keepdims=True)


def explain_prediction(input_idx, target_idx, output, target_embedding_torch):
    """Print a human-readable report of one prediction.

    The attention outputs are averaged over all input positions; that mean
    vector is compared to the target embedding with mean squared error, and
    the prediction is labelled correct when the MSE is below 0.05.
    """
    words = " ".join(vocabulary[i] for i in input_idx)
    predicted = output.mean(dim=0).detach().numpy()
    expected = target_embedding_torch.detach().numpy()
    mse = np.mean((predicted - expected) ** 2)
    print(f"输入词: {words}")
    print(f"目标词: {vocabulary[target_idx]}")
    print(f"预测向量: {np.round(predicted, 4)}")
    print(f"目标向量: {np.round(expected, 4)}")
    print(f"均方误差: {mse:.4f}")
    # Threshold 0.05 is the demo's notion of "close enough".
    verdict = "预测: 正确" if mse < 0.05 else "预测: 错误"
    print(verdict)


def self_attention(input_indices):
    """Single-head scaled dot-product self-attention over selected embeddings.

    Args:
        input_indices: iterable of vocabulary indices (the input words).

    Returns:
        (output, weights): ``output`` is the (n, d_v) attended representation
        of each input word; ``weights`` is the (n, n) attention matrix whose
        rows sum to 1.
    """
    # 1. Look up the input words' embedding rows as a tensor matching W_Q's
    #    dtype, so the matmuls stay differentiable w.r.t. W_Q/W_K/W_V.
    embedding_table_torch = torch.tensor(embedding_table, dtype=W_Q.dtype)
    input_embeddings = embedding_table_torch[list(input_indices)]
    Q = input_embeddings @ W_Q  # (n, d_k)
    K = input_embeddings @ W_K  # (n, d_k)
    V = input_embeddings @ W_V  # (n, d_v)

    # 2. Score matrix: scores[i, j] is the dot product of Q[i] and K[j].
    scores = Q @ K.T  # (n, n)

    # 3. Scale by sqrt(d_k) for numerical stability. Read d_k off Q itself
    #    (rather than the module-level constant) so the block generalizes
    #    to any projection width.
    scores = scores / (Q.shape[1] ** 0.5)

    # 4. Row-wise softmax turns scores into attention weights.
    weights = softmax(scores, axis=1)  # (n, n)

    # 5. Weighted sum of the value vectors gives each word's final output.
    #    (Leftover debug print of `output` removed — it spammed stdout on
    #    every training step.)
    output = weights @ V  # (n, d_v)
    return output, weights


def index():
    """Leave-one-out training loop over the toy vocabulary.

    For 180 epochs, every combination of (vocab_size - 1) words is fed
    through self-attention; the held-out word's embedding (truncated to its
    first d_v components) is the regression target. The projection matrices
    W_Q/W_K/W_V are updated by SGD on the mean-squared error, and each step
    prints the loss plus a human-readable prediction report.
    """
    # Loop invariants — hoisted out of the epoch loop (they never change).
    vocab_size = len(embedding_table)          # e.g. 4
    input_size = vocab_size - 1                # feed all words but one
    all_indices = list(range(vocab_size))      # [0, 1, 2, 3]

    for _epoch in range(180):
        for input_idx in itertools.combinations(all_indices, input_size):
            # The single word missing from input_idx is the prediction target.
            target_idx = (set(all_indices) - set(input_idx)).pop()

            # Target vector: the held-out word's embedding truncated to its
            # first d_v components, so it matches the attention output width.
            target_embedding = embedding_table[target_idx][:d_v]

            output, weights = self_attention(input_idx)

            # Loss: MSE between every attended output row and the target.
            target_embedding_torch = torch.tensor(target_embedding, dtype=output.dtype)
            loss = torch.mean((output - target_embedding_torch) ** 2)
            print(loss)

            optimizer.zero_grad()   # clear gradients from the previous step
            loss.backward()         # backprop through the attention graph
            optimizer.step()        # SGD update of W_Q, W_K, W_V

            explain_prediction(input_idx, target_idx, output, target_embedding_torch)


# Guard the entry point so importing this module does not kick off training.
if __name__ == "__main__":
    index()