import torch
import torch.nn as nn

# Toy token embeddings for the 6-token sentence "Your journey starts with one step":
# one hand-picked 3-dimensional row vector per token, shape (6, 3).
text_embedding = torch.tensor(
    [
        [0.43, 0.15, 0.89],  # Your (x^1)
        [0.55, 0.87, 0.66],  # journey (x^2)
        [0.57, 0.85, 0.64],  # starts (x^3)
        [0.22, 0.58, 0.33],  # with (x^4)
        [0.77, 0.25, 0.10],  # one (x^5)
        [0.05, 0.80, 0.55]  # step (x^6)
    ]
)


class SelfAttentionV1(nn.Module):
    """Minimal scaled dot-product self-attention layer.

    nn.Module is the base class for all PyTorch neural networks; subclasses
    implement ``forward``.  Here the Q, K, V projection matrices are held
    directly as ``nn.Parameter`` tensors filled with uniform random values.

    Args:
        d_in: dimensionality of each input token embedding.
        d_out: dimensionality of the query/key/value projections.
    """

    def __init__(self, d_in, d_out):
        super().__init__()
        self.d_out = d_out

        # Randomly initialized projection matrices; with d_in=3, d_out=2
        # each has shape (3, 2).
        self.W_query = nn.Parameter(torch.rand(d_in, d_out))
        self.W_key = nn.Parameter(torch.rand(d_in, d_out))
        self.W_value = nn.Parameter(torch.rand(d_in, d_out))

    def forward(self, x):
        """Return context vectors for ``x`` of shape (num_tokens, d_in).

        Output shape is (num_tokens, d_out).
        """
        # step-1: project the input into query/key/value space.
        # e.g. x.shape = (6, 3), weight shape (3, 2) -> result shape (6, 2)
        keys = x @ self.W_key
        queries = x @ self.W_query
        values = x @ self.W_value

        # step-2: attention scores = dot products of every query with every key,
        # (6, 2) @ (2, 6) -> (6, 6)
        attn_scores = queries @ keys.T

        # step-3: scaling factor = sqrt of the key dimension
        scale = keys.shape[-1]**0.5

        # step-4: attention weights via row-wise softmax of the scaled scores
        # (fix: reuse `scale` instead of recomputing keys.shape[-1]**0.5 inline)
        attn_weights = torch.softmax(attn_scores / scale, dim=-1)

        # step-5: context vectors, (6, 6) @ (6, 2) -> (6, 2)
        context_vec = attn_weights @ values

        return context_vec


class SelfAttentionV2(nn.Module):
    """Scaled dot-product self-attention using ``nn.Linear`` projections.

    nn.Module is the base class for all PyTorch neural networks; subclasses
    implement ``forward``.  Unlike V1, the Q, K, V projections are
    ``nn.Linear`` layers, whose weights use PyTorch's more sophisticated
    default initialization — conceptually equivalent to V1's raw
    ``nn.Parameter`` matrices.

    Args:
        d_in: dimensionality of each input token embedding.
        d_out: dimensionality of the query/key/value projections.
        qkv_bias: whether the Q/K/V linear layers carry a bias term.
    """

    def __init__(self, d_in, d_out, qkv_bias=False):
        super().__init__()
        self.d_out = d_out

        # Linear layers own the (d_out, d_in) weight matrices; with d_in=3,
        # d_out=2 each projection maps (*, 3) -> (*, 2).
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)

    def forward(self, x):
        """Return context vectors for ``x`` of shape (num_tokens, d_in).

        Output shape is (num_tokens, d_out).
        """
        # step-1: project the input into query/key/value space.
        # e.g. x.shape = (6, 3) -> each projection yields shape (6, 2)
        keys = self.W_key(x)
        queries = self.W_query(x)
        values = self.W_value(x)

        # step-2: attention scores = dot products of every query with every key,
        # (6, 2) @ (2, 6) -> (6, 6)
        attn_scores = queries @ keys.T

        # step-3: scaling factor = sqrt of the key dimension
        scale = keys.shape[-1]**0.5

        # step-4: attention weights via row-wise softmax of the scaled scores
        # (fix: reuse `scale` instead of recomputing keys.shape[-1]**0.5 inline)
        attn_weights = torch.softmax(attn_scores / scale, dim=-1)

        # step-5: context vectors, (6, 6) @ (6, 2) -> (6, 2)
        context_vec = attn_weights @ values

        return context_vec





def a():
    """Walk through scaled dot-product attention by hand for one query token.

    Uses the module-level ``text_embedding`` (6 tokens x 3 dims) and prints
    every intermediate result of the computation for the second token.
    """
    token_vec = text_embedding[1]
    d_in = text_embedding.shape[1]
    d_out = 2

    torch.manual_seed(123)

    # Projection matrices, random at initialization: rows match the input
    # embedding width, columns are the chosen projection width.
    W_query = torch.nn.Parameter(torch.rand(d_in, d_out), requires_grad=False)
    W_key = torch.nn.Parameter(torch.rand(d_in, d_out), requires_grad=False)
    W_value = torch.nn.Parameter(torch.rand(d_in, d_out), requires_grad=False)

    q2 = token_vec @ W_query
    k2 = token_vec @ W_key
    v2 = token_vec @ W_value
    for tensor in (token_vec, W_query, q2, k2, v2):
        print(tensor)

    all_keys = text_embedding @ W_key
    all_values = text_embedding @ W_value
    print("keys.shape: ", all_keys.shape)
    print("values.shape: ", all_values.shape)

    # step-2: attention score = dot product of the query with one key vector
    score_22 = q2.dot(all_keys[1])
    print(score_22)

    # Dot the query against every key to get the full row of attention scores.
    scores_2 = q2 @ all_keys.T
    print(scores_2)

    d_k = all_keys.shape[-1]
    print("shape-------------->: ", all_keys.shape, d_k, all_keys.size(), all_keys.size(-1))
    weights_2 = torch.softmax(scores_2 / d_k ** 0.5, dim=-1)
    print(weights_2)

    # Weighted sum over the value vectors yields the context vector.
    context_2 = weights_2 @ all_values
    print(context_2)

# Script entry point: run the step-by-step attention demo.
if __name__ == '__main__':
    a()