import torch
import torch.nn as nn

# Toy input: one 3-dimensional embedding per token of the sentence
# "Your journey starts with one step" (6 tokens, d_in = 3).
text_embedding = torch.tensor(
    [
        [0.43, 0.15, 0.89],  # Your (x^1)
        [0.55, 0.87, 0.66],  # journey (x^2)
        [0.57, 0.85, 0.64],  # starts (x^3)
        [0.22, 0.58, 0.33],  # with (x^4)
        [0.77, 0.25, 0.10],  # one (x^5)
        [0.05, 0.80, 0.55]  # step (x^6)
    ]
)


class SelfAttentionV2(nn.Module):
    """Scaled dot-product self-attention using nn.Linear projections.

    nn.Module is the base class for all PyTorch networks; subclasses
    implement ``forward``. Compared to a V1 built directly on
    nn.Parameter, nn.Linear provides the same Q/K/V weight matrices but
    with a better default initialization scheme.
    """

    def __init__(self, d_in, d_out, qkv_bias=False):
        super().__init__()
        self.d_out = d_out

        # Q, K, V projections. With d_in=3, d_out=2 each weight has
        # shape (d_out, d_in) = (2, 3); nn.Linear computes x @ W.T.
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)

    def forward(self, x):
        # step-1: project the input into query/key/value spaces.
        # x.shape = (num_tokens, d_in) -> each result is (num_tokens, d_out)
        keys = self.W_key(x)
        queries = self.W_query(x)
        values = self.W_value(x)

        # step-2: attention scores = dot products between every query
        # and every key -> shape (num_tokens, num_tokens)
        attn_scores = queries @ keys.T

        # step-3: scaling factor = sqrt of the key dimension
        scale = keys.shape[-1]**0.5

        # step-4: attention weights via softmax over the scaled scores.
        # Fix: reuse the precomputed `scale` instead of recomputing
        # keys.shape[-1]**0.5 inline (the original left `scale` unused).
        attn_weights = torch.softmax(attn_scores / scale, dim=-1)

        # step-5: context vectors = weighted sum of the values,
        # (num_tokens, num_tokens) @ (num_tokens, d_out) -> (num_tokens, d_out)
        context_vec = attn_weights @ values

        return context_vec

class CausalAttention(nn.Module):
    """Causal (masked) scaled dot-product self-attention with dropout.

    Each token may attend only to itself and earlier positions; future
    positions are masked to -inf before the softmax so they receive
    zero attention weight.
    """

    def __init__(self, d_in, d_out, context_length, dropout, qkv_bias=False):
        super().__init__()
        self.d_out = d_out
        self.W_query = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_key = nn.Linear(d_in, d_out, bias=qkv_bias)
        self.W_value = nn.Linear(d_in, d_out, bias=qkv_bias)

        self.dropout = nn.Dropout(dropout)
        # Strictly upper-triangular mask (1s above the diagonal) marking the
        # "future" positions to hide.
        # Fix: `torch.trilu` does not exist and raised AttributeError; the
        # correct function is `torch.triu`. register_buffer keeps the mask
        # on the module (moves with .to(device)) without making it a
        # trainable parameter.
        self.register_buffer(
            'mask',
            torch.triu(torch.ones(context_length, context_length), diagonal=1))

    def forward(self, x):
        # x.shape = (batch, num_tokens, d_in)
        b, num_tokens, d_in = x.shape
        keys = self.W_key(x)        # (b, num_tokens, d_out)
        queries = self.W_query(x)
        values = self.W_value(x)

        # (b, num_tokens, num_tokens) attention scores
        attn_scores = queries @ keys.transpose(1, 2)
        # Hide future positions: -inf scores become 0 after softmax.
        attn_scores.masked_fill_(self.mask.bool()[:num_tokens, :num_tokens], -torch.inf)
        attn_weights = torch.softmax(attn_scores / keys.shape[-1]**0.5, dim=-1)
        attn_weights = self.dropout(attn_weights)

        context_vec = attn_weights @ values  # (b, num_tokens, d_out)
        return context_vec



def b():
    """Demo: naive causal masking — zero out future weights, then renormalize rows."""
    # step-1: attention scores and softmax weights from the V2 module
    attention = SelfAttentionV2(3, 2)
    q = attention.W_query(text_embedding)
    k = attention.W_key(text_embedding)

    scores = q @ k.T
    weights = torch.softmax(scores / k.shape[-1] ** 0.5, dim=-1)
    print(scores)

    # step-2: a lower-triangular 0/1 mask zeroes everything above the diagonal
    n = scores.shape[0]
    mask_simple = torch.tril(torch.ones(n, n))
    print(mask_simple)

    mask_simple = weights * mask_simple
    print(mask_simple)

    # step-3: renormalize so each row sums to 1 again
    row_sums = mask_simple.sum(dim=-1, keepdim=True)
    print(row_sums)

    mask_simple_norm = mask_simple / row_sums
    print(mask_simple_norm)


def c():
    """Demo: efficient causal masking — set future scores to -inf before one softmax."""
    # step-1: compute attention scores
    sa_v2 = SelfAttentionV2(3, 2)
    queries = sa_v2.W_query(text_embedding)
    keys = sa_v2.W_key(text_embedding)

    attn_scores = queries @ keys.T
    print(attn_scores)

    # step-2: mask positions above the diagonal with -inf.
    # Fix: the original called `torch.trilu`, which does not exist;
    # `torch.triu(..., diagonal=1)` builds the strictly upper-triangular
    # mask of future positions.
    context_length = attn_scores.shape[0]
    mask = torch.triu(torch.ones(context_length, context_length), diagonal=1)
    print(mask)

    masked = attn_scores.masked_fill(mask.bool(), -torch.inf)
    print(masked)  # fix: print the masked scores, not the mask again

    # step-3: a single softmax over the scaled, masked scores yields
    # properly normalized causal attention weights.
    # Fixes vs. the original: it masked the already-softmaxed weights and
    # then applied a second softmax (wrong math), and it referenced an
    # undefined `row_sums` variable, which raised NameError.
    attn_weights = torch.softmax(masked / keys.shape[-1]**0.5, dim=-1)
    print(attn_weights)



if __name__ == '__main__':
    # Run the simple-masking demo by default.
    b()