# %%
# Level 1: the simplified version
import math
import torch
import torch.nn as nn
# %%
class SelfAttention1(nn.Module):
    """Minimal single-head self-attention.

    Three separate linear projections produce Q, K and V from the same
    input, then scaled dot-product attention mixes the value vectors.
    """

    def __init__(self, hidden_dim: int = 728) -> None:
        super().__init__()
        self.hidden_dim = hidden_dim

        # One projection per role; each maps (..., hidden_dim) -> (..., hidden_dim).
        self.query_proj = nn.Linear(hidden_dim, hidden_dim)
        self.key_proj = nn.Linear(hidden_dim, hidden_dim)
        self.value_proj = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x):
        # x: assumed (batch, seq_len, hidden_dim) — confirmed by the callers below.
        q = self.query_proj(x)
        k = self.key_proj(x)
        v = self.value_proj(x)

        # Raw similarity scores between every query/key pair: (batch, seq, seq).
        scores = q @ k.transpose(-1, -2)

        # Divide by sqrt(d) so the softmax logits don't blow up as the
        # hidden dimension grows, then normalise each query row.
        weights = torch.softmax(scores / math.sqrt(self.hidden_dim), dim=-1)

        # Convex combination of value vectors: (batch, seq, hidden_dim).
        return weights @ v
# %%
# Smoke-test SelfAttention1 on a small random (batch=3, seq=2, dim=4) batch.
X = torch.rand(3, 2, 4)
attention = SelfAttention1(4)
attention(X)
# %%
## Level 2: efficiency optimization (fused QKV projection)
class SelfAttention2(nn.Module):
    """Single-head self-attention with a fused QKV projection.

    Efficiency tweak over version 1: a single Linear of width 3*dim
    computes Q, K and V in one matmul, and the result is split along
    the last axis.
    """

    def __init__(self, dim) -> None:
        super().__init__()

        self.dim = dim
        # Fused projection: (..., dim) -> (..., 3*dim), split into Q/K/V below.
        self.QKV = nn.Linear(dim, dim * 3)

    def forward(self, x):
        # x: assumed (batch, seq, dim) — confirmed by the callers below.
        QKV = self.QKV(x)

        # Each of Q, K, V is (batch, seq, dim).
        Q, K, V = torch.split(QKV, self.dim, dim=-1)

        # (batch, seq, seq) attention logits.
        attention_value = torch.matmul(
            Q, K.transpose(-1, -2)
        )

        # Scale by sqrt(dim) to keep the softmax inputs well-conditioned.
        attention_weight = torch.softmax(
            attention_value / math.sqrt(self.dim),
            dim=-1
        )

        # Fix: removed a leftover debug print of the full attention-weight
        # matrix, which spammed stdout on every forward pass.

        output = attention_weight @ V

        return output
# %%
# Smoke-test SelfAttention2 on a square random (batch=3, seq=4, dim=4) batch.
X = torch.rand(3, 4, 4)
attention = SelfAttention2(4)
attention(X)
# %%
### Level 3: add some practical details
# 1. dropout placement (on the attention weights, after softmax)
# 2. attention_mask support
# 3. output projection matrix
class SelfAttention3(nn.Module):
    """Single-head self-attention with masking, dropout and an output
    projection.

    Details added over version 2:
      1. dropout on the attention weights (applied after softmax);
      2. an optional ``attention_mask`` (0 = position is hidden);
      3. a final output projection.
    """

    def __init__(self, dim, dropout_rate = 0.1)->None:
        super().__init__()

        self.dim = dim
        # Fused QKV projection: (..., dim) -> (..., 3*dim).
        self.QKV = nn.Linear(dim, dim * 3)

        # Regularises which positions each query attends to during training.
        self.attention_dropout = nn.Dropout(dropout_rate)

        # Optional extra mixing of the attended values.
        self.output_proj = nn.Linear(dim, dim)

    def forward(self, x, attention_mask=None):
        # x: (batch, seq, dim); attention_mask broadcastable to
        # (batch, seq, seq), with 0 marking positions to hide.
        QKV = self.QKV(x)

        Q, K, V = torch.split(QKV, self.dim, dim=-1)
        # (batch, seq, seq) scaled attention logits.
        attention_weight = Q @ K.transpose(-1, -2) / math.sqrt(self.dim)

        if attention_mask is not None:
            # BUG FIX: the fill value was float("-1e-20"), which is ~0 and
            # leaves masked logits essentially unchanged, so masked
            # positions still received attention weight. Fill with -inf so
            # softmax assigns them exactly zero (matches SelfAttention4).
            attention_weight = attention_weight.masked_fill(
                attention_mask == 0,
                float("-inf")
            )

        attention_weight = torch.softmax(attention_weight, dim=-1)

        # Dropout after softmax: randomly drops attention links in training.
        attention_weight = self.attention_dropout(attention_weight)
        output = attention_weight @ V

        output = self.output_proj(output)

        return output
# %%
# Exercise SelfAttention3 with a padding-style attention mask.
X = torch.rand(3, 4, 2)
# One row of key-validity flags per batch element (1 = attend, 0 = hide).
mark = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]])
print(mark.shape)
mark = mark.unsqueeze(dim=1)  # (3, 4) -> (3, 1, 4): insert a query axis
print(mark.shape)
mark = mark.repeat(1, 4, 1)  # (3, 1, 4) -> (3, 4, 4): one mask row per query
print(f"repeat shape is:{mark.size()}")

net = SelfAttention3(2)
net(X, mark)
# %%
### Level 4: interview-style implementation
class SelfAttention4(nn.Module):
    """Interview-style single-head self-attention.

    Separate Q/K/V projections, optional mask (0 = hidden position) and
    dropout on the attention weights; no output projection.
    """

    def __init__(self, dim:int, dropout_rate: float=0.1)->None:
        super().__init__()

        self.dim = dim
        self.Q = nn.Linear(dim, dim)
        self.K = nn.Linear(dim, dim)
        self.V = nn.Linear(dim, dim)

        self.attention_dropout = nn.Dropout(dropout_rate)

    def forward(self, x, attention_mask=None):
        # x: assumed (batch, seq, dim) — confirmed by the caller below.
        queries, keys, values = self.Q(x), self.K(x), self.V(x)

        # Scaled dot-product logits: (batch, seq, seq).
        logits = queries @ keys.transpose(-1, -2) / math.sqrt(self.dim)

        if attention_mask is not None:
            # -inf logits become exactly zero weight after softmax.
            logits = logits.masked_fill(attention_mask == 0, float("-inf"))

        # Normalise per query row, then drop random links during training.
        weights = self.attention_dropout(torch.softmax(logits, dim=-1))

        return weights @ values
# %%
# Exercise SelfAttention4 with the same padding-style mask pattern.
X = torch.rand(3, 4, 2)
mark = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0], [1, 0, 0, 0]])
mark = mark.unsqueeze(dim=1).repeat(1, 4, 1)  # (3, 4) -> (3, 4, 4)

net = SelfAttention4(2)
net(X, mark)
# %%
