import torch.nn as nn
import torch

# Hand-written Transformer components for the from-scratch LLM series.
class MultiHeadAttention(nn.Module):
    """Multi-head scaled dot-product self-attention (Transformer style).

    Projects the input into query/key/value spaces, splits the hidden
    dimension into `num_heads` independent heads, applies scaled
    dot-product attention per head, then merges the heads and maps the
    result back through an output projection.
    """

    def __init__(self, hidden_size, num_heads):
        """
        Args:
            hidden_size: total model (embedding) dimension.
            num_heads: number of attention heads; must divide hidden_size.

        Raises:
            ValueError: if hidden_size is not divisible by num_heads.
        """
        super(MultiHeadAttention, self).__init__()
        if hidden_size % num_heads != 0:
            raise ValueError(
                f"hidden_size ({hidden_size}) must be divisible by num_heads ({num_heads})"
            )
        # Model (hidden) dimension.
        self.hidden_size = hidden_size
        # Number of attention heads.
        self.num_heads = num_heads
        # Dimension handled by each head.
        self.head_size = hidden_size // num_heads

        # Q/K/V projections act on the FULL hidden dimension; the heads
        # are split afterwards in forward(). (Projecting per-head with
        # Linear(head_size, head_size) would not match the input shape.)
        self.q_w = nn.Linear(self.hidden_size, self.hidden_size)
        self.k_w = nn.Linear(self.hidden_size, self.hidden_size)
        self.v_w = nn.Linear(self.hidden_size, self.hidden_size)

        # Output projection applied after the heads are concatenated.
        self.o_w = nn.Linear(self.hidden_size, self.hidden_size)

    def forward(self, x, att_mask=None):
        """
        Args:
            x: input tensor of shape (batch, seq_len, hidden_size).
            att_mask: optional mask broadcastable to
                (batch, num_heads, seq_len, seq_len); positions where the
                mask is 0 are excluded from attention.

        Returns:
            Tensor of shape (batch, seq_len, hidden_size).
        """
        batch_size, seq_len, _ = x.shape

        # Project, then split the hidden dim into heads:
        # (batch, seq, hidden) -> (batch, num_heads, seq, head_size)
        q = self.q_w(x).view(batch_size, seq_len, self.num_heads, self.head_size).transpose(1, 2)
        k = self.k_w(x).view(batch_size, seq_len, self.num_heads, self.head_size).transpose(1, 2)
        v = self.v_w(x).view(batch_size, seq_len, self.num_heads, self.head_size).transpose(1, 2)

        # Scaled dot-product attention scores: (batch, heads, seq, seq).
        # Scaling by sqrt(head_size) keeps the softmax gradients stable.
        scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_size ** 0.5)

        if att_mask is not None:
            # Masked positions get -inf so softmax assigns them ~0 weight.
            scores = scores.masked_fill(att_mask == 0, float("-inf"))

        attn = torch.softmax(scores, dim=-1)

        # Weighted sum of values, then merge heads back:
        # (batch, heads, seq, head_size) -> (batch, seq, hidden)
        out = torch.matmul(attn, v)
        out = out.transpose(1, 2).contiguous().view(batch_size, seq_len, self.hidden_size)

        # Final output projection.
        return self.o_w(out)