import torch
from torch import nn
from d2l import torch as d2l
import math


def masked_softmax(x, valid_len):
    """Softmax over the last axis of ``x``, masking out invalid key positions.

    Implements masked attention: positions beyond each sequence's valid
    length receive (near-)zero attention weight. See README -> network
    architecture -> masking -> causal masking for background.

    Args:
        x: attention scores of shape (batch, num_queries, num_keys).
        valid_len: ``None`` (no masking), a 1-D tensor of shape (batch,)
            giving one valid key count per sequence, or a 2-D tensor of
            shape (batch, num_queries) giving one count per query row.

    Returns:
        Tensor of the same shape as ``x`` with softmax applied along the
        last axis; masked positions get ~0 weight. ``x`` is not mutated.
    """
    if valid_len is None:
        return nn.functional.softmax(x, dim=-1)
    if valid_len.dim() == 1:
        # Broadcast the single per-sequence length to every query row.
        valid_len = torch.repeat_interleave(valid_len, x.size(1)).reshape(x.size(0), x.size(1))
    # BUG FIX: the mask must range over the KEY axis, x.size(-1). The
    # original used x.size(1) (the query axis), which only happened to work
    # for self-attention where num_queries == num_keys and broke otherwise.
    arange = torch.arange(x.size(-1), dtype=torch.float32, device=x.device)
    # (1, num_keys) < (batch, num_queries, 1) broadcasts to
    # (batch, num_queries, num_keys): True where the key index is valid.
    mask = arange[None, :] < valid_len[:, :, None]
    # Large negative fill => ~0 after softmax. masked_fill (out-of-place)
    # avoids mutating the caller's tensor, unlike the original x[~mask] = ...
    x = x.masked_fill(~mask, -1e9)
    return nn.functional.softmax(x, dim=-1)

class DotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d)) V.

    Args:
        dropout: dropout probability applied to the attention weights
            (0 by default, i.e. no dropout — identical to the previous
            behavior).
    """

    def __init__(self, dropout=0):
        super(DotProductAttention, self).__init__()
        # BUG FIX: the original stored the rate as a bare float and never
        # used it, so dropout was silently a no-op. Wrap it in nn.Dropout
        # so it is actually applied (and disabled automatically in eval()).
        self.dropout = nn.Dropout(dropout)

    def forward(self, Q, K, V, valid_len):
        """Compute attention output.

        Args:
            Q: queries, shape (batch, num_queries, d).
            K: keys, shape (batch, num_keys, d).
            V: values, shape (batch, num_keys, d_v).
            valid_len: valid key counts for masking (see masked_softmax),
                or None for no masking.

        Returns:
            Tensor of shape (batch, num_queries, d_v).
        """
        d = Q.size(-1)
        # torch.bmm: batched matrix multiply. Scaling by sqrt(d) keeps the
        # logits' variance independent of the key dimension.
        sim_score = torch.bmm(Q, K.transpose(1, 2)) / math.sqrt(d)
        sim_score = masked_softmax(sim_score, valid_len)  # defined above
        # Dropout on the attention weights, then weight the values.
        return torch.bmm(self.dropout(sim_score), V)
    
class MultiHeadAttention(nn.Module):
    """Multi-head attention that splits the FEATURE dimension across heads.

    Head-splitting scheme (deliberately ShuffleNet-like, per the original
    author): the input [batch, seq, features] is carved along the feature
    axis — NOT the sequence axis — into ``num_heads`` chunks of size
    features/num_heads, each head attends independently, and W_o fuses the
    heads back together.
    """

    def __init__(self, num_hiddens, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.attention = DotProductAttention()
        assert num_hiddens % num_heads == 0, f"num_hiddens必须能被平均的划分到head上"
        # LazyLinear infers its input dimension on the first forward pass;
        # each head gets its own num_hiddens // num_heads wide projection.
        head_dim = num_hiddens // num_heads
        self.W_q = nn.LazyLinear(out_features=head_dim)
        self.W_k = nn.LazyLinear(out_features=head_dim)
        self.W_v = nn.LazyLinear(out_features=head_dim)
        # Output projection: fuses the per-head information, as in the
        # original paper (and in the spirit of ShuffleNet's channel mixing).
        self.W_o = nn.LazyLinear(num_hiddens)
        self.num_heads = num_heads

    def forward(self, q, k, v, valid_len):
        def _split_heads(t):
            # [batch, seq, feat] -> [batch * heads, seq, feat / heads]:
            # folding heads into the batch axis lets plain bmm attention
            # process all heads at once.
            b, s = t.shape[0], t.shape[1]
            t = t.reshape(b, s, self.num_heads, -1).transpose(1, 2)
            return t.reshape(b * self.num_heads, s, t.shape[-1])

        def _merge_heads(t):
            # Exact inverse of _split_heads:
            # [batch * heads, seq, d] -> [batch, seq, heads * d].
            s, d = t.shape[1], t.shape[2]
            t = t.reshape(-1, self.num_heads, s, d).transpose(1, 2)
            return t.reshape(t.shape[0], s, self.num_heads * d)

        queries = self.W_q(_split_heads(q))
        keys = self.W_k(_split_heads(k))
        values = self.W_v(_split_heads(v))

        if valid_len is not None:
            # The batch axis grew by a factor of num_heads, so replicate
            # each sequence's valid length once per head.
            valid_len = torch.repeat_interleave(valid_len, repeats=self.num_heads, dim=0)

        context = self.attention(queries, keys, values, valid_len)
        return self.W_o(_merge_heads(context))
        
        