import torch
from torch import nn


class Attention(nn.Module):
    """Multi-head scaled dot-product attention.

    The last input dimension is split into ``heads`` heads of size
    ``head_dim``; q/k/v are then passed through per-head linear projections
    whose weights are *shared across heads* (``Linear(head_dim, head_dim)``),
    and no output projection is applied. This differs from the canonical
    formulation in "Attention Is All You Need" — kept as-is to preserve the
    module's interface.

    Args:
        heads: number of attention heads.
        head_dim: dimensionality of each head; inputs' last dimension must
            equal ``heads * head_dim``.
        mask: optional tensor broadcastable to ``(batch, heads, q_len,
            k_len)``; positions where ``mask == 0`` receive (near) zero
            attention weight.
    """

    def __init__(self, heads, head_dim, mask=None):
        super().__init__()
        self.heads = heads
        self.head_dim = head_dim
        # Projections act on the per-head feature dim, after the head split,
        # so the same weights are reused by every head.
        self.qw = nn.Linear(self.head_dim, self.head_dim)
        self.kw = nn.Linear(self.head_dim, self.head_dim)
        self.vw = nn.Linear(self.head_dim, self.head_dim)
        self.mask = mask

    def forward(self, q, k, v):
        """Compute attention over ``v`` keyed by ``k`` for queries ``q``.

        Assumes inputs are ``(batch, seq_len, heads * head_dim)``; the
        original note described them as "(B * N, C, T)", i.e. the batch axis
        may itself be a flattened B*N — TODO confirm with callers.

        Args:
            q: ``(batch, q_len, heads * head_dim)`` queries.
            k: ``(batch, k_len, heads * head_dim)`` keys.
            v: ``(batch, k_len, heads * head_dim)`` values.

        Returns:
            out: ``(batch, q_len, heads * head_dim)`` attended values.
            a:   ``(batch, heads, q_len, k_len)`` attention weights
                 (rows sum to 1 over the key axis).
        """
        batch = q.shape[0]
        # Split the feature dim into heads: (batch, len, heads, head_dim).
        q = self.qw(q.reshape(batch, q.shape[1], self.heads, self.head_dim))
        k = self.kw(k.reshape(batch, k.shape[1], self.heads, self.head_dim))
        v = self.vw(v.reshape(batch, v.shape[1], self.heads, self.head_dim))

        # Attention logits: (batch, heads, q_len, k_len).
        a = torch.einsum("nqhd,nkhd->nhqk", [q, k])
        # BUGFIX: scale by sqrt(d_k) = sqrt(head_dim) as in "Attention Is
        # All You Need" — each head's dot product spans head_dim features,
        # not heads * head_dim. Scale BEFORE masking so masked logits stay
        # at the full -1e20.
        a = a / (self.head_dim ** 0.5)
        if self.mask is not None:
            # -1e20 rather than -inf: a fully-masked row then softmaxes to
            # a uniform distribution instead of NaN.
            a = a.masked_fill(self.mask == 0, float("-1e20"))
        a = torch.softmax(a, dim=-1)

        # Weighted sum over keys, then merge the heads back into one dim.
        out = torch.einsum("nhql,nlhd->nqhd", [a, v])
        out = out.reshape(batch, out.shape[1], -1)
        return out, a