from typing import Optional
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from entmax import sparsemax, entmax15, entmax_bisect, EntmaxBisect



class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention with a selectable normalizer.

    q/k/v are first projected from ``in_features`` to ``proj_features`` by
    independent linear layers, then standard scaled dot-product attention is
    computed with the chosen normalization (softmax or an entmax variant).

    Args:
        dropout: drop probability applied to the attention weights;
            ``None`` disables dropout entirely.
        attn_type: one of ``'softmax'``, ``'sparsemax'``, ``'entmax15'``,
            ``'entmax'``. Previously this was hard-coded (and immediately
            overwritten) inside ``__init__``; it is now a parameter whose
            default preserves the old effective value (``'softmax'``).
        in_features: feature size of the incoming q/k/v (default 2048,
            matching the previous hard-coded projections).
        proj_features: feature size after projection (default 256).
    """

    def __init__(self, dropout: Optional[float] = None, attn_type: str = 'softmax',
                 in_features: int = 2048, proj_features: int = 256):
        super(ScaledDotProductAttention, self).__init__()
        # BUG FIX: the dropout module was constructed but never used; it is
        # now applied to the attention weights in forward().
        self.dropout = nn.Dropout(p=dropout) if dropout is not None else nn.Identity()
        self.attn_type = attn_type
        # Per-stream projections (previously hard-coded 2048 -> 256).
        self.conv1d_q = nn.Linear(in_features, proj_features)
        self.conv1d_k = nn.Linear(in_features, proj_features)
        self.conv1d_v = nn.Linear(in_features, proj_features)

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Compute attention over the projected q/k/v.

        Args:
            q, k, v: tensors whose last dimension is ``in_features``.
            mask: optional tensor broadcastable to the score matrix;
                positions where ``mask == 0`` are suppressed.

        Returns:
            Attention output with last dimension ``proj_features``.

        Raises:
            ValueError: if ``attn_type`` is not one of the supported names.
        """
        q = self.conv1d_q(q)
        k = self.conv1d_k(k)
        v = self.conv1d_v(v)
        # Scale by sqrt(d_k) of the *projected* key dimension.
        scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1))

        if mask is not None:
            # A large negative value (rather than -inf) keeps the entmax
            # variants numerically well-behaved.
            scores = scores.masked_fill(mask == 0, -1e12)
        if self.attn_type == 'softmax':
            attn = F.softmax(scores, dim=-1)
        elif self.attn_type == 'sparsemax':
            attn = sparsemax(scores, dim=-1)
        elif self.attn_type == 'entmax15':
            attn = entmax15(scores, dim=-1)
        elif self.attn_type == 'entmax':
            attn = entmax_bisect(scores, alpha=1.6, dim=-1, n_iter=25)
        else:
            # BUG FIX: an unrecognized attn_type previously fell through and
            # raised a confusing NameError on `attn`.
            raise ValueError(f'unknown attn_type: {self.attn_type!r}')
        # BUG FIX: actually apply the configured dropout (Identity when
        # dropout=None, so default behavior is unchanged).
        attn = self.dropout(attn)
        return torch.matmul(attn, v)


class RefPosMultiHeadAttention(nn.Module):
    """Multi-head attention built on ScaledDotProductAttention.

    Splits the ``inputs`` feature dimension into ``heads`` heads of
    ``inputs // heads`` features each, attends per head, then merges the
    heads and applies an output projection.

    Args:
        heads: number of attention heads; must evenly divide ``inputs``.
        inputs: total feature size of q/k/v.
        a_dropout: dropout probability on the attention weights
            (``None`` disables it).
        f_dropout: dropout probability on the final output
            (``None`` disables it).
    """

    def __init__(self, heads: int, inputs: int, a_dropout: Optional[float] = None, f_dropout: Optional[float] = None):
        super(RefPosMultiHeadAttention, self).__init__()
        self.heads = heads
        self.inputs = inputs
        assert inputs % heads == 0
        self.hidden = inputs // heads

        # NOTE(review): ScaledDotProductAttention applies fixed 2048 -> 256
        # projections to its inputs, so each head's feature size would have
        # to be 2048 here and the 256-dim per-head output will not reshape
        # back to ``inputs`` below — confirm intended sizes with callers.
        self.attention = ScaledDotProductAttention(a_dropout)
        self.linear_q = nn.Linear(inputs, inputs)
        self.linear_k = nn.Linear(inputs, inputs)
        self.linear_v = nn.Linear(inputs, inputs)
        self.output = nn.Linear(inputs, inputs)
        self.dropout = nn.Dropout(p=f_dropout) if f_dropout is not None else nn.Identity()

    def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
                mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Run multi-head attention; ``mask`` (if given) is shared by all heads.

        q/k/v: ``(batch, seq, inputs)``; mask: ``(batch, seq)`` broadcast to
        every head — TODO confirm mask shape against callers.
        """
        bs = q.size(0)
        # (batch, seq, inputs) -> (batch, heads, seq, hidden)
        q = self.linear_q(q).view(bs, -1, self.heads, self.hidden).transpose(1, 2)
        k = self.linear_k(k).view(bs, -1, self.heads, self.hidden).transpose(1, 2)
        v = self.linear_v(v).view(bs, -1, self.heads, self.hidden).transpose(1, 2)
        # BUG FIX: mask is Optional, but it was unconditionally expanded,
        # so calling with mask=None raised AttributeError.
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(1).repeat(1, self.heads, 1, 1)
        out = self.attention(q, k, v, mask).transpose(1, 2).contiguous()
        # Merge heads back into a single feature dimension.
        out = out.view(bs, -1, self.inputs)
        return self.dropout(self.output(out))


if __name__ == '__main__':
    # Smoke test. BUG FIX: the previous input was reshaped to
    # (2, 28, 196), but ScaledDotProductAttention's projections expect a
    # last dimension of 2048, so the script crashed with a matmul shape
    # mismatch. Use a correctly-sized random tensor instead.
    q = torch.rand(2, 28, 2048)
    sa = ScaledDotProductAttention()
    res = sa(q, q, q)
    assert res.shape == (2, 28, 256)