from typing import Optional
import math

import torch
import torch.nn as nn
import torch.nn.functional as F
from entmax import sparsemax, entmax15, entmax_bisect, EntmaxBisect



# NOTE: the entire ScaledDotProductAttention class below is commented out
# (dead code kept for reference).
# class ScaledDotProductAttention(nn.Module):

#     def __init__(self, dropout: Optional[float] = None):
#         '''Implemented simple attention'''
#         super(ScaledDotProductAttention, self).__init__()
#         self.dropout = nn.Dropout(p=dropout) if dropout is not None else nn.Identity()
#         self.attn_type = 'entmax15'
#         # changed: override to plain softmax instead of entmax15
#         self.attn_type = 'softmax'
#         # self.conv2d_q = nn.Conv2d(28, 28, (1, 1))
#         # self.conv2d_k = nn.Conv2d(28, 28, (1, 1))
#         # self.conv2d_v = nn.Conv2d(28, 28, (1, 1))
#         self.conv1d_q = nn.Linear(2048, 256)
#         self.conv1d_k = nn.Linear(2048, 256)
#         self.conv1d_v = nn.Linear(2048, 256)

#     def forward(self, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor,
#                 mask: Optional[torch.Tensor] = None) -> torch.Tensor:
#         # changed: project q/k/v through linear layers first
#         q = self.conv1d_q(q)
#         k = self.conv1d_k(k)
#         v = self.conv1d_v(v)
#         scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(q.size(-1))
#         # scores = torch.matmul(k.transpose(-2, -1), q) / math.sqrt(q.size(-1))

#         if mask is not None:
#             scores = scores.masked_fill(mask == 0, -1e12)
#         if self.attn_type == 'softmax':
#             attn = F.softmax(scores, dim=-1)
#         elif self.attn_type == 'sparsemax':
#             attn = sparsemax(scores, dim=-1)
#         elif self.attn_type == 'entmax15':
#             attn = entmax15(scores, dim=-1)
#         elif self.attn_type == 'entmax':
#             attn = entmax_bisect(scores, alpha=1.6, dim=-1, n_iter=25)
#         return torch.matmul(attn, v)
        # return torch.matmul(v, attn)

class FC(nn.Module):
    """Fully connected block: a linear projection followed by PReLU.

    NOTE(review): ``self.bn`` is constructed but never applied in
    ``forward``. It is kept here unchanged so existing checkpoints
    (whose state dicts contain the BatchNorm parameters) still load.
    """

    def __init__(self, inplanes, outplanes):
        super(FC, self).__init__()
        self.fc = nn.Linear(inplanes, outplanes)
        self.bn = nn.BatchNorm1d(outplanes)  # unused in forward (see NOTE)
        self.act = nn.PReLU()

    def forward(self, x):
        # Linear -> PReLU; batch norm is intentionally skipped to match
        # the original behavior.
        return self.act(self.fc(x))


class GDN(nn.Module):
    """Group Decision Network: two FC blocks producing group probabilities.

    Returns both the intermediate representation (for downstream use) and
    a softmax over the output dimension.
    """

    def __init__(self, inplanes, outplanes, intermediate_dim=256):
        super(GDN, self).__init__()
        self.fc1 = FC(inplanes, intermediate_dim)
        self.fc2 = FC(intermediate_dim, outplanes)
        # Fix: the original built nn.Softmax() with an implicit dim (which
        # is deprecated and emits a warning) and then never used it,
        # calling torch.softmax(out, dim=1) in forward instead. Construct
        # it with an explicit dim and actually use it — same behavior.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        intermediate = self.fc1(x)
        out = self.fc2(intermediate)
        # Equivalent to torch.softmax(out, dim=1) from the original code.
        return intermediate, self.softmax(out)



class MultiHeads(nn.Module):
    """Group-aware feature aggregation head.

    Computes per-group probabilities from the input feature map, applies a
    per-group 1x1-conv transform to the matching context map in ``c_l``,
    weights each transformed map by its group probability, sums the result,
    and adds it to the instance representation.

    Args:
        feature_dim: channel count of the aggregated features (default 2048).
        groups: number of groups (default 4; the original hard-coded 4 in
            several places — parameterized here, default-compatible).
        mode: only ``'S'`` is implemented.
        backbone_fc_dim: unused in this implementation; kept for
            signature compatibility.

    Forward inputs (assumed, TODO confirm against caller): ``x`` is
    (B, 2048, H, W); ``c_l`` is a sequence of ``groups`` maps of the same
    shape. The probability-expand step below only broadcasts when the
    spatial map is 1x1.
    """

    def __init__(self, feature_dim=2048, groups=4, mode='S', backbone_fc_dim=2048):
        super(MultiHeads, self).__init__()
        self.mode = mode
        self.groups = groups
        # Instance branch: channel-preserving 1x1 conv + BN + ReLU.
        self.layers1 = nn.Sequential(nn.Conv2d(2048, 2048, kernel_size=1),
                                     nn.BatchNorm2d(2048),
                                     nn.ReLU(inplace=True))
        # Group-probability branch: projects to ``groups`` channels
        # (original hard-coded 4; groups defaults to 4, so this is
        # backward compatible).
        self.layers = nn.Sequential(nn.Conv2d(2048, groups, kernel_size=1),
                                    nn.BatchNorm2d(groups),
                                    nn.ReLU(inplace=True))
        # One transform per group, applied to the matching entry of c_l
        # (original listed four identical Sequentials explicitly).
        self.group_layers = nn.ModuleList([
            nn.Sequential(nn.Conv2d(2048, 2048, kernel_size=1),
                          nn.BatchNorm2d(2048),
                          nn.ReLU(inplace=True))
            for _ in range(groups)])
        self.feature_dim = feature_dim

    def forward(self, x, c_l):
        B = x.shape[0]
        instance_representation = self.layers1(x)
        group_prob = self.layers(instance_representation)
        # Normalize group scores over the group (channel) dimension.
        group_prob = torch.softmax(group_prob, dim=1)
        # Group-aware representations, one per group.
        v_G = [Gk(c_l[i]) for (i, Gk) in enumerate(self.group_layers)]

        if self.mode != 'S':
            # Fix: the original fell through to an undefined
            # ``group_ensembled`` (NameError) for any mode other than 'S';
            # fail loudly with a clear message instead.
            raise ValueError(f"unsupported mode: {self.mode!r} (only 'S' is implemented)")

        # Group ensemble: probability-weighted sum of group representations.
        group_mul_p_vk = []
        for k in range(self.groups):
            # NOTE(review): this expand only broadcasts when the spatial
            # map is 1x1 (group_prob[:, k] must be (B, 1, 1)) — confirm
            # the upstream feature-map size.
            Pk = group_prob[:, k].unsqueeze(dim=-1).expand(B, self.feature_dim, 1, 1)
            group_mul_p_vk.append(torch.mul(v_G[k], Pk))
        group_ensembled = torch.stack(group_mul_p_vk).sum(dim=0)

        # Instance + group aggregation. The string entries preserve the
        # original return arity ('group_inter'/'group_prob'/'group_label'
        # were placeholders in the original as well).
        final = instance_representation + group_ensembled
        return 'group_inter', final, 'group_prob', 'group_label'

if __name__ == '__main__':
    # Fix: the original instantiated ScaledDotProductAttention, which is
    # commented out above, so running this file raised a NameError.
    # Smoke-test the live MultiHeads module instead. 1x1 spatial maps are
    # used because the group-probability expand inside MultiHeads only
    # broadcasts to (B, feature_dim, 1, 1).
    torch.manual_seed(0)
    model = MultiHeads().eval()
    x = torch.randn(2, 2048, 1, 1)
    c_l = [torch.randn(2, 2048, 1, 1) for _ in range(4)]
    _, final, _, _ = model(x, c_l)
    print('final shape:', tuple(final.shape))
