import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
from models.blocks import LayerNorm
from torchmetrics.functional import pairwise_cosine_similarity


class Attn_Net(nn.Module):
    """Plain (non-gated) attention scorer: Linear -> Tanh [-> Dropout] -> Linear."""

    def __init__(self, L=1024, D=256, dropout=False, n_classes=1):
        super(Attn_Net, self).__init__()
        layers = [nn.Linear(L, D), nn.Tanh()]
        if dropout:
            layers.append(nn.Dropout(0.25))
        layers.append(nn.Linear(D, n_classes))
        self.module = nn.Sequential(*layers)

    def forward(self, x):
        # x: (S, L) where S indexes the spatial sample points.
        # Returns (scores (S, n_classes), x unchanged).
        return self.module(x), x


class Attn_Net_Gated(nn.Module):
    """Gated attention: (Linear->Tanh) branch multiplied by (Linear->Sigmoid) branch,
    then a final linear layer producing per-sample attention scores."""

    def __init__(self, L=1024, D=256, dropout=False, n_classes=1):
        super(Attn_Net_Gated, self).__init__()
        branch_a = [nn.Linear(L, D), nn.Tanh()]
        branch_b = [nn.Linear(L, D), nn.Sigmoid()]
        if dropout:
            branch_a.append(nn.Dropout(0.25))
            branch_b.append(nn.Dropout(0.25))

        self.attention_a = nn.Sequential(*branch_a)
        self.attention_b = nn.Sequential(*branch_b)
        self.attention_c = nn.Linear(D, n_classes)

    def forward(self, x):
        # x: (S, L) where S indexes the spatial sample points, L is channels.
        gated = self.attention_a(x) * self.attention_b(x)  # (S, D) elementwise gate
        scores = self.attention_c(gated)  # (S, n_classes)
        return scores, x


class Attn_ConvNet(nn.Module):
    """Gated attention implemented with 1x1 convolutions over the sample axis."""

    def __init__(self, L=1024, D=64, n_classes=1):
        super(Attn_ConvNet, self).__init__()
        groups = 1
        self.conv1 = nn.Sequential(
            nn.Conv1d(L, D, kernel_size=1, stride=1, padding=0,
                      bias=False, groups=groups),
            LayerNorm(D,),
            nn.Tanh(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv1d(L, D, kernel_size=1, stride=1, padding=0,
                      bias=False, groups=groups),
            LayerNorm(D,),
            nn.Sigmoid()
        )
        self.conv3 = nn.Sequential(
            nn.Conv1d(D, n_classes, kernel_size=1, stride=1, padding=0,
                      bias=False),
            # nn.ReLU6(),  # adding this makes the attention vanish
        )

    def forward(self, x):
        # x: (S, L) where S indexes the spatial sample points, L is channels.
        features = x
        h = x.unsqueeze(dim=0).permute(0, 2, 1)  # (B=1, L, S)

        gated = self.conv1(h) * self.conv2(h)    # tanh * sigmoid gate, (B, D, S)
        attn = self.conv3(gated)                 # (B, n_classes, S)
        attn = attn.permute(0, 2, 1).squeeze(dim=0)  # (S, n_classes)
        return attn, features


class Attn_External(nn.Module):
    """External-attention style scorer with two learned memory units (Mk, Mv)."""

    def __init__(self, d_model, S=64, n_classes=1):
        super().__init__()
        self.mk = nn.Linear(d_model, S, bias=False)
        self.mv = nn.Linear(S, n_classes, bias=False)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        # Promote unbatched (n, d_model) input to (bs=1, n, d_model).
        if x.dim() == 2:
            x = x.unsqueeze(dim=0)
        attn = self.softmax(self.mk(x))               # (bs, n, S), softmax over n
        attn = attn / attn.sum(dim=2, keepdim=True)   # re-normalize over S
        scores = self.mv(attn)                        # (bs, n, n_classes)
        return scores.squeeze(dim=0), x


class ChannelAttention1D(nn.Module):
    """CBAM-style channel attention: avg- and max-pooled descriptors pass through
    a shared bottleneck MLP (1x1 convs) and are summed before a sigmoid."""

    def __init__(self, channel, ratio=4):
        super(ChannelAttention1D, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.max_pool = nn.AdaptiveMaxPool1d(1)

        self.shared_MLP = nn.Sequential(
            nn.Conv1d(channel, channel // ratio, 1, bias=False),
            nn.ReLU(),
            nn.Conv1d(channel // ratio, channel, 1, bias=False)
        )
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # x: (S, C); reshape to (B=1, C, S) for the 1-d pooling layers.
        h = x.permute([1, 0]).unsqueeze(dim=0)
        pooled = self.shared_MLP(self.avg_pool(h)) + self.shared_MLP(self.max_pool(h))
        # Returns channel weights of shape (1, C) in (0, 1).
        return self.sigmoid(pooled).squeeze(dim=-1)


class Attn_CoordNet(nn.Module):
    """Gated attention modulated by a coordinate-aware reweighting term.

    Patch coordinates are min-max normalized and projected to a positional
    encoding ``pe`` (2 -> L). The encoding is used twice: (a) added to the
    patch features before the gated-attention branches, and (b) turned into a
    pairwise-similarity weighting that produces a per-patch vector ``r``
    which rescales the raw attention scores.
    """

    def __init__(self, L=1024, D=256, n_classes=1, ):
        super(Attn_CoordNet, self).__init__()

        self.attention_a = [nn.Linear(L, D),
                            nn.Tanh()]

        self.attention_b = [nn.Linear(L, D),
                            nn.Sigmoid()]

        self.attention_a = nn.Sequential(*self.attention_a)
        self.attention_b = nn.Sequential(*self.attention_b)

        self.attention_c = nn.Linear(D, n_classes)

        # NOTE(review): gamma is registered but never used in forward();
        # kept so previously saved checkpoints still load.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.coord_fc = nn.Sequential(
            nn.Linear(2, L),
        )

        self.similar_fc = nn.Sequential(
            nn.Linear(L, D),
        )
        # Softmax over the patch dimension (dim=0).
        self.softmax = nn.Softmax(dim=0)

    def forward(self, x, coords):
        """x: (S, L) patch features; coords: (S, 2) patch coordinates.

        Returns (A2 (S, n_classes) reweighted attention scores,
                 x (S, L) features with positional encoding added).
        """
        # Positional encoding from normalized coordinates.
        coords = self._coord_normalize(coords)
        pe = self.coord_fc(coords)
        # Pairwise dot-products between positional encodings (diagonal zeroed).
        dist = self._distance(pe)
        # Spatially weighted feature-similarity vector used to rescale scores.
        r = self._similar(x, dist)

        # Add positional encoding to the patch features.
        x = x + pe

        # Gated attention: tanh branch * sigmoid branch -> linear score.
        a = self.attention_a(x)  # (S, D)
        b = self.attention_b(x)  # (S, D)
        A = a.mul(b)  # (S, D)

        A = self.attention_c(A)  # (S, n_classes)
        A2 = A * r  # rescale each patch's score by its similarity weight
        return A2, x

    def _similar(self, x, d):
        """Per-patch similarity weights: softmax over patches of the mean
        pairwise feature similarity, downweighted by (1 - d)."""
        x1 = self.similar_fc(x)          # (S, D)
        x1t = x1.permute(1, 0)
        energy = x1 @ x1t                # (S, S) feature Gram matrix

        energy = energy * (1 - d)        # spatial modulation
        corr = energy.mean(dim=1, keepdim=True)  # (S, 1)
        corr = self.softmax(corr)        # normalized over patches
        return corr

    def _distance(self, c):
        """Pairwise dot-product matrix of the encodings, diagonal zeroed.

        NOTE(review): despite the name this is a similarity (Gram matrix),
        not a metric distance — larger values mean more-aligned encodings.
        """
        c_T = c.permute(1, 0)
        dist = torch.mm(c, c_T)
        dist = dist.float()

        # Zero the diagonal so a patch does not modulate itself.
        diag = torch.diag(dist)
        a_diag = torch.diag_embed(diag)
        dist = dist - a_diag
        return dist

    def _coord_normalize(self, c):
        """Min-max normalize coordinates per axis to [0, 1].

        Fix: the previous version divided by (max - min), producing NaN
        (0/0) whenever every patch shared the same value on an axis. The
        span is now clamped away from zero; results are unchanged for
        non-degenerate input.
        """
        c = c.float()

        min_c, _ = torch.min(c, dim=0)
        max_c, _ = torch.max(c, dim=0)
        span = (max_c - min_c).clamp(min=1e-8)  # avoid division by zero
        return (c - min_c) / span


if __name__ == '__main__':
    # Smoke-test the attention modules by printing torchinfo summaries.
    from torchinfo import summary

    columns = ['input_size', 'output_size', 'num_params']

    channel_attn = ChannelAttention1D(channel=512, ratio=4)
    summary(channel_attn, (100, 512), col_names=columns)

    print("############### Attn_CoordNet #############")
    coord_attn = Attn_CoordNet(L=512, D=256, n_classes=1)
    summary(coord_attn, [(40, 512), (40, 2)], col_names=columns)
