import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.efficientnet import tf_efficientnet_b0_ns, tf_efficientnet_b3_ns, tf_efficientnet_b4_ns
__all__ = ['self_attn_space']

class Self_Attn(nn.Module):
    """SAGAN-style spatial self-attention layer.

    Computes attention over all spatial positions of a feature map and
    adds the attended features back to the input, scaled by a learnable
    ``gamma`` initialized to zero — so the layer starts as the identity
    and learns how much attention to mix in.
    """

    def __init__(self, in_dim):
        """
        Args:
            in_dim (int): number of input channels. Query/key projections
                bottleneck to ``in_dim // 3`` channels, so ``in_dim`` should
                be at least 3.
        """
        super(Self_Attn, self).__init__()
        # NOTE: attribute name kept (misspelled) for backward compatibility
        # with any external code reading it.
        self.chanel_in = in_dim
        # NOTE(review): the canonical SAGAN layer uses in_dim // 8 for the
        # query/key bottleneck; this variant deliberately uses in_dim // 3.
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 3, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 3, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        # Zero-initialized residual gate: output == input at initialization.
        self.gamma = nn.Parameter(torch.zeros(1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
        Args:
            x: input feature map of shape (B, C, H, W).

        Returns:
            Tensor of shape (B, C, H, W): ``gamma * attention_out + x``.
        """
        batch, channels, height, width = x.size()
        n = height * width  # number of spatial positions
        # Project and flatten the spatial dimensions.
        proj_query = self.query_conv(x).view(batch, -1, n).permute(0, 2, 1)  # B x N x C'
        proj_key = self.key_conv(x).view(batch, -1, n)                       # B x C' x N
        energy = torch.bmm(proj_query, proj_key)                             # B x N x N
        attention = self.softmax(energy)  # each row sums to 1 over positions
        proj_value = self.value_conv(x).view(batch, -1, n)                   # B x C x N
        out = torch.bmm(proj_value, attention.permute(0, 2, 1))              # B x C x N
        out = out.view(batch, channels, height, width)
        # Learnable residual mix; gamma starts at 0 (identity mapping).
        return self.gamma * out + x

class SelfAttnSpace(nn.Module):
    """Spatial self-attention followed by an EfficientNet-B0 classifier.

    The attention layer operates directly on the raw input channels; its
    residual output (same shape as the input) is fed to the
    ``tf_efficientnet_b0_ns`` backbone for classification.
    """

    def __init__(self, in_dim, num_classes, pretrained=False):
        """
        Args:
            in_dim (int): number of input channels (3 for RGB images).
            num_classes (int): output size of the backbone classifier head.
            pretrained (bool): load pretrained backbone weights via timm.
        """
        super(SelfAttnSpace, self).__init__()
        self.self_attn = Self_Attn(in_dim)
        self.features = tf_efficientnet_b0_ns(pretrained=pretrained, num_classes=num_classes)

    def forward(self, x):
        """Apply self-attention, then classify.

        Args:
            x: input batch of shape (B, in_dim, H, W).

        Returns:
            Backbone logits of shape (B, num_classes).
        """
        # FIX: removed leftover per-forward debug printing of tensor shapes.
        attended = self.self_attn(x)  # same shape as x (residual attention)
        return self.features(attended)

def self_attn_space(in_dim=3, batch_size=4, size=112):
    """Smoke-test ``Self_Attn`` by forwarding a random batch through it.

    Previously all constants were hard-coded and the module was discarded;
    the parameters default to the original values, so existing callers are
    unaffected.

    Args:
        in_dim (int): channel count for the layer and the random input.
        batch_size (int): batch dimension of the random input.
        size (int): spatial height and width of the random input.

    Returns:
        Self_Attn: the constructed (and exercised) attention module.
    """
    attention_module = Self_Attn(in_dim=in_dim)
    x = torch.rand((batch_size, in_dim, size, size))  # B, C, H, W
    attention_module(x)
    return attention_module

if __name__ == '__main__':
    # Manual sanity check: forward one random batch through the full model.
    net = SelfAttnSpace(in_dim=3, num_classes=2, pretrained=True)
    batch = torch.rand((4, 3, 112, 112))
    predictions = net(batch)