
import torch
import torch.nn.functional as F
import torch.nn as nn

def conv_1x1_bn(inp, oup):
    """Pointwise (1x1) convolution followed by batch norm and ReLU.

    Args:
        inp: number of input channels.
        oup: number of output channels.

    Returns:
        An ``nn.Sequential``: Conv2d(1x1, no bias) -> BatchNorm2d -> ReLU(inplace).
    """
    layers = [
        nn.Conv2d(inp, oup, kernel_size=1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    ]
    return nn.Sequential(*layers)

class SKConv(nn.Module):
    """Selective Kernel convolution (SKNet-style branch attention).

    Runs the input through ``branches`` parallel 3x3 convolutions with
    increasing dilation, then computes per-channel softmax weights over the
    branches and returns the weighted sum of the branch outputs.

    Args:
        channels: number of input (and output) channels.
        branches: number of parallel conv branches (dilations 1..branches).
        groups: group count for the branch convolutions.
        reduce: channel reduction ratio for the attention bottleneck.
        stride: stride of the branch convolutions.
        len: lower bound on the attention bottleneck width. NOTE: this name
            shadows the builtin ``len``; it is kept only for backward
            compatibility with existing keyword callers.
    """

    def __init__(self, channels, branches=2, groups=2, reduce=2, stride=1, len=32):
        super(SKConv, self).__init__()
        # Bottleneck width: channels/reduce, but never below `len`.
        mid = max(channels // reduce, len)
        self.convs = nn.ModuleList([])
        for i in range(branches):
            # padding == dilation keeps the spatial size constant for a 3x3 kernel.
            self.convs.append(nn.Sequential(
                nn.Conv2d(channels, channels, kernel_size=3, stride=stride,
                          padding=1 + i, dilation=1 + i, groups=groups, bias=False),
                nn.BatchNorm2d(channels),
                nn.ReLU(inplace=True)
            ))
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        # Squeeze: fuse the branch sum down to the bottleneck width.
        self.fc = nn.Sequential(
            nn.Conv2d(channels, mid, kernel_size=1, stride=1, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True)
        )
        # One expansion head per branch produces that branch's channel logits.
        self.fcs = nn.ModuleList([])
        for _ in range(branches):
            self.fcs.append(
                nn.Conv2d(mid, channels, kernel_size=1, stride=1, bias=False)
            )
        # Softmax over dim=1, i.e. across the branch dimension after stacking.
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        """Apply each branch, compute selection weights, and fuse.

        x: (B, C, H, W) -> (B, C, H', W'); spatial size is preserved when
        stride == 1.
        """
        feats = [conv(x) for conv in self.convs]
        feats = torch.stack(feats, dim=1)           # (B, branches, C, H, W)
        fused = torch.sum(feats, dim=1)             # element-wise branch sum
        fused = self.gap(fused)                     # (B, C, 1, 1)
        fused = self.fc(fused)                      # (B, mid, 1, 1)
        logits = [fc(fused) for fc in self.fcs]     # branches x (B, C, 1, 1)
        logits = torch.stack(logits, dim=1)         # (B, branches, C, 1, 1)
        weights = self.softmax(logits)              # softmax over branches
        return torch.sum(feats * weights, dim=1)    # (B, C, H, W)

'''------------- 1. SE module (Squeeze-and-Excitation) -----------------------------'''
# Global average pooling + 1x1 transform + ReLU + 1x1 transform + Sigmoid
class SE_Block(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global average pooling ("squeeze") followed by a two-layer bottleneck
    MLP gated with a sigmoid ("excitation"); the resulting per-channel
    weights rescale the input feature map.

    Args:
        inchannel: number of input channels.
        ratio: bottleneck reduction ratio (c -> c/ratio -> c).
    """

    def __init__(self, inchannel, ratio=16):
        super(SE_Block, self).__init__()
        # Squeeze: collapse each channel's spatial map to a single value.
        self.gap = nn.AdaptiveAvgPool2d((1, 1))
        # Excitation: two bias-free linear layers with a sigmoid gate.
        self.fc = nn.Sequential(
            nn.Linear(inchannel, inchannel // ratio, bias=False),  # c -> c/ratio
            nn.ReLU(),
            nn.Linear(inchannel // ratio, inchannel, bias=False),  # c/ratio -> c
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        # (B, C, H, W) -> (B, C): squeeze away the spatial dimensions.
        squeezed = self.gap(x).view(batch, channels)
        # (B, C) -> (B, C, 1, 1): per-channel gating weights in (0, 1).
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        # Broadcast-multiply the gates over the original feature map.
        return x * gates.expand_as(x)

# Residual block
class resblock(nn.Module):
    """Residual block built from depthwise-separable convolutions.

    Left branch: BN -> Dropout -> SepConv(stride_1, pad 2) -> BN -> ReLU
    -> Dropout -> SepConv(stride_2, pad 1) -> BN, followed by an SE
    channel-attention gate. The shortcut is identity unless the stride or
    the channel count changes, in which case a strided 1x1 separable conv
    projects the input to the left branch's output shape.

    Args:
        inchannel: input channel count.
        outchannel: output channel count.
        kernel_size: kernel size of the two separable convs in the left branch.
        stride: overall block stride (decides whether the shortcut projects).
        stride_1: stride of the first separable conv.
        stride_2: stride of the second separable conv.
    """

    def __init__(self, inchannel, outchannel, kernel_size, stride, stride_1, stride_2):
        super(resblock, self).__init__()
        ## conv branch
        self.left = nn.Sequential(
            nn.BatchNorm2d(inchannel),
            nn.Dropout(p=0.05, inplace=False),
            self.SepConv(inchannel, outchannel, kernel_size, stride_1, padding=2),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.05, inplace=False),
            self.SepConv(outchannel, outchannel, kernel_size, stride_2, padding=1),
            nn.BatchNorm2d(outchannel)
        )
        self.SE = SE_Block(outchannel, ratio=8)  # channel attention on the left branch
        ## shortcut branch: identity unless the output shape changes
        self.short_cut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            # Project with the block stride. (Previously hard-coded to 2,
            # which produced a shape mismatch whenever stride == 1 but the
            # channel count changed; behavior is unchanged for stride == 2.)
            self.short_cut = nn.Sequential(
                self.SepConv(inchannel, outchannel, kernel_size=1, stride=stride)
            )

    def SepConv(self, in_channel, out_channel, kernel_size, stride=1, padding=0):
        """Depthwise-separable conv: depthwise k x k followed by pointwise 1x1."""
        return nn.Sequential(
            nn.Conv2d(in_channel, in_channel, kernel_size, stride, padding, groups=in_channel),
            nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1, padding=0)
        )

    def forward(self, x):
        """Return relu(SE(left(x)) + shortcut(x))."""
        left_out = self.left(x)
        left_out = self.SE(left_out)
        short_out = self.short_cut(x)
        return F.relu(left_out + short_out)


class MyModel(nn.Module):
    """Small residual CNN classifier.

    Expects input of shape (batch, H, W); a channel dimension is inserted
    in ``forward``, so ``input_channel`` is normally 1. A conv stem is
    followed by four stages of residual blocks (8 -> 8 -> 16 -> 32 -> 64
    channels, each stage striding by 2), then global average pooling,
    dropout, and a linear classification head.

    Args:
        resblock: residual-block class, instantiated as
            ``resblock(inchannel, outchannel, kernel_size, stride, stride_1, stride_2)``.
        input_channel: channel count fed to the stem conv (usually 1).
        num_class: number of output classes.
    """

    def __init__(self, resblock, input_channel, num_class):
        super(MyModel, self).__init__()
        self.pre = self._pre(input_channel, 8)  # stem: conv -> BN -> ReLU
        # Each stage: 2 blocks, kernel_size 4, first block strides by 2.
        self.layer1 = self._makelayer(resblock, 8, 8, 2, 4, stride=2)
        self.layer2 = self._makelayer(resblock, 8, 16, 2, 4, stride=2)
        self.layer3 = self._makelayer(resblock, 16, 32, 2, 4, stride=2)
        self.layer4 = self._makelayer(resblock, 32, 64, 2, 4, stride=2)
        self.pool = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling
        self.drop = nn.Dropout(p=0.2, inplace=False)
        self.fc = nn.Linear(64, num_class)

    def _pre(self, input_channel, outchannel):
        """Stem: 3x3 conv (stride 1, same padding) -> BN -> ReLU."""
        return nn.Sequential(
            nn.Conv2d(input_channel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
        )

    def _makelayer(self, resblock, inchannel, outchannel, blocknum, kernel_size, stride=1):
        """Stack ``blocknum`` residual blocks; only the first uses ``stride``."""
        strides = [stride] + [1] * (blocknum - 1)
        layers = []
        channel = inchannel
        for s in strides:
            layers.append(resblock(channel, outchannel, kernel_size, s, 1, s))
            channel = outchannel  # after the first block the width is `outchannel`
        return nn.Sequential(*layers)

    def forward(self, x):
        """(B, H, W) -> (B, num_class) logits (no softmax applied)."""
        x = x.unsqueeze(1)         # add channel dim: (B, 1, H, W)
        x = self.pre(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.pool(x)           # (B, 64, 1, 1)
        x = self.drop(x)
        x = x.view(x.size(0), -1)  # flatten to (B, 64)
        return self.fc(x)

if __name__ == "__main__":
    # Smoke test: a batch of 128 single-channel 64x239 inputs.
    # (Renamed from `input`, which shadowed the builtin; also dropped an
    # unused `torch.rand(128, 12)` tensor.)
    x = torch.rand(128, 64, 239)
    model = MyModel(resblock, 1, 3)
    outputs = model(x)
    # print(outputs.shape)
    # Report the parameter count in thousands.
    total = sum(p.numel() for p in model.parameters())
    print("Total params: %.2fK" % (total / 1e3))

























