# CNN model definition
"""
inputs(36*36*1) ->
conv(18*18*64) -> conv(9*9*128) -> conv(4*4*256) ->
Dropout -> fc(4096) -> Dropout -> fc(1024) ->
outputs(7)
(flattened conv output: 256*4*4 = 4096, matching fc1's input size)
"""
from telnetlib import X3PAD
import torch
import torch.nn as nn
from .attention import se_block, cbam_block, eca_block
from thop import profile


# # Parameter initialization (currently unused)
# def gaussian_weights_init(m):
#     classname = m.__class__.__name__
#     # str.find returns -1 when not found; != -1 means the name contains 'Conv'
#     if classname.find('Conv') != -1:
#         m.weight.data.normal_(0.0, 0.04)


class FaceCNN(nn.Module):
    """CNN for facial-expression classification.

    For a 36x36 single-channel input (as used in ``__main__``) the data flow is:
        (1, 36, 36) -> conv1 (64, 18, 18) -> conv2 (128, 9, 9)
        -> conv3 (256, 4, 4) -> flatten (4096) -> fc stack -> num_classes logits

    Args:
        num_classes: number of output classes (default 7 expressions).
        attention_name: ``None`` disables attention; ``"se"`` and ``"cbam"``
            select those blocks; any other non-None value selects ECA
            (preserves the original fallthrough behavior).
    """

    def __init__(self, num_classes=7, attention_name=None):
        super(FaceCNN, self).__init__()
        # Each stage: 3x3 conv (stride 1, pad 1) -> BN -> RReLU -> 2x2 max-pool,
        # halving the spatial size while growing the channel count.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 64, 3, 1, 1),
            nn.BatchNorm2d(num_features=64),
            nn.RReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, 128, 3, 1, 1),
            nn.BatchNorm2d(num_features=128),
            nn.RReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.conv3 = nn.Sequential(
            nn.Conv2d(128, 256, 3, 1, 1),
            nn.BatchNorm2d(num_features=256),
            nn.RReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )

        # Classifier head: flattened 4096 conv features -> num_classes logits.
        self.fc1 = nn.Sequential(
            nn.Dropout(p=0.2),
            nn.Linear(4096, 4096),
            nn.RReLU(inplace=True),

            nn.Dropout(p=0.5),
            nn.Linear(4096, 1024),
            nn.RReLU(inplace=True),

            nn.Linear(1024, 256),
            nn.RReLU(inplace=True),
            nn.Linear(256, num_classes)
        )

        self.attention_name = attention_name
        if attention_name is not None:
            # Unknown names fall back to ECA, matching the original else-branch.
            # NOTE: attributes keep the "seAttention" prefix for all variants so
            # existing checkpoints / callers remain compatible.
            block = {"se": se_block, "cbam": cbam_block}.get(attention_name, eca_block)
            self.seAttention1 = block(64)
            self.seAttention2 = block(128)
            self.seAttention3 = block(256)

    def forward(self, x):
        """Run the network; returns raw class logits of shape (batch, num_classes)."""
        use_attention = self.attention_name is not None
        x = self.conv1(x)            # (batch, 64, H/2, W/2)
        if use_attention:
            x = self.seAttention1(x)
        x = self.conv2(x)            # (batch, 128, H/4, W/4)
        if use_attention:
            x = self.seAttention2(x)
        x = self.conv3(x)            # (batch, 256, H/8, W/8)
        if use_attention:
            x = self.seAttention3(x)

        x = x.view(x.shape[0], -1)   # flatten to (batch, 4096) for 36x36 input
        return self.fc1(x)


if __name__ == '__main__':
    # Quick sanity check: print a per-layer parameter / FLOP summary.
    from torchstat import stat

    # snake_case local (was PascalCase "Facecnn", which reads as a class name).
    face_cnn = FaceCNN(num_classes=7, attention_name="eca")
    # (channels, H, W) = (1, 36, 36) matches fc1's 256*4*4 = 4096 input sizing.
    stat(face_cnn, (1, 36, 36))
