import torch
import hyperparameters
from torch import nn


class SERNet_Block(nn.Module):
    """One building block of the Light-SERNet model.

    Every block follows the same Conv2d -> BatchNorm2d -> ReLU -> AvgPool2d
    pattern; blocks only differ in their convolution/pooling geometry.

    Args:
        input_channel: number of input channels.
        out_channel: number of output channels.
        conv_size: convolution kernel size.
        conv_padding: convolution padding.
        conv_stride: convolution stride.
        pool_size: average-pooling kernel size.
        pool_padding: average-pooling padding.
        pool_stride: average-pooling stride.
        conv_bias: whether the convolution uses a bias term (default: True).
    """

    def __init__(self, input_channel, out_channel,
                 conv_size, conv_padding, conv_stride,
                 pool_size, pool_padding, pool_stride, conv_bias=True) -> None:
        super().__init__()

        self.conv = nn.Conv2d(
            in_channels=input_channel,
            out_channels=out_channel,
            kernel_size=conv_size,
            stride=conv_stride,
            padding=conv_padding,
            bias=conv_bias,
        )
        self.norm = nn.BatchNorm2d(out_channel)
        self.relu = nn.ReLU()
        self.avg = nn.AvgPool2d(
            kernel_size=pool_size, stride=pool_stride, padding=pool_padding
        )

    def forward(self, x):
        """Apply convolution, batch-norm, ReLU and average pooling in order."""
        out = self.relu(self.norm(self.conv(x)))
        return self.avg(out)


class Light_SERNet_V1(nn.Module):
    """Re-implementation of the Light-SERNet model.

    MFCC extraction of a 4.0 s clip yields an input of shape (1, 40, 251).

    Args:
        output_class: number of classes emitted by the final classifier.
    """

    def __init__(self, output_class) -> None:
        super().__init__()

        # Body Part I: three parallel blocks with differently shaped kernels
        # (square, tall and wide receptive fields) over the raw MFCC input.
        self.part1_1 = SERNet_Block(1, 32, 3, 1, 1, 2, 0, 2)
        self.part1_2 = SERNet_Block(1, 32, (9, 1), (4, 0), 1, 2, 0, 2)
        self.part1_3 = SERNet_Block(1, 32, (1, 11), (0, 5), 1, 2, 0, 2)

        # Body Part II: sequential blocks on the concatenated branch output.
        self.part2_1 = SERNet_Block(96, 64, 3, 1, 1, 2, 0, 2, conv_bias=False)
        self.part2_2 = SERNet_Block(64, 96, 3, 1, 1, 2, 0, 2, conv_bias=False)
        self.part2_3 = SERNet_Block(96, 128, 3, 1, 1, (1, 2), (0, 1), (1, 2), conv_bias=False)
        self.part2_4 = SERNet_Block(128, 160, 3, 1, 1, (1, 2), 0, (1, 2), conv_bias=False)
        self.part2_5 = nn.Sequential(
            nn.Conv2d(160, 320, 1, bias=False),
            nn.BatchNorm2d(320),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d((1, 1)),
        )

        # Head classifier.
        # NOTE(review): Softmax is applied inside the model. If training uses
        # nn.CrossEntropyLoss this applies softmax twice — confirm against the
        # training loop before relying on these outputs as probabilities.
        self.drop = nn.Dropout(p=hyperparameters.DROPOUT)
        self.line = nn.Linear(320, output_class)
        self.out = nn.Softmax(dim=1)

    def forward(self, x):
        """Run the full network; shape comments assume a 3.0 s clip."""
        branches = (
            self.part1_1(x),                       # (32, 20, 125)
            self.part1_2(x),                       # (32, 20, 125)
            self.part1_3(x),                       # (32, 20, 125)
        )
        feat = torch.cat(branches, dim=1)          # (96, 20, 125)
        feat = self.part2_1(feat)                  # (64, 10, 62)
        feat = self.part2_2(feat)                  # (96, 5, 31)
        feat = self.part2_3(feat)                  # (128, 5, 16)
        feat = self.part2_4(feat)                  # (160, 5, 8)
        feat = self.part2_5(feat).reshape(-1, 320) # (batch_size, 320)
        feat = self.drop(feat)                     # (batch_size, 320)
        return self.out(self.line(feat))           # (batch_size, output_class)


if __name__ == "__main__":
    # Smoke test: push one random MFCC-shaped tensor (4.0 s clip) through the
    # network and report the input/output shapes.
    x = torch.randn(1, 1, 40, 251)
    net = Light_SERNet_V1(len(hyperparameters.CASIA_LABELS))
    print("original shape: ", x.shape)  # fixed typo: "orginal" -> "original"
    y = net(x)
    # Print the result instead of discarding it, so the smoke test shows the
    # classifier output shape as well.
    print("output shape: ", y.shape)
