import torch
import torch.nn as nn
from self_attention import SpatialTemporalAttentionNoBatch


# input_size [channel_nums=1, frame_nums, n_mfcc=30, 44]
class CSBSModel(nn.Module):
    """CNN + spatial-temporal attention + BiLSTM classifier.

    Processes one sample at a time: the input is expected to be shaped
    (1, channels, n_frames, n_mfcc, time) — presumably MFCC features per
    frame (TODO confirm against the data loader). The leading batch dim of
    1 is dropped and frames are treated as the sequence axis.

    Args:
        input_channels: channels of each per-frame feature map (e.g. 1).
        output_channels: filters in the first conv layer; the second conv
            doubles this.
        kernel_size: conv kernel size for both conv layers.
        lstm_hidden_size: hidden size of the (bidirectional) LSTM.
        num_lstm_layers: number of stacked LSTM layers.
    """

    def __init__(self, input_channels, output_channels, kernel_size, lstm_hidden_size, num_lstm_layers):
        super().__init__()

        # Conv stage 1: conv -> BN -> halve spatial dims -> ReLU.
        self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=kernel_size, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(output_channels)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, ceil_mode=True)
        self.relu1 = nn.ReLU()

        # Conv stage 2: conv -> BN -> fixed 4x4 output -> ReLU.
        self.conv2 = nn.Conv2d(output_channels, output_channels * 2, kernel_size=kernel_size, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(output_channels * 2)
        self.pool2 = nn.AdaptiveAvgPool2d((4, 4))  # always (4, 4) regardless of input size
        self.relu2 = nn.ReLU()

        # Attention over the conv feature maps (project-local module).
        self.attn1 = SpatialTemporalAttentionNoBatch(output_channels * 2)

        # Sequence model over frames; input per step is the flattened
        # (channels*2) x 4 x 4 feature map. seq-first layout (batch_first=False).
        self.lstm = nn.LSTM(output_channels * 2 * 4 * 4, lstm_hidden_size, num_lstm_layers,
                            batch_first=False, bidirectional=True)
        # Attention over the BiLSTM outputs (hidden*2 due to bidirectionality).
        self.attn2 = SpatialTemporalAttentionNoBatch(lstm_hidden_size * 2)

        # Classification head: scalar logit per frame.
        self.fc = nn.Sequential(
            nn.Linear(lstm_hidden_size * 2, 64),
            nn.ReLU(),
            nn.Linear(64, 1),  # single output unit
        )

    def forward(self, x):
        """Run one sample through the network.

        Args:
            x: tensor of shape (1, channels, n_frames, H, W).

        Returns:
            Tensor of shape (n_frames, 1) — one logit per frame.
        """
        # Drop the size-1 batch dim, then reorder so frames act as the
        # "batch" axis for the 2-D conv layers: (n_frames, channels, H, W).
        x = x.squeeze(0).permute(1, 0, 2, 3)

        x = self.pool1(self.relu1(self.bn1(self.conv1(x))))
        x = self.pool2(self.relu2(self.bn2(self.conv2(x))))

        # First attention pass, then flatten each frame's feature map.
        attended = self.attn1(x)
        flat = attended.reshape(attended.size(0), -1)  # (n_frames, ch*4*4)

        # LSTM expects (seq, batch, feat); use a dummy batch of 1.
        seq_out, _ = self.lstm(flat[:, None, :])
        seq_out = seq_out.squeeze(1)

        # Restore trailing spatial dims so the attention module accepts it.
        seq_out = seq_out[:, :, None, None]
        attended2 = self.attn2(seq_out).squeeze(-1).squeeze(-1)

        return self.fc(attended2)


if __name__ == '__main__':
    # Smoke test: one sample of 81 frames of 30x44 single-channel features.
    model = CSBSModel(1, 32, 3, 128, 2)
    # Renamed from `input` to avoid shadowing the builtin.
    sample = torch.randn(1, 1, 81, 30, 44)
    output = model(sample)
    print(output)
