import torch.nn as nn

# Convolution layers use torch.nn.Conv1d
# Activation layers use torch.nn.ReLU
# Pooling layers use torch.nn.MaxPool1d
# Fully connected layers use torch.nn.Linear
# Normalization uses nn.BatchNorm1d(conv output channels); most common in conv nets (guards against vanishing/exploding gradients)

class LSTMCNN(nn.Module):
    """LSTM encoder followed by a 1-D CNN stack and an MLP head.

    Takes a batch of sequences shaped (batch, seq_len, C_in) (batch_first LSTM),
    keeps only the last LSTM time step, runs it through three conv/pool stages
    as a 1-channel signal, and regresses a single scalar per sample.
    """

    def __init__(self, C_in):
        super().__init__()
        # Sequence encoder: 64 hidden units, single layer, batch-first I/O.
        self.lstm = nn.LSTM(
            input_size=C_in,
            hidden_size=64,
            num_layers=1,
            batch_first=True,
        )
        # Three conv->BN->ReLU->pool stages over the 64-dim hidden vector:
        # length 64 -> 32 -> 16 -> 8, channels 1 -> 8 -> 16 -> 64.
        self.cnn = nn.Sequential(
            nn.Dropout(0.2),

            nn.Conv1d(1, 8, 3, 1, 1),
            nn.BatchNorm1d(8),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),

            nn.Conv1d(8, 16, 3, 1, 1),
            nn.BatchNorm1d(16),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),

            nn.Conv1d(16, 64, 3, 1, 1),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.MaxPool1d(2, 2),
        )
        # Regression head: 64 channels * length 8 = 512 flattened features -> 1 output.
        self.linear = nn.Sequential(
            nn.Linear(512, 64),
            nn.BatchNorm1d(64),
            nn.ReLU(),
            nn.Linear(64, 1),
        )

    def forward(self, x):
        """Map (batch, seq_len, C_in) input to a (batch,) tensor of predictions."""
        seq_out, _ = self.lstm(x)  # zero initial state by default (same as passing None)
        last_step = seq_out[:, -1, :]                 # (batch, 64): final time step only
        feats = self.cnn(last_step.unsqueeze(1))      # add channel dim -> (batch, 1, 64)
        flat = feats.flatten(1)                       # (batch, 64*8) = (batch, 512)
        return self.linear(flat).squeeze(-1)          # drop trailing size-1 dim -> (batch,)