import torch
import torch.nn as nn
import torch.nn.init as netinit

# Model definition
class FCN_LSTM(nn.Module):
    """FCN-LSTM hybrid classifier for univariate time series.

    Two parallel branches consume the same ``(batch, seq_len)`` input:

    * an LSTM branch: each scalar time step is fed as a 1-feature input;
      only the last time step's hidden state (``num_cells`` wide) is kept,
      followed by dropout;
    * a fully-convolutional branch: three Conv1d-BatchNorm-ReLU stages
      (1->128->256->128 channels) followed by global average pooling,
      yielding a 128-wide feature vector.

    The two feature vectors are concatenated, projected to ``nb_class``
    logits and passed through softmax.

    NOTE(review): softmax is applied inside the model. If this is trained
    with ``nn.CrossEntropyLoss`` (which expects raw logits), softmax is
    effectively applied twice — confirm against the training loop.
    """

    def __init__(self, max_sequence_length, nb_class, num_cells=8):
        """
        Args:
            max_sequence_length: number of time steps per sequence
                (stored for reference; the layers themselves are
                length-agnostic thanks to adaptive pooling).
            nb_class: number of output classes.
            num_cells: hidden size of the LSTM branch.
        """
        super(FCN_LSTM, self).__init__()

        self.max_sequence_length = max_sequence_length  # time steps per sequence
        self.nb_class = nb_class                        # number of target classes
        self.num_cells = num_cells                      # LSTM hidden size

        # LSTM over scalar time steps: input feature size 1, hidden size
        # num_cells, batch dimension first.
        self.lstm = nn.LSTM(1, self.num_cells, batch_first=True)

        # Dropout applied to the LSTM's last hidden output.
        # NOTE(review): the original FCN-LSTM paper uses p=0.8 here; this
        # implementation uses p=0.1 — confirm which is intended.
        self.dropout = nn.Dropout(p=0.1)

        # Conv stage 1: 1 -> 128 channels, kernel 8.
        # NOTE: padding (8-1)//2 = 3 on an even kernel shortens the
        # sequence by one step (L -> L-1); the adaptive pool below makes
        # this harmless.
        self.conv1 = nn.Conv1d(1, 128, 8, padding=(8 - 1) // 2)
        self.bn1 = nn.BatchNorm1d(128)
        self.relu1 = nn.ReLU()

        # Conv stage 2: 128 -> 256 channels, kernel 5, length-preserving.
        self.conv2 = nn.Conv1d(128, 256, 5, padding=(5 - 1) // 2)
        self.bn2 = nn.BatchNorm1d(256)
        self.relu2 = nn.ReLU()

        # Conv stage 3: 256 -> 128 channels, kernel 3, length-preserving.
        self.conv3 = nn.Conv1d(256, 128, 3, padding=(3 - 1) // 2)
        self.bn3 = nn.BatchNorm1d(128)
        self.relu3 = nn.ReLU()

        # Final classifier over the concatenated [LSTM | conv] features.
        self.linear = nn.Linear(128 + num_cells, self.nb_class)

        # Global average pooling: collapses the time axis to length 1.
        self.pool = nn.AdaptiveAvgPool1d(1)

        # Class-probability output.
        self.softmax = nn.Softmax(dim=1)

        # He initialization for the conv weights (ReLU activations follow).
        netinit.kaiming_normal_(self.conv1.weight, mode='fan_in', nonlinearity='relu')
        netinit.kaiming_normal_(self.conv2.weight, mode='fan_in', nonlinearity='relu')
        netinit.kaiming_normal_(self.conv3.weight, mode='fan_in', nonlinearity='relu')

    def forward(self, input):
        """Classify a batch of sequences.

        Args:
            input: float tensor of shape ``(batch_size, max_sequence_length)``.

        Returns:
            ``(batch_size, nb_class)`` tensor of class probabilities
            (each row sums to 1).
        """
        # LSTM branch: (B, L) -> (B, L, 1) so each time step is a
        # 1-feature input; keep only the last time step's output.
        seq = input.unsqueeze(2)
        lstm_out, _ = self.lstm(seq)
        x = lstm_out[:, -1, :]          # (B, num_cells)
        x = self.dropout(x)

        # Convolutional branch: (B, L) -> (B, 1, L), channels-first for Conv1d.
        y = input.unsqueeze(1)
        y = self.relu1(self.bn1(self.conv1(y)))
        y = self.relu2(self.bn2(self.conv2(y)))
        y = self.relu3(self.bn3(self.conv3(y)))

        # Global average pool over time, then flatten: (B, 128, 1) -> (B, 128).
        y = self.pool(y)
        y = y.reshape(-1, 128)

        # Fuse both branches and classify.
        out = torch.cat((x, y), dim=1)  # (B, num_cells + 128)
        out = self.linear(out)
        out = self.softmax(out)

        return out
     
# model = FCN_LSTM(max_sequence_length = 176, nb_class = 8, num_cells = 8)

# # # print(model)

# x = torch.randn((10,176))
# y = model.forward(x)
# print(x)
# print(y)


         
         
    
        
        
        
        