import torch
import torch.nn as nn
from torchinfo import summary
import hiddenlayer


class RNN(nn.Module):
    """Stacked-RNN classifier for univariate sequences.

    Expects input of shape (batch, 4000, 1). Each of the four stages runs
    an RNN, a ReLU, and an average-pool over the time axis, shrinking the
    sequence length 4000 -> 799 -> 159 -> 31 -> 5 while widening features
    1 -> 16 -> 32 -> 64 -> 128. The remaining 5 * 128 = 640 features feed
    a 10-way linear head.
    """

    def __init__(self):
        super(RNN, self).__init__()

        # Recurrent layers; batch_first=True keeps tensors (batch, seq, feat).
        self.rnn1 = nn.RNN(input_size=1, hidden_size=16, batch_first=True)
        self.rnn2 = nn.RNN(input_size=16, hidden_size=32, batch_first=True)
        self.rnn3 = nn.RNN(input_size=32, hidden_size=64, batch_first=True)
        self.rnn4 = nn.RNN(input_size=64, hidden_size=128, batch_first=True)

        self.relu = nn.ReLU()
        # Shared temporal down-sampler: L_out = floor((L - 7) / 5) + 1.
        self.pool = nn.AvgPool1d(kernel_size=7, stride=5)
        # 5 remaining time steps * 128 features = 640 inputs to the head.
        self.fc = nn.Linear(in_features=640, out_features=10)

    def _stage(self, x, rnn):
        """Run one stage: recurrence, ReLU, then average-pool over time.

        AvgPool1d pools the last dimension, so the tensor is transposed to
        (batch, feat, seq) around the pooling call and back afterwards.
        """
        x, _ = rnn(x)
        x = self.relu(x)
        x = x.permute(0, 2, 1)
        x = self.pool(x)
        return x.permute(0, 2, 1)

    def forward(self, x):
        """Map a (batch, 4000, 1) input to (batch, 10) class scores."""
        # e.g. [8, 4000, 1] -> [8, 799, 16] -> [8, 159, 32]
        #      -> [8, 31, 64] -> [8, 5, 128]
        for rnn in (self.rnn1, self.rnn2, self.rnn3, self.rnn4):
            x = self._stage(x, rnn)
        x = x.flatten(start_dim=1)
        return self.fc(x)


def getModel():
    """Factory hook for training scripts: return a fresh, untrained RNN."""
    return RNN()


if __name__ == '__main__':
    # Prefer the GPU when present, but fall back to CPU so this smoke test
    # runs anywhere (the original hard-coded 'cuda' and crashed without one).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    net = getModel().to(device)
    print(net)
    # print(net(torch.randn(8, 4000, 1)).shape)
    # Layer-by-layer shape/parameter report for a batch of 8 length-4000
    # univariate sequences; pass the same device the model lives on.
    print(summary(net, (8, 4000, 1), device=device))
