import torch
from torch import nn
import torch.nn.functional as F


class MyMlp(nn.Module):
    """A simple MLP classifier with three hidden layers.

    Each hidden layer is Linear -> BatchNorm1d -> ReLU -> Dropout; the
    output layer is a plain Linear projection with no activation.

    Args:
        input_size: number of input features.
        output_size: number of output classes/logits.
        hidden_layers: sequence of at least three ints giving the three
            hidden-layer widths (only the first three entries are used).
        dropout: dropout probability applied after each hidden layer.
    """

    def __init__(self, input_size, output_size, hidden_layers, dropout=0.5):
        super(MyMlp, self).__init__()
        h1, h2, h3 = hidden_layers[0], hidden_layers[1], hidden_layers[2]
        self.dropout = nn.Dropout(dropout)
        self.linear1 = nn.Linear(input_size, h1)
        self.bn1 = nn.BatchNorm1d(h1)
        self.linear2 = nn.Linear(h1, h2)
        self.bn2 = nn.BatchNorm1d(h2)
        self.linear3 = nn.Linear(h2, h3)
        self.bn3 = nn.BatchNorm1d(h3)
        self.linear4 = nn.Linear(h3, output_size)

    def forward(self, x):
        """Run the forward pass; returns raw logits of shape (batch, output_size)."""
        hidden_stages = (
            (self.linear1, self.bn1),
            (self.linear2, self.bn2),
            (self.linear3, self.bn3),
        )
        for linear, bn in hidden_stages:
            x = self.dropout(F.relu(bn(linear(x))))
        # No activation on the final layer: the intended loss is
        # CrossEntropyLoss, which applies log-softmax to the logits
        # internally before computing the cross entropy.
        return self.linear4(x)


if __name__ == '__main__':
    # x = torch.ones([2, 1262])
    # model = MyMlp(1262, 1, [64, 64])
    # print(model(x))
    print(eval('lambda x: x + 1')(1))