import torch
from sklearn.model_selection import train_test_split
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch
from torch.nn import Linear, ReLU, ModuleList, Sequential, Dropout, Softmax, Tanh
import torch.nn.functional as F

# 定义改进后的多层感知机（MLP）模型
class ImprovedMLP(nn.Module):
    """Two-hidden-layer multilayer perceptron for classification.

    Architecture (per hidden layer: Linear -> BatchNorm -> ReLU -> Dropout):
        input_size -> 256 -> 128 -> num_classes

    The forward pass returns raw logits (no softmax), which is what loss
    functions such as ``nn.CrossEntropyLoss`` expect; use
    :meth:`get_softmax_outputs` to convert logits to probabilities.

    Args:
        num_classes: number of output classes (size of the final layer).
        input_size: number of input features per sample.
    """

    def __init__(self, num_classes, input_size):
        super(ImprovedMLP, self).__init__()
        self.num_classes = num_classes  # number of output classes
        self.input_size = input_size  # number of input features

        # First hidden block: 256 units with batch norm and 50% dropout.
        self.fc1 = nn.Linear(input_size, 256)
        self.relu1 = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(256)
        self.dropout1 = nn.Dropout(0.5)

        # Second hidden block: 128 units with batch norm and 50% dropout.
        self.fc2 = nn.Linear(256, 128)
        self.relu2 = nn.ReLU()
        self.bn2 = nn.BatchNorm1d(128)
        self.dropout2 = nn.Dropout(0.5)

        # Output layer: produces one logit per class.
        self.fc3 = nn.Linear(128, num_classes)

    def forward(self, x):
        """Run the forward pass.

        Args:
            x: input tensor of shape ``(batch, input_size)``.
               (BatchNorm1d requires batch > 1 in training mode.)

        Returns:
            Logits tensor of shape ``(batch, num_classes)``.
        """
        # Block 1: Linear -> BN -> ReLU -> Dropout.
        x = self.fc1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.dropout1(x)

        # Block 2: Linear -> BN -> ReLU -> Dropout.
        x = self.fc2(x)
        x = self.bn2(x)
        x = self.relu2(x)
        x = self.dropout2(x)

        # Output layer: raw logits, no activation.
        return self.fc3(x)

    def get_softmax_outputs(self, outputs):
        """Convert logits to class probabilities.

        Args:
            outputs: logits tensor of shape ``(batch, num_classes)``.

        Returns:
            Probabilities of the same shape; each row sums to 1.
        """
        # Use the F alias already imported at module level for consistency.
        return F.softmax(outputs, dim=1)



