import torch
from torch import nn

class SIMPLE_NET(torch.nn.Module):
    """Single linear layer mapping a flat 28*28 image to 10 class scores."""

    def __init__(self):
        super().__init__()
        # Raw logits; no activation is applied here.
        self.linear = nn.Linear(28 * 28, 10)

    def forward(self, x):
        """Return (batch, 10) logits for input of shape (batch, 784)."""
        return self.linear(x)

class MULTI_LAYER(torch.nn.Module):
    """Fully-connected MNIST classifier: 784 -> 512 -> 256 -> 64 -> 10.

    Each hidden linear layer is followed by Dropout(0.4) and ReLU; the
    output goes through Softmax, so each row of the result is a
    probability distribution over the 10 classes.
    """

    def __init__(self):
        super(MULTI_LAYER, self).__init__()
        self.linear_stack = torch.nn.Sequential(
                torch.nn.Linear(28 * 28, 512),
                torch.nn.Dropout(0.4),
                torch.nn.ReLU(),
                torch.nn.Linear(512, 256),
                torch.nn.Dropout(0.4),
                torch.nn.ReLU(),
                torch.nn.Linear(256, 64),
                torch.nn.Dropout(0.4),
                torch.nn.ReLU(),
                torch.nn.Linear(64, 10),
                # dim=1 normalizes over the class axis. Omitting it (as the
                # code previously did) is deprecated and makes PyTorch infer
                # the dimension at call time; the other nets in this file
                # pass dim=1 explicitly.
                # NOTE(review): the sibling nets end in LogSoftmax — confirm
                # which loss this model trains with (plain Softmax pairs
                # poorly with NLLLoss/CrossEntropyLoss).
                torch.nn.Softmax(dim=1)
            )

    def forward(self, x):
        """Return class probabilities of shape (batch, 10) for flat 784-d input."""
        output = self.linear_stack(x)
        return output

class CNN(torch.nn.Module):
    """LeNet-style MNIST CNN: two conv/pool stages, then a two-layer MLP head.

    ``forward`` accepts any tensor reshapeable to (N, 1, 28, 28) and returns
    per-class log-probabilities of shape (N, 10).
    """

    def __init__(self):
        super(CNN, self).__init__()
        self.cnn_linear_stack = torch.nn.Sequential(
            nn.Conv2d(1, 20, 5, 1), #in_channels, out_channels, kernel_size, stride
            nn.ReLU(),
            nn.MaxPool2d(2, 2),      # 24x24 -> 12x12
            nn.Conv2d(20, 50, 5, 1),
            nn.ReLU(),
            nn.MaxPool2d(2, 2),      # 8x8 -> 4x4, hence 4*4*50 below
            nn.Flatten(),
            nn.Linear(4 * 4 * 50, 256),
            nn.Dropout(0.4),
            nn.ReLU(),
            nn.Linear(256, 10),
            # dim=1 (the class axis) was previously implicit, which is
            # deprecated and emits a warning; CIFAR10_CNN/FEDPER already
            # spell it out.
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        """Reshape input to (N, 1, 28, 28) and return (N, 10) log-probabilities."""
        x = x.view(-1, 1, 28, 28)
        output = self.cnn_linear_stack(x)
        return output

class CIFAR10_CNN(torch.nn.Module):
    """Two-stage conv net for 32x32 RGB images (e.g. CIFAR-10).

    Emits log-probabilities over ``class_num`` classes via LogSoftmax.
    """

    def __init__(self, class_num):
        super().__init__()
        self.cifar10_cnn_stack = nn.Sequential(
            nn.Conv2d(3, 20, 5, 1),   # 32x32 -> 28x28
            nn.ReLU(),
            nn.MaxPool2d(2, 2),       # -> 14x14
            nn.Conv2d(20, 50, 5, 1),  # -> 10x10
            nn.ReLU(),
            nn.MaxPool2d(2, 2),       # -> 5x5, hence 5*5*50 below
            nn.Flatten(),
            nn.Linear(5 * 5 * 50, 256),
            nn.Dropout(0.4),
            nn.ReLU(),
            nn.Linear(256, class_num),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        """Reshape tensor x to (N, 3, 32, 32) and return (N, class_num) log-probs."""
        batch = x.view(-1, 3, 32, 32)
        return self.cifar10_cnn_stack(batch)

class FEDPER(torch.nn.Module):
    """Conv net for 32x32 RGB input with a deeper MLP head (256 -> 64 -> 32 -> 10).

    Same conv trunk as CIFAR10_CNN; returns log-probabilities over 10 classes.
    """

    def __init__(self):
        super().__init__()
        self.fedper = nn.Sequential(
            nn.Conv2d(3, 20, 5, 1),   # 32x32 -> 28x28
            nn.ReLU(),
            nn.MaxPool2d(2, 2),       # -> 14x14
            nn.Conv2d(20, 50, 5, 1),  # -> 10x10
            nn.ReLU(),
            nn.MaxPool2d(2, 2),       # -> 5x5, hence 5*5*50 below
            nn.Flatten(),
            nn.Linear(5 * 5 * 50, 256),
            nn.Dropout(0.4),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.Dropout(0.4),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.Dropout(0.4),
            nn.ReLU(),
            nn.Linear(32, 10),
            nn.LogSoftmax(dim=1),
        )

    def forward(self, x):
        """Reshape input to (N, 3, 32, 32) and return (N, 10) log-probabilities."""
        return self.fedper(x.view(-1, 3, 32, 32))
