import torch
import torch.nn as nn



class MLP(nn.Module):
    """LeNet-style convolutional classifier for 28x28 single-channel images
    (e.g. MNIST).

    Note: despite the name ``MLP`` (kept for backward compatibility), this is
    a CNN: two conv+ReLU+max-pool stages followed by two linear layers.

    The forward pass returns **raw logits** of shape (N, 10). Softmax is
    deliberately NOT applied: ``nn.CrossEntropyLoss`` expects logits and
    applies log-softmax internally, so applying ``nn.Softmax`` here (as the
    original code did, against its own comment) would double-apply softmax
    and squash gradients during training.
    """

    def __init__(self, input_channles):
        # NOTE(review): parameter keeps the original spelling ("channles")
        # so existing keyword callers are not broken.
        super().__init__()

        # Stage 1: (N, C, 28, 28) -> (N, 32, 24, 24) -> (N, 32, 12, 12)
        self.conv1 = nn.Conv2d(in_channels=input_channles, out_channels=32,
                               kernel_size=(5, 5))
        nn.init.kaiming_uniform_(self.conv1.weight, nonlinearity='relu')
        self.act1 = nn.ReLU()
        self.pool1 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))

        # Stage 2: (N, 32, 12, 12) -> (N, 32, 8, 8) -> (N, 32, 4, 4)
        self.conv2 = nn.Conv2d(32, 32, (5, 5))
        nn.init.kaiming_uniform_(self.conv2.weight, nonlinearity='relu')
        self.act2 = nn.ReLU()
        self.pool2 = nn.MaxPool2d(kernel_size=(2, 2), stride=(2, 2))

        # Classifier head: (N, 32*4*4) -> (N, 100) -> (N, 10)
        self.line1 = nn.Linear(4 * 4 * 32, 100)
        nn.init.kaiming_uniform_(self.line1.weight, nonlinearity='relu')
        self.act3 = nn.ReLU()
        self.line2 = nn.Linear(100, 10)
        # No softmax layer: cross-entropy loss applies it internally.

    def forward(self, X):
        """Return class logits of shape (N, 10) for input X of shape
        (N, C, 28, 28)."""
        # Feature extraction
        X = self.pool1(self.act1(self.conv1(X)))
        X = self.pool2(self.act2(self.conv2(X)))
        # Flatten everything but the batch dimension. flatten(1) is safer
        # than view(-1, 4*4*32): it can never silently re-partition the
        # batch if the spatial size is ever wrong.
        X = X.flatten(1)
        # Classifier head -- raw logits, to be paired with CrossEntropyLoss.
        X = self.act3(self.line1(X))
        return self.line2(X)

# NOTE(review): the triple-quoted string below is a commented-out alternative
# implementation (nn.Sequential style) kept verbatim for reference. It is a
# bare string expression with no runtime effect; consider deleting it or
# moving it to version-control history.
'''
class MLP(nn.Module):
    def __init__(self, input_channels):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(input_channels, 32, kernel_size=5), # (N, C, 28, 28) -> (N, 32, 24, 24)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2),        # -> (N, 32, 12, 12)
            nn.Conv2d(32, 32, kernel_size=5),             # -> (N, 32, 8, 8)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)         # -> (N, 32, 4, 4)
        )
        self.classifier = nn.Sequential(
            nn.Linear(4*4*32, 100),
            nn.ReLU(),
            nn.Linear(100, 10)
            # Softmax is not needed here if you're using nn.CrossEntropyLoss
        )

        self._init_weights()

    def _init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                nn.init.kaiming_uniform_(m.weight, nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        x = self.features(x)
        x = x.view(-1, 4*4*32)
        x = self.classifier(x)
        return x
'''

if __name__ == "__main__":
    # Smoke test: push one fake MNIST-sized batch through the model.
    torch.manual_seed(0)  # reproducible output when eyeballing results
    # Renamed from `input` -- that shadowed the builtin input().
    batch = torch.randn((32, 1, 28, 28))  # (N, C, H, W)
    print(batch.shape)

    model = MLP(1)
    output = model(batch)
    print(output.shape)

    print(output)