﻿import torch
import torch.nn as nn
import torch.nn.functional as F

import torch.nn as nn
import torch.nn.functional as F


import torch.nn as nn
import torch.nn.functional as F

import torch.nn as nn
import torch.nn.functional as F

class LeNet5(nn.Module):
    """Classic LeNet-5 for single-channel 28x28 inputs (e.g. MNIST), 10 output classes."""

    def __init__(self):
        super().__init__()
        # One Sequential pipeline: two conv/pool stages followed by the FC head.
        self.lenet5 = nn.Sequential(
            # 5x5 conv, padding=2 preserves spatial size: (b, 1, 28, 28) -> (b, 6, 28, 28)
            nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            # 2x2 average pooling halves H and W: -> (b, 6, 14, 14)
            nn.AvgPool2d(kernel_size=2, stride=2),
            # Valid (unpadded) 5x5 conv: -> (b, 16, 10, 10)
            nn.Conv2d(6, 16, kernel_size=5, stride=1, padding=0),
            nn.ReLU(),
            # -> (b, 16, 5, 5)
            nn.AvgPool2d(kernel_size=2, stride=2),
            # -> (b, 400)
            nn.Flatten(),
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU(),
            nn.Linear(120, 84),
            nn.ReLU(),
            # Raw logits for 10 classes (no softmax here).
            nn.Linear(84, 10),
        )

    def forward(self, x):
        # Coerce the input to (batch, 1, 28, 28) so flattened MNIST tensors also work.
        batched = x.view(-1, 1, 28, 28)
        return self.lenet5(batched)
    
class LeNet6(nn.Module):
    """LeNet-5 variant with MaxPool instead of AvgPool, split into conv and FC stages."""

    def __init__(self):
        super(LeNet6, self).__init__()

        # Convolutional feature extractor.
        self.lenet5_conv = nn.Sequential(
            nn.Conv2d(1, 6, 5, padding=2),    # (b, 1, 28, 28) -> (b, 6, 28, 28)
            nn.ReLU(),
            nn.MaxPool2d(2, 2),               # -> (b, 6, 14, 14)
            nn.Conv2d(6, 16, 5),              # -> (b, 16, 10, 10)
            nn.ReLU(),
            nn.MaxPool2d(2, 2),               # -> (b, 16, 5, 5)
        )

        # Fully connected classifier head.
        self.lenet5_fc = nn.Sequential(
            nn.Flatten(),                     # (b, 16, 5, 5) -> (b, 400); no-op on already-flat input
            nn.Linear(16 * 5 * 5, 120),       # (b, 400) -> (b, 120)
            nn.ReLU(),
            nn.Linear(120, 84),               # (b, 120) -> (b, 84)
            nn.ReLU(),
            nn.Linear(84, 10),                # (b, 84) -> (b, 10) logits
        )

    def _initalize_weights(self, Module):
        """Per-module init hook, intended for ``self.apply(self._initalize_weights)``.

        Linear layers: Xavier-uniform weights. Conv layers: Kaiming-normal
        weights (fan_out, ReLU). Biases are zeroed in both cases.
        """
        if isinstance(Module, nn.Linear):
            nn.init.xavier_uniform_(Module.weight)
            # BUGFIX: was nn.init.zeros (no such function -> AttributeError);
            # zeros_ is the in-place initializer.
            nn.init.zeros_(Module.bias)
        elif isinstance(Module, nn.Conv2d):
            # BUGFIX: was `isinstance(Module.nn.Conv2d)` — a bogus attribute
            # access and a one-argument isinstance call; raised at runtime.
            nn.init.kaiming_normal_(Module.weight, mode='fan_out', nonlinearity='relu')
            nn.init.zeros_(Module.bias)

    def forward(self, x):
        # NOTE(review): unlike LeNet5, there is no reshape here — callers are
        # expected to pass (b, 1, 28, 28) tensors.
        x = self.lenet5_conv(x)
        # Flatten to (b, 400); the leading Flatten in lenet5_fc then leaves it unchanged.
        x = x.view(x.size(0), -1)
        x = self.lenet5_fc(x)
        return x

class MLP_6(nn.Module):
    """Six-layer fully connected classifier over flattened 28x28 inputs, 10 classes."""

    def __init__(self):
        super(MLP_6, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 2048)
        self.fc2 = nn.Linear(2048, 1024)
        self.fc3 = nn.Linear(1024, 512)
        # NOTE(review): 254 looks like a typo for 256, but changing it would
        # alter the architecture (and any saved checkpoints) — confirm with author.
        self.fc4 = nn.Linear(512, 254)
        self.fc5 = nn.Linear(254, 128)
        self.fc6 = nn.Linear(128, 10)

    def _initalize_weights(self, Module):
        """Init hook for ``self.apply``: Xavier-uniform weights, zero biases on Linear layers."""
        if isinstance(Module, nn.Linear):
            nn.init.xavier_uniform_(Module.weight)
            # BUGFIX: was nn.init.zeros (no such function -> AttributeError);
            # zeros_ is the in-place initializer.
            nn.init.zeros_(Module.bias)

    def forward(self, x):
        # Flatten whatever comes in (e.g. (b, 1, 28, 28) images) to (b, 784).
        x = x.view(-1, 28 * 28)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        # Raw logits; pair with CrossEntropyLoss outside.
        return self.fc6(x)
    





def get_MLP_6():
    """Factory: return a freshly constructed MLP_6 instance."""
    return MLP_6()

def get_LeNet5():
    """Factory: return a freshly constructed LeNet5 instance."""
    return LeNet5()

def get_LeNet6():
    """Factory: return a freshly constructed LeNet6 instance."""
    return LeNet6()