﻿import torch
import torch.nn as nn
import torch.nn.functional as F

from torchsummary import summary




    
class LeNet6(nn.Module):
    """LeNet-5 style CNN for 3-channel 32x32 inputs (e.g. CIFAR-10).

    Args:
        num_classes (int): number of output classes. Defaults to 10.
    """

    def __init__(self, num_classes=10):
        super(LeNet6, self).__init__()

        self.lenet5_conv = nn.Sequential(
            # 5x5 conv, 3 input channels, no padding: 32-5+1=28
            nn.Conv2d(3, 6, 5, padding=0),   # (b,3,32,32) -> (b,6,28,28)
            nn.ReLU(),
            nn.MaxPool2d(2, 2),              # (b,6,28,28) -> (b,6,14,14)
            nn.Conv2d(6, 16, 5),             # (b,6,14,14) -> (b,16,10,10)
            nn.ReLU(),
            nn.MaxPool2d(2, 2),              # (b,16,10,10) -> (b,16,5,5)
        )

        self.lenet5_fc = nn.Sequential(
            nn.Flatten(),                    # (b,16,5,5) -> (b,400)
            nn.Linear(16 * 5 * 5, 120),      # (b,400) -> (b,120)
            nn.ReLU(),
            nn.Linear(120, 84),              # (b,120) -> (b,84)
            nn.ReLU(),
            nn.Linear(84, num_classes),      # (b,84) -> (b,num_classes)
        )

    def _initalize_weights(self, Module):
        """Per-module init hook, intended for ``self.apply(self._initalize_weights)``.

        Xavier-uniform for all Linear layers, Kaiming-normal for all Conv2d
        layers; biases are zeroed in both cases.
        """
        if isinstance(Module, nn.Linear):
            nn.init.xavier_uniform_(Module.weight)
            # BUG FIX: nn.init.zeros does not exist; the in-place API is zeros_.
            nn.init.zeros_(Module.bias)
        elif isinstance(Module, nn.Conv2d):
            # BUG FIX: original wrote isinstance(Module.nn.Conv2d), which raises
            # AttributeError instead of testing the module type.
            nn.init.kaiming_normal_(Module.weight, mode='fan_out', nonlinearity='relu')
            nn.init.zeros_(Module.bias)

    def forward(self, x):
        x = self.lenet5_conv(x)
        # Flatten (b,16,5,5) -> (b,400); the nn.Flatten inside lenet5_fc is
        # then a no-op on the already-2D tensor.
        x = x.view(-1, 16 * 5 * 5)
        x = self.lenet5_fc(x)
        return x

class MLP_6(nn.Module):
    """Six-layer fully-connected network for flat 32*32 (single-channel) inputs.

    Args:
        num_classes (int): number of output classes. Defaults to 10.
    """

    def __init__(self, num_classes=10):
        super(MLP_6, self).__init__()
        self.fc1 = nn.Linear(32 * 32, 2048)
        self.fc2 = nn.Linear(2048, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 254)
        self.fc5 = nn.Linear(254, 128)
        self.fc6 = nn.Linear(128, num_classes)

    def _initalize_weights(self, Module):
        """Per-module init hook: Xavier-uniform weights and zero bias for
        every Linear layer. Use with ``self.apply(self._initalize_weights)``.
        """
        if isinstance(Module, nn.Linear):
            nn.init.xavier_uniform_(Module.weight)
            # BUG FIX: nn.init.zeros does not exist; the in-place API is zeros_.
            nn.init.zeros_(Module.bias)

    def forward(self, x):
        # Flatten whatever comes in to (b, 1024) before the first Linear.
        x = x.view(-1, 32 * 32)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = F.relu(self.fc3(x))
        x = F.relu(self.fc4(x))
        x = F.relu(self.fc5(x))
        x = self.fc6(x)
        return x

class AlexNet(nn.Module):
    """AlexNet variant adapted to 32x32 RGB inputs.

    The original paper's first conv (kernel 11, stride 4, for 227x227 input)
    is replaced by a kernel-6 / stride-1 conv so that a 32x32 input produces
    27x27 feature maps; the paper's first max-pool is dropped for the same
    reason, and from that point on the spatial sizes match the paper.
    """

    def __init__(self, num_classes=10):
        """Build the feature extractor and the classifier head.

        Args:
            num_classes (int): number of classes to predict with this model.
        """
        super().__init__()
        # Feature extractor: expects (b, 3, 32, 32).
        self.net = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=96, kernel_size=6, stride=1),  # (32-6)/1+1=27 -> (b,96,27,27)
            nn.ReLU(),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),  # paper section 3.3
            nn.Conv2d(96, 256, 5, padding=2),        # 27-5+4+1=27 -> (b,256,27,27)
            nn.ReLU(),
            nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=2),
            nn.MaxPool2d(kernel_size=3, stride=2),   # (27-3)/2+1=13 -> (b,256,13,13)
            nn.Conv2d(256, 384, 3, padding=1),       # (b,384,13,13)
            nn.ReLU(),
            nn.Conv2d(384, 384, 3, padding=1),       # (b,384,13,13)
            nn.ReLU(),
            nn.Conv2d(384, 256, 3, padding=1),       # (b,256,13,13)
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=3, stride=2),   # (13-3)/2+1=6 -> (b,256,6,6)
        )
        # Fully-connected classifier head.
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            nn.Linear(in_features=(256 * 6 * 6), out_features=4096),
            nn.ReLU(),
            nn.Dropout(p=0.5),
            nn.Linear(in_features=4096, out_features=4096),
            nn.ReLU(),
            nn.Linear(in_features=4096, out_features=num_classes),
        )

    def init_bias(self):
        """Initialize conv weights and biases following the AlexNet paper."""
        for layer in self.net:
            if isinstance(layer, nn.Conv2d):
                nn.init.normal_(layer.weight, mean=0, std=0.01)
                nn.init.constant_(layer.bias, 0)
        # The paper sets bias=1 on the 2nd, 4th and 5th conv layers; with the
        # first pooling layer removed, those convs sit at indices 3, 9 and 11.
        for idx in (3, 9, 11):
            nn.init.constant_(self.net[idx].bias, 1)

    def forward(self, x):
        """Pass the input through the net.

        Args:
            x (Tensor): input batch, expected (b, 3, 32, 32).

        Returns:
            Tensor: class logits of shape (b, num_classes).
        """
        features = self.net(x)
        # Collapse (b,256,6,6) to (b,9216) for the linear layers.
        flat = features.view(-1, 256 * 6 * 6)
        return self.classifier(flat)






def get_MLP_6():
    """Factory: return a freshly constructed MLP_6 with default classes."""
    return MLP_6()

def get_AlexNet():
    """Factory: return a freshly constructed AlexNet with default classes."""
    return AlexNet()

def get_LeNet6():
    """Factory: return a freshly constructed LeNet6 with default classes."""
    return LeNet6()


if __name__ == '__main__':
    # Quick smoke test: print the model and summarize its conv stack.
    model = AlexNet()
    print(model)
    # BUG FIX: hard-coding 'cuda' crashed on CPU-only machines; fall back to
    # CPU and tell torchsummary which device the weights live on.
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    features = model.net.to(device)
    summary(features, (3, 32, 32), device=device)