﻿import torch
import torch.nn as nn
import torch.nn.functional as F

import torch.nn as nn
import torch.nn.functional as F


import torch.nn as nn
import torch.nn.functional as F

import torch.nn as nn
import torch.nn.functional as F

class MLP_3_BN_KaimingInit(nn.Module):
    """3-layer MLP for 28x28 inputs (e.g. MNIST) with batch norm and Kaiming init.

    Architecture: 784 -> 1024 -> 256 -> 10, with BatchNorm1d + ReLU after each
    hidden layer; the final layer emits raw class logits (no activation).
    """

    def __init__(self):
        super(MLP_3_BN_KaimingInit, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 1024)
        self.bn1 = nn.BatchNorm1d(1024)
        self.fc2 = nn.Linear(1024, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.fc3 = nn.Linear(256, 10)

        # Kaiming (He) initialization, matched to the activation that follows
        # each layer: the hidden layers feed into ReLU, so use the ReLU gain...
        nn.init.kaiming_uniform_(self.fc1.weight, nonlinearity='relu')
        nn.init.kaiming_uniform_(self.fc2.weight, nonlinearity='relu')
        # ...but fc3 is the output layer with no ReLU after it (see forward),
        # so the 'relu' gain (sqrt(2)) would inflate the logit variance.
        # Use the linear gain (1.0) instead.
        nn.init.kaiming_uniform_(self.fc3.weight, nonlinearity='linear')

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        x = x.view(-1, 28 * 28)
        x = F.relu(self.bn1(self.fc1(x)))
        x = F.relu(self.bn2(self.fc2(x)))
        x = self.fc3(x)
        return x


class MLP_3_BN_XavierInit(nn.Module):
    """3-layer MLP (784 -> 1024 -> 256 -> 10) with batch norm and Xavier init.

    BatchNorm1d + ReLU follow each hidden layer; the last layer returns raw
    class logits.
    """

    def __init__(self):
        super(MLP_3_BN_XavierInit, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 1024)
        self.bn1 = nn.BatchNorm1d(1024)
        self.fc2 = nn.Linear(1024, 256)
        self.bn2 = nn.BatchNorm1d(256)
        self.fc3 = nn.Linear(256, 10)

        # Xavier (Glorot) uniform initialization on every linear layer's weight.
        for linear in (self.fc1, self.fc2, self.fc3):
            nn.init.xavier_uniform_(linear.weight)

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        flat = x.view(-1, 28 * 28)
        hidden1 = F.relu(self.bn1(self.fc1(flat)))
        hidden2 = F.relu(self.bn2(self.fc2(hidden1)))
        return self.fc3(hidden2)


class MLP_3_BN(nn.Module):
    """3-layer MLP (784 -> 1024 -> 256 -> 10) with batch-normalized hidden layers.

    Uses PyTorch's default Linear initialization; BatchNorm1d + ReLU follow
    each hidden layer and the final layer returns raw class logits.
    """

    def __init__(self):
        super(MLP_3_BN, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 1024)
        self.bn1 = nn.BatchNorm1d(1024)  # normalizes fc1 activations
        self.fc2 = nn.Linear(1024, 256)
        self.bn2 = nn.BatchNorm1d(256)   # normalizes fc2 activations
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        out = x.view(-1, 28 * 28)
        out = self.fc1(out)
        out = F.relu(self.bn1(out))
        out = self.fc2(out)
        out = F.relu(self.bn2(out))
        return self.fc3(out)


class MLP_3(nn.Module):
    """Plain 3-layer MLP: 784 -> 1024 -> 256 -> 10, ReLU on the hidden layers."""

    def __init__(self):
        super(MLP_3, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 1024)
        self.fc2 = nn.Linear(1024, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        out = x.view(-1, 28 * 28)
        for hidden in (self.fc1, self.fc2):
            out = F.relu(hidden(out))
        return self.fc3(out)

    
class MLP_3max(nn.Module):
    """Wider 3-layer MLP: 784 -> 4096 -> 256 -> 10, ReLU on the hidden layers."""

    def __init__(self):
        super(MLP_3max, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 4096)
        self.fc2 = nn.Linear(4096, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        out = x.view(-1, 28 * 28)
        for hidden in (self.fc1, self.fc2):
            out = F.relu(hidden(out))
        return self.fc3(out)
    


class MLP_6(nn.Module):
    """6-layer MLP: 784 -> 2048 -> 1024 -> 512 -> 254 -> 128 -> 10.

    ReLU follows every hidden layer; the last layer returns raw class logits.
    NOTE(review): the 254-unit layer looks like a typo for 256, but it is kept
    as-is to stay compatible with any existing checkpoints — confirm intent.
    """

    def __init__(self):
        super(MLP_6, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 2048)
        self.fc2 = nn.Linear(2048, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 254)
        self.fc5 = nn.Linear(254, 128)
        self.fc6 = nn.Linear(128, 10)

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        out = x.view(-1, 28 * 28)
        for hidden in (self.fc1, self.fc2, self.fc3, self.fc4, self.fc5):
            out = F.relu(hidden(out))
        return self.fc6(out)
    


class MLP_9(nn.Module):
    """9-layer MLP: 784 -> 2048 -> 1024 -> 512 -> 254 -> 128 -> 64 -> 32 -> 16 -> 10.

    ReLU follows every hidden layer; the last layer returns raw class logits.
    NOTE(review): the 254-unit layer looks like a typo for 256, but it is kept
    as-is to stay compatible with any existing checkpoints — confirm intent.
    """

    def __init__(self):
        super(MLP_9, self).__init__()
        self.fc1 = nn.Linear(28 * 28, 2048)
        self.fc2 = nn.Linear(2048, 1024)
        self.fc3 = nn.Linear(1024, 512)
        self.fc4 = nn.Linear(512, 254)
        self.fc5 = nn.Linear(254, 128)
        self.fc6 = nn.Linear(128, 64)
        self.fc7 = nn.Linear(64, 32)
        self.fc8 = nn.Linear(32, 16)
        self.fc9 = nn.Linear(16, 10)

    def forward(self, x):
        """Flatten input to (N, 784) and return (N, 10) class logits."""
        out = x.view(-1, 28 * 28)
        hidden_layers = (self.fc1, self.fc2, self.fc3, self.fc4,
                         self.fc5, self.fc6, self.fc7, self.fc8)
        for hidden in hidden_layers:
            out = F.relu(hidden(out))
        return self.fc9(out)
    

def get_MLP_3_BN():
    """Factory: return a freshly constructed MLP_3_BN model."""
    return MLP_3_BN()

def get_MLP_3_BN_XavierInit():
    """Factory: return a freshly constructed MLP_3_BN_XavierInit model."""
    return MLP_3_BN_XavierInit()

def get_MLP_3_BN_KaimingInit():
    """Factory: return a freshly constructed MLP_3_BN_KaimingInit model."""
    return MLP_3_BN_KaimingInit()

def get_MLP_3():
    """Factory: return a freshly constructed MLP_3 model."""
    return MLP_3()

def get_MLP_3MAX():
    """Factory: return a freshly constructed MLP_3max model."""
    return MLP_3max()

def get_MLP_6():
    """Factory: return a freshly constructed MLP_6 model."""
    return MLP_6()

def get_MLP_9():
    """Factory: return a freshly constructed MLP_9 model."""
    return MLP_9()