from torch import nn
from torch.nn import functional as F
from functools import partial
import torch
import parameters as p


class Block(nn.Module):
    """VGG-style convolutional block.

    Stacks ``layers`` 3x3 convolutions (padding=1, so spatial size is
    preserved), each followed by a ReLU, and finishes with a 2x2 max-pool
    that halves the spatial resolution.

    Args:
        layers: total number of conv layers in the block (expected >= 2:
            one input conv, ``layers - 2`` hidden convs, one output conv).
        in_channels: channels of the incoming feature map.
        hidden_channels: channels used by the intermediate convs.
        out_channels: channels of the block's output feature map.
    """

    def __init__(self, layers: int, in_channels: int, hidden_channels: int,
                 out_channels: int, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.layers = nn.ModuleList([])
        # 3x3 conv with padding=1 keeps H and W unchanged.
        conv = partial(nn.Conv2d, kernel_size=3, padding=1)

        self.layers.append(conv(in_channels=in_channels, out_channels=hidden_channels))
        # Hidden convs; max(..., 0) makes layers < 2 degrade gracefully
        # instead of silently relying on range() of a negative number.
        for _ in range(max(layers - 2, 0)):
            self.layers.append(conv(in_channels=hidden_channels, out_channels=hidden_channels))
        self.layers.append(conv(in_channels=hidden_channels, out_channels=out_channels))

        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        # Bug fix: the original applied no activation, so the stacked convs
        # collapsed into a single linear convolution. VGG applies ReLU after
        # every conv layer.
        for layer in self.layers:
            x = F.relu(layer(x))
        return self.pool(x)
    



def layer_change_function(x):
    """Number of conv layers for block index *x*: 2 for the first three blocks, 4 after."""
    return 4 if x > 2 else 2
def factor_change_function(x):
    """Channel-width multiplier for block index *x*: doubles each block, capped at 2**3."""
    return min(2 ** x, 2 ** 3)


class ConvNet(nn.Module):
    """VGG-style image classifier: five conv ``Block``s followed by a
    Flatten + three fully-connected layers and a softmax over classes.

    Args:
        is_gray_image: when True the first block expects 1 input channel,
            otherwise 3 (RGB).
        num_class: size of the final classification layer.

    NOTE: the classifier head hard-codes ``in_features=512``, i.e. it assumes
    the five 2x2 pools reduce the input to a 1x1 spatial map (e.g. 32x32
    input with FILTER_BASE_LINE == 64) — TODO confirm against callers.
    """

    def __init__(self, is_gray_image: bool = False, num_class: int = 1000,
                 *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)

        self.layers = nn.ModuleList([])
        self.fcs = nn.ModuleList([nn.Flatten()])
        channels = 1 if is_gray_image else 3
        for index in range(5):
            layer_num = layer_change_function(index)
            factor = factor_change_function(index)
            # First block consumes the raw image channels; each later block
            # consumes the previous block's output width.
            in_channels = channels if not index else p.FILTER_BASE_LINE * (2 ** (index - 1))

            self.layers.append(Block(layers=layer_num,
                                     in_channels=in_channels,
                                     hidden_channels=p.FILTER_BASE_LINE * factor,
                                     out_channels=p.FILTER_BASE_LINE * factor))

        # Classifier head: 512 -> 4096 -> 4096 -> num_class.
        for index in range(3):
            in_features = 512 if not index else 4096
            out_features = 4096 if index != 2 else num_class

            self.fcs.append(nn.Linear(in_features, out_features))

    def forward(self, x):
        for block in self.layers:
            x = block(x)
        x = self.fcs[0](x)  # Flatten
        # Bug fix: the original chained the Linear layers with no activation,
        # collapsing them into one linear map. Applying ReLU functionally
        # (rather than inserting nn.ReLU modules) keeps state_dict keys
        # unchanged for existing checkpoints.
        for fc in self.fcs[1:-1]:
            x = F.relu(fc(x))
        x = self.fcs[-1](x)
        # NOTE(review): softmax here means a training loop must NOT also use
        # CrossEntropyLoss (which expects raw logits) — confirm with callers.
        return F.softmax(x, dim=-1)



if __name__ == "__main__":
    # c =  Block(2,3,64,64)
    model = ConvNet(is_gray_image=True,num_class=10)
    # for module in c.modules():��
        # print(module)
    # fc = nn.Linear(in_features=224,out_features=1024)
    x = torch.randn([4,1,32,32])
    # f = nn.Flatten()
    # print(f(x).shape)
    # print(c(x).shape)
    print(model(x))