from torch import nn
from torchvision import models
from base_block import BaseConv, DWConv, CA_Block

class PP_LCnet(nn.Module):
    """PP-LCNet-style classification backbone built from depthwise conv blocks.

    Args:
        wid_mul: width multiplier; the base channel count is int(wid_mul * 64).
        attention: if True, interleave CA_Block modules inside stage 3.
        act: activation name forwarded to DWConv / CA_Block (default "silu").
        num_classes: output size of the final linear layer (default 62,
            matching the previously hard-coded head).
    """

    def __init__(
        self,
        wid_mul,
        attention=False,
        act="silu",
        num_classes=62,
    ):
        super().__init__()

        base_channels = int(wid_mul * 64)  # 64 when wid_mul == 1.0

        # Stem: plain 3x3 stride-2 conv, 3 -> C/2 channels.
        self.stem = nn.Conv2d(3, base_channels // 2, kernel_size=3, stride=2, padding=1)

        # Stage 2: C/2 -> 2C, second block downsamples (stride 2).
        self.block2 = nn.Sequential(
            DWConv(base_channels // 2, base_channels, ksize=3, stride=1, act=act),
            DWConv(base_channels, base_channels * 2, ksize=3, stride=2, act=act),
        )

        # Stage 3: 2C -> 4C, optionally with coordinate-attention blocks
        # after each depthwise conv.
        if attention:
            self.block3 = nn.Sequential(
                DWConv(base_channels * 2, base_channels * 2, ksize=3, stride=1, act=act),
                CA_Block(base_channels * 2, base_channels * 2, ratio=0.5, act=act),
                DWConv(base_channels * 2, base_channels * 4, ksize=3, stride=2, act=act),
                CA_Block(base_channels * 4, base_channels * 4, ratio=0.5, act=act),
            )
        else:
            self.block3 = nn.Sequential(
                DWConv(base_channels * 2, base_channels * 2, ksize=3, stride=1, act=act),
                DWConv(base_channels * 2, base_channels * 4, ksize=3, stride=2, act=act),
            )

        # Stage 4: 4C -> 8C, second block downsamples.
        self.block4 = nn.Sequential(
            DWConv(base_channels * 4, base_channels * 4, ksize=3, stride=1, act=act),
            DWConv(base_channels * 4, base_channels * 8, ksize=3, stride=2, act=act),
        )

        # Stage 5: five identical 5x5 stride-1 blocks at 8C, then a stride-2
        # downsample to 16C, then one final 5x5 stride-1 block.
        stage5 = [
            DWConv(base_channels * 8, base_channels * 8, ksize=5, stride=1, act=act)
            for _ in range(5)
        ]
        stage5.append(
            DWConv(base_channels * 8, base_channels * 16, ksize=5, stride=2, act=act)
        )
        stage5.append(
            DWConv(base_channels * 16, base_channels * 16, ksize=5, stride=1, act=act)
        )
        self.block5 = nn.Sequential(*stage5)

        # Classification head: global average pool -> flatten -> dropout -> linear.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.flatten = nn.Flatten()
        self.drop = nn.Dropout(0.5)
        self.fc = nn.Linear(base_channels * 16, num_classes)

    def forward(self, x):
        """Run the backbone on an image batch and return (N, num_classes) logits."""
        x = self.stem(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.avg_pool(x)
        x = self.flatten(x)
        x = self.drop(x)
        return self.fc(x)

def get_model(wid_mul, attention=False):
    """Build a PP_LCnet and normalise its BatchNorm hyper-parameters.

    Args:
        wid_mul: width multiplier forwarded to PP_LCnet.
        attention: whether PP_LCnet inserts CA_Block attention in stage 3.

    Returns:
        The constructed PP_LCnet instance with every BatchNorm2d set to
        eps=1e-3, momentum=0.03.
    """
    model = PP_LCnet(wid_mul=wid_mul, attention=attention)

    # NOTE(review): the original also defined a weight-init helper
    # (normal_(std=0.1) on Linear/Conv2d) that was never applied; it has
    # been removed as dead code.  The original applied the BatchNorm tweak
    # via model.apply() on a helper that itself walked M.modules(), visiting
    # every module quadratically; a single direct walk reaches the same
    # final state.
    for m in model.modules():
        if isinstance(m, nn.BatchNorm2d):
            m.eps = 1e-3
            m.momentum = 0.03
    return model

def get_resnet18(num_classes=62):
    """Return an ImageNet-pretrained ResNet-18 with a fresh classification head.

    Args:
        num_classes: output size of the replacement final layer (default 62,
            matching the previously hard-coded head).

    Returns:
        A torchvision ResNet-18 whose ``fc`` layer is a new, untrained
        ``nn.Linear`` mapping to ``num_classes`` logits.
    """
    # NOTE(review): ``pretrained=True`` is deprecated in newer torchvision
    # releases in favour of ``weights=ResNet18_Weights.DEFAULT``; kept as-is
    # to stay compatible with whatever torchvision version this project pins.
    model = models.resnet18(pretrained=True)
    model.fc = nn.Linear(model.fc.in_features, num_classes)
    return model