
import torch

from .decoder import BaseDecoder
from .neck import FPN, MultiStageMerging, resize
from .convnext import convnext_base
from .DeepLabV3 import DeepLabV3
import torch.nn as nn 

class UNetConvNext(nn.Module):
    """Semantic-segmentation model: ConvNeXt-Base encoder + FPN neck + 1x1 head.

    Pipeline: ConvNeXt-Base backbone -> FPN over the four stage outputs ->
    multi-stage merge into a single 256-channel map -> 1x1 conv prediction
    head -> bilinear resize back to the input resolution.

    Args:
        out_channels: Number of output channels (classes) of the prediction
            head. BUGFIX: this argument was previously ignored and the head
            was hard-coded to 2 channels.
        checkpoint_path: Path to the ConvNeXt-Base ImageNet-1k checkpoint to
            load into the encoder. Defaults to the previously hard-coded path,
            so existing callers are unaffected.

    Raises:
        FileNotFoundError: if ``checkpoint_path`` does not exist (the load is
            unconditional, matching the original behavior).
    """

    def __init__(self, out_channels, checkpoint_path="./convnext_base_1k_384.pth"):
        super().__init__()
        # Backbone built without its own pretrained download (first arg False);
        # weights are loaded explicitly from the local checkpoint below.
        self.encoder = convnext_base(False, in_22k=False)
        state = torch.load(checkpoint_path, map_location="cpu")["model"]
        self.encoder.load_state_dict(state)

        # FPN over the four encoder stage outputs; channel widths
        # [128, 256, 512, 1024] correspond to ConvNeXt-Base stages.
        self.neck = FPN(features=[128, 256, 512, 1024], out_channels=256)

        # Fuse the four 256-channel FPN levels into a single 256-channel map.
        self.neck_merge = MultiStageMerging([256, 256, 256, 256], out_channels=256)

        # 1x1 prediction head producing `out_channels` logit planes.
        # Was: nn.Conv2d(256, 2, ...) — the constructor argument is now honored.
        self.pred_head = nn.Conv2d(256, out_channels, kernel_size=1, stride=1,
                                   padding=0, bias=False)

    def forward(self, x):
        """Return per-pixel logits of shape (N, out_channels, H, W).

        Args:
            x: Input image batch of shape (N, C, H, W).
        """
        input_size = x.shape[2:]          # (H, W) of the input, for final resize
        skips = self.encoder(x)           # multi-scale features from the backbone
        out = self.neck(skips)            # FPN: per-level 256-channel maps
        out = self.neck_merge(out)        # merged single feature map
        out = self.pred_head(out)         # per-pixel class logits
        # Upsample logits back to the original spatial resolution.
        out = resize(out, size=input_size, mode="bilinear")
        return out