import torch
import torch.nn as nn
from   .resnet50 import resnet50
from   .vggnet16 import VGG16

class UnetBlock(nn.Module):
    """One U-Net decoder stage.

    Upsamples the deeper feature map by 2x, concatenates it with the
    skip-connection features along the channel axis, then refines the
    result with two 3x3 conv + ReLU layers.
    """

    def __init__(self, in_size, out_size):
        super(UnetBlock, self).__init__()
        # padding=1 with kernel_size=3 keeps H and W unchanged.
        self.conv1 = nn.Conv2d(in_size, out_size, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_size, out_size, kernel_size=3, padding=1)
        # Doubles H/W so the deeper map matches the skip connection.
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, input1, input2):
        """input1: skip features; input2: deeper features at half their resolution."""
        merged = torch.cat([input1, self.up(input2)], dim=1)
        refined = self.relu(self.conv1(merged))
        refined = self.relu(self.conv2(refined))
        return refined
    
class Unet(nn.Module):
    """U-Net semantic-segmentation network over a VGG16 or ResNet50 encoder.

    The backbone produces five feature maps (feat1..feat5, shallow to deep).
    Four UnetBlock decoder stages fuse them back toward input resolution,
    and a final 1x1 convolution maps the result to per-pixel class scores.
    """

    def __init__(self, num_classes=21, pretrained=False, backbone='vgg16'):
        super(Unet, self).__init__()
        # Channels entering each decoder stage = skip channels + upsampled
        # deeper channels; the counts differ per backbone.
        if backbone == "vgg16":
            self.backbone = VGG16(pretrained=pretrained)
            self.in_filters = [192, 384, 768, 1024]
        elif backbone == "resnet50":
            self.backbone = resnet50(pretrained=pretrained)
            self.in_filters = [192, 512, 1024, 3072]
        else:
            raise ValueError('Unsupported backbone - `{}`, Use vgg16, resnet50.'.format(backbone))
        self.out_filters = [64, 128, 256, 512]

        # Decoder stages, deepest first.
        self.up_concat4 = UnetBlock(self.in_filters[3], self.out_filters[3])
        self.up_concat3 = UnetBlock(self.in_filters[2], self.out_filters[2])
        self.up_concat2 = UnetBlock(self.in_filters[1], self.out_filters[1])
        self.up_concat1 = UnetBlock(self.in_filters[0], self.out_filters[0])

        # ResNet50's stem halves the resolution before feat1, so one extra
        # 2x upsampling stage is needed to reach input resolution.
        if backbone == 'resnet50':
            self.up_conv = nn.Sequential(
                nn.UpsamplingBilinear2d(scale_factor=2),
                nn.Conv2d(self.out_filters[0], self.out_filters[0], kernel_size=3, padding=1),
                nn.ReLU(),
                nn.Conv2d(self.out_filters[0], self.out_filters[0], kernel_size=3, padding=1),
                nn.ReLU(),
            )
        else:
            self.up_conv = None

        # 1x1 convolution: changes only the channel count, to num_classes.
        self.final = nn.Conv2d(self.out_filters[0], num_classes, 1)

    def forward(self, x):
        """Encode then decode; returns (N, num_classes, H, W) logits.

        Feature maps for a 1x3x512x512 input:

        vgg16:
            feat1 (1, 64, 512, 512), feat2 (1, 128, 256, 256),
            feat3 (1, 256, 128, 128), feat4 (1, 512, 64, 64),
            feat5 (1, 512, 32, 32)

        resnet50:
            feat1 (1, 64, 112, 112), feat2 (1, 256, 56, 56),
            feat3 (1, 512, 28, 28), feat4 (1, 1024, 14, 14),
            feat5 (1, 2048, 7, 7)
        """
        feat1, feat2, feat3, feat4, feat5 = self.backbone(x)

        # Fuse skip connections from deepest to shallowest.
        dec4 = self.up_concat4(feat4, feat5)
        dec3 = self.up_concat3(feat3, dec4)
        dec2 = self.up_concat2(feat2, dec3)
        dec1 = self.up_concat1(feat1, dec2)

        if self.up_conv is not None:
            dec1 = self.up_conv(dec1)
        return self.final(dec1)

    def freeze_backbone(self):
        """Disable gradient updates for all encoder parameters."""
        for p in self.backbone.parameters():
            p.requires_grad = False

    def unfreeze_backbone(self):
        """Re-enable gradient updates for all encoder parameters."""
        for p in self.backbone.parameters():
            p.requires_grad = True

if __name__ == "__main__":
    # Smoke test: push a dummy 512x512 RGB batch through the default
    # (vgg16-backbone) network and print the output logits shape.
    unet = Unet()

    # Renamed from `input` — that name shadows the Python builtin.
    dummy = torch.randn((1, 3, 512, 512), dtype=torch.float32)
    output = unet(dummy)
    print(output.shape)  # expected: torch.Size([1, 21, 512, 512])

    # Optional ONNX export (uncomment to use):
    # torch.onnx.export(unet, args=(dummy,), f="unet.onnx",
    #                 input_names   = ['input'],
    #                 output_names  = ['output'],
    #                 opset_version = 12,
    #                 dynamic_axes  = None
    #             )