# -*- coding: utf-8 -*-
"""
Copyright (c) 2019-present NAVER Corp.
MIT License
"""
import torch
import torch.nn as nn
import torch.nn.functional as F

from basenet.vgg16_bn import vgg16_bn, init_weights

class double_conv(nn.Module):
    """Fusion block for the CRAFT U-decoder.

    A 1x1 projection squeezes the concatenated input (in_ch + mid_ch
    channels) down to mid_ch, then a 3x3 convolution expands context and
    maps to out_ch; both convolutions are followed by BatchNorm + ReLU.

    Note: the first convolution expects the *concatenated* feature map,
    so the real input channel count is in_ch + mid_ch, not in_ch.
    """

    def __init__(self, in_ch, mid_ch, out_ch):
        super(double_conv, self).__init__()
        layers = [
            nn.Conv2d(in_ch + mid_ch, mid_ch, kernel_size=1),
            nn.BatchNorm2d(mid_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        # (N, in_ch + mid_ch, H, W) -> (N, out_ch, H, W); spatial size
        # is unchanged (1x1 conv, then 3x3 conv with padding=1).
        return self.conv(x)


class CRAFT(nn.Module):
    """CRAFT text detector: VGG16-BN backbone plus a U-shaped decoder.

    The backbone yields five feature maps at decreasing stride; the
    decoder upsamples and fuses them top-down, and a small conv head
    produces a single-channel score map.
    """

    def __init__(self, model_config: dict):
        """Build the network.

        Args:
            model_config: dict with keys
                'pretrained' -- load pretrained backbone weights
                'freeze'     -- freeze (part of) the backbone
        """
        super(CRAFT, self).__init__()
        pretrained = model_config['pretrained']
        freeze = model_config['freeze']

        # Base network (feature extractor).
        self.basenet = vgg16_bn(pretrained, freeze)

        # U network: each double_conv fuses the upsampled decoder path
        # with one backbone skip connection (hence the paired channel
        # counts: input is in_ch + mid_ch after concatenation).
        self.upconv1 = double_conv(1024, 512, 256)
        self.upconv2 = double_conv(512, 256, 128)
        self.upconv3 = double_conv(256, 128, 64)
        self.upconv4 = double_conv(128, 64, 32)

        num_class = 1
        self.conv_cls = nn.Sequential(
            nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(32, 16, kernel_size=3, padding=1), nn.ReLU(inplace=True),
            nn.Conv2d(16, 16, kernel_size=1), nn.ReLU(inplace=True),
            nn.Conv2d(16, num_class, kernel_size=1),
        )

        # Initialize every freshly created module (backbone handles its
        # own init/pretraining inside vgg16_bn).
        for block in (self.upconv1, self.upconv2, self.upconv3,
                      self.upconv4, self.conv_cls):
            init_weights(block.modules())

    def forward(self, x):
        """Run the detector on an image batch.

        Args:
            x: image tensor of shape (N, 3, H, W).

        Returns:
            Tuple (score, feature):
              score   -- (N, H/2, W/2, 1) channels-last score map
              feature -- (N, 32, H/2, W/2) decoder feature map
        """
        # Backbone outputs, deepest first:
        # [fc7, relu5_3, relu4_3, relu3_2, relu2_2]
        # e.g. for a 768x768 input:
        #   fc7:     (1024, 48, 48)  stride 1/16
        #   relu5_3: ( 512, 48, 48)  stride 1/16
        #   relu4_3: ( 512, 96, 96)  stride 1/8
        #   relu3_2: ( 256,192,192)  stride 1/4
        #   relu2_2: ( 128,384,384)  stride 1/2
        sources = self.basenet(x)

        # Deepest stage: fc7 and relu5_3 share the same resolution,
        # so they are fused directly without upsampling.
        feat = self.upconv1(torch.cat([sources[0], sources[1]], dim=1))

        # Remaining stages: upsample to the skip's spatial size,
        # concatenate along channels, then fuse.
        for skip, fuse in ((sources[2], self.upconv2),
                           (sources[3], self.upconv3),
                           (sources[4], self.upconv4)):
            feat = F.interpolate(feat, size=skip.size()[2:],
                                 mode='bilinear', align_corners=False)
            feat = fuse(torch.cat([feat, skip], dim=1))

        # Score head. (Original CRAFT emits 2 channels; this variant
        # was changed to a single channel.)
        score = self.conv_cls(feat)

        # Channels-last score map plus the raw 32-channel features.
        return score.permute(0, 2, 3, 1), feat

if __name__ == '__main__':
    # Smoke test: one forward pass on a dummy 768x768 image.
    # Fall back to CPU when CUDA is unavailable instead of crashing
    # with an unconditional .cuda() call.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = CRAFT({"pretrained": True, "freeze": False}).to(device)
    output, feature = model(torch.randn(1, 3, 768, 768, device=device))
    print(output.shape, feature.shape)