import torch
import torch.nn as nn
import torch.nn.functional as F
from math import log
from net.res2net import res2net50_v1b_26w_4s
from net.swin_transformer import SwinTransformer
from net.DyConv import Dynamic_conv2d  
from net.CoTlayer import CotLayer

# Conv -> BatchNorm -> ReLU.  With the default 3x3 kernel and padding equal to
# the dilation, spatial size is preserved, so this block is used to change the
# channel count only.
class ConvBNR(nn.Module):
    def __init__(self, inchannels, outchannels, kernel_size=3, stride=1, dilation=1, bias=False):
        super(ConvBNR, self).__init__()

        # Build as a list first, then wrap in Sequential (keeps the same
        # "block.0/1/2" state_dict keys as before).
        layers = [
            nn.Conv2d(inchannels, outchannels, kernel_size,
                      stride=stride, padding=dilation,
                      dilation=dilation, bias=bias),
            nn.BatchNorm2d(outchannels),
            nn.ReLU(inplace=True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        out = self.block(x)
        return out

# 1x1 convolution block: fuses same-position channel information and expands
# semantics.  Uses dynamic convolution (Dynamic_conv2d), which carries an
# attention mechanism.
class Conv1x1(nn.Module):
    def __init__(self, inchannels, outchannels):
        super(Conv1x1, self).__init__()
        self.conv = Dynamic_conv2d(inchannels, outchannels, 1)
        self.bn = nn.BatchNorm2d(outchannels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # conv -> bn -> relu, expressed as a single chained call
        return self.relu(self.bn(self.conv(x)))

# Classification fine-tune Module
class CFM(nn.Module):
    """Halve the spatial size, remap channels, and classify the flattened map.

    Args:
        inchannels: channels of the input feature map.
        outchannels: channels produced by the internal ConvBNR.
        wh: side length of the (square) input feature map.
        num_classes: classifier output size (default 78, the original
            hard-coded value, so existing callers are unchanged).

    Returns (from forward): the pooled+convolved feature map and the logits.
    """
    def __init__(self, inchannels, outchannels, wh, num_classes=78):
        super(CFM, self).__init__()
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv = ConvBNR(inchannels, outchannels)
        # AvgPool2d(2, 2) floors each side to wh // 2, so the flattened size is
        # (wh // 2) ** 2 * outchannels.  The previous int(wh*wh*outchannels/4)
        # is identical for even wh but wrong for odd wh.
        self.classifier = nn.Linear((wh // 2) * (wh // 2) * outchannels, num_classes)

    def forward(self, x):
        # x: [bs, inchannels, wh, wh]
        x1 = self.conv(self.pool(x))      # [bs, outchannels, wh//2, wh//2]
        x2 = x1.view(x1.size(0), -1)      # flatten per sample
        out = self.classifier(x2)         # [bs, num_classes]
        return x1, out

# Detail Generate Module: fuses the (upsampled) classification feature with the
# low-level detail feature and predicts a single-channel detail map.
class DGM(nn.Module):
    def __init__(self):
        super(DGM, self).__init__()
        self.deDim1 = Conv1x1(256, 64)    # classification stream
        self.deDim2 = Conv1x1(256, 256)   # detail stream
        self.fusionblock = nn.Sequential(
            ConvBNR(256 + 64, 256, 3),
            ConvBNR(256, 256, 3),
        )
        self.deDim3 = nn.Conv2d(256, 1, 1)

    def forward(self, x_c, x_e):
        # The output follows x_e's spatial size ([bs, c, h, w] -> take h, w).
        target_size = x_e.size()[2:]
        cls_feat = F.interpolate(self.deDim1(x_c), target_size,
                                 mode='bilinear', align_corners=False)
        det_feat = self.deDim2(x_e)
        fused = self.fusionblock(torch.cat((cls_feat, det_feat), dim=1))
        detail = self.deDim3(fused)
        return fused, detail

class ResOutBlock(nn.Module):
    # Learns a per-pixel gate alpha from the two predictions and blends them:
    # out = alpha * cnn_out + (1 - alpha) * trf_out.
    def __init__(self):
        super(ResOutBlock, self).__init__()
        self.AlphaGenerateBlock = nn.Sequential(
            Dynamic_conv2d(2, 1, kernel_size=3, stride=1, padding=1,
                           dilation=1, bias=False),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )

    def forward(self, cnn_out, trf_out):
        stacked = torch.cat((cnn_out, trf_out), dim=1)
        alpha = self.AlphaGenerateBlock(stacked)
        blended = alpha * cnn_out + (1 - alpha) * trf_out
        return blended
        
class Cot_Conv(nn.Module):
    """Contextual-transformer refinement head: ConvBNR -> CotLayer -> 1x1 conv -> BN.

    Args:
        inchannels: channels of the input feature map.
        outchannels: channels of the produced map.
    """
    def __init__(self, inchannels, outchannels):
        super(Cot_Conv, self).__init__()
        # Round the intermediate width down to a multiple of 32 — presumably so
        # CotLayer's grouped operations divide evenly; TODO confirm vs CotLayer.
        std = (inchannels // 32) * 32
        self.cot_conv = nn.Sequential(
            ConvBNR(inchannels, std, 3),
            CotLayer(std, 3),
            Conv1x1(std, outchannels),
            # Was nn.BatchNorm2d(1), which only works when outchannels == 1;
            # normalize over the actual output channel count instead
            # (identical behavior for the existing Cot_Conv(219, 1) caller).
            nn.BatchNorm2d(outchannels)
        )

    def forward(self, trf_out):
        out = self.cot_conv(trf_out)
        return out


## 模型输入大小为 448*448
class SemanticNet(nn.Module):
    def __init__(self):
        super(SemanticNet, self).__init__()
        self.resnet = res2net50_v1b_26w_4s(pretrained=True)
        # if self.training:
        # self.initialize_weights()
        self.wh = 14   #finetune 输入二维特征图的边长
        self.transformer_size = [224,224]
        self.cfm = CFM(2048, 256,self.wh)

        self.dgm = DGM()

        self.convC = Conv1x1(256, 256)

        self.convDF = Conv1x1(512, 256)
        self.convCF = Conv1x1(768, 256)

        self.swin_transformer = SwinTransformer(img_size=224, patch_size=4, in_chans=512,depths=[2, 2, 6, 2]) # other params default
        
        self.convLast = Cot_Conv(219, 1)
        self.resout = ResOutBlock()

    def forward(self,x):
        x1, x2, x3, x4 = self.resnet(x)   # x1: 112*112*256   x2: 56*56*512   x3: 28*28*1024   x4: 14*14*2048

        x5, classifier = self.cfm(x4)  # x5 是类别特征图 7*7*256        classifier是分类结果 78*1
        x6 = self.convC(x5)      
        x6 = F.interpolate(x6, x2.size()[2:], mode='bilinear', align_corners=False) # x6: 56*56*256
        
        detail_information, detail = self.dgm(x5, x1)                                        # detail: 112*112*1
        detail_map = torch.sigmoid(detail) # edge_map is 0-1 for dice loss

        
        detail_information = torch.cat((detail_information,x1), dim=1)   # 112*112*(256+256)
        
        cls = torch.cat((x6, x2), dim=1)               
        cls = self.convCF(cls)     # 56*56*128
        cls = F.interpolate(cls, x1.size()[2:], mode='bilinear', align_corners=False)#  112*112*256
        
        detail_cls_pixel = torch.cat((self.convDF(detail_information),cls),dim=1)   # 112*112*512

        detail_cls_pixel = F.interpolate(detail_cls_pixel, self.transformer_size, mode='bilinear', align_corners=False) # 224*224*512

        sw_out = self.swin_transformer(detail_cls_pixel)
        sw1 = F.interpolate(sw_out[0], x1.size()[2:], mode='bilinear', align_corners=False)
        sw2 = F.interpolate(sw_out[1], x1.size()[2:], mode='bilinear', align_corners=False)
        sw3 = F.interpolate(sw_out[2], x1.size()[2:], mode='bilinear', align_corners=False)

        trf_out = torch.cat((torch.cat((sw1, sw2), dim=1),sw3),dim=1)
        out = self.resout(self.convLast(trf_out),detail)

        return classifier, detail_map,out 
