import torch
import torch.nn as nn
import torch.nn.functional as F

from .bconv import Bconv
from .cfm import CFM
from .fam import FAM
from .pdc import PDC
from .resnet import ResNet18,init_weight
from .rf import RF
from .sa import SA
from .sca import SpatialAttention, ChannelwiseAttention

class MyNet2(nn.Module):
    """Multi-scale fusion network on a ResNet18 backbone.

    The forward pass fuses the five backbone feature maps (X0..X4)
    top-down through RF/FAM modules, produces five single-channel
    prediction maps upsampled to the input resolution, and blends them
    with a learned per-pixel softmax mask into one fused map.

    forward(x) returns ``(out, out1, out2, out3, out4)``: the fused map
    followed by the four side outputs. The shape comments below assume a
    352x352 input (so X1 is 88x88) — taken from the original
    annotations; confirm against the ResNet18 wrapper.
    """

    def __init__(self):
        super(MyNet2, self).__init__()
        self.resnet = ResNet18()
        init_weight(self.resnet)
        self.downsample2 = nn.MaxPool2d(2, stride=2)

        # RF blocks reduce the channel count of concatenated skip features.
        self.rf1 = RF(768, 256)
        self.rf2 = RF(384, 128)
        self.rf3 = RF(256, 64)

        # FAM decoder chain, coarse to fine.
        self.fam0 = FAM(512, 256)
        self.fam1 = FAM(256, 128, True)
        self.fam2 = FAM(128, 64, True)
        self.fam3 = FAM(64, 64, True)

        # NOTE(review): conv1x1 is registered but never used in forward()
        # (its call site was commented out). Kept registered so existing
        # checkpoints containing this key still load without strict=False.
        self.conv1x1 = nn.Conv2d(64, 1, 1, 1, 0)
        self.SA = Bconv(192, 128, 3, padding=1)
        self.famout = FAM(128, 1)

        # Heads producing the four single-channel side outputs.
        self.famout1 = FAM(64, 1)
        self.famout2 = FAM(64, 1)
        self.famout3 = FAM(128, 1)
        self.famout4 = FAM(256, 1)

        # 5-channel mask: one softmax weight per prediction map.
        self.mask_conv = nn.Conv2d(1, 5, 3, padding=1)

    def forward(self, x):
        resnet_dict = self.resnet(x)
        x0 = resnet_dict['X0']      # 64 88 88
        x1 = resnet_dict['X1']      # 64 88 88
        x2 = resnet_dict['X2']      # 128 44 44
        x3 = resnet_dict['X3']      # 256 22 22
        x4 = resnet_dict['X4']      # 512 11 11

        # Top-down fusion of adjacent scales, channel-reduced by RF blocks.
        up_x4 = F.interpolate(x4, x3.size()[2:], mode='bilinear', align_corners=True)
        x34 = torch.cat((x3, up_x4), 1)
        x34 = self.rf1(x34)         # 256 22 22

        up_x34 = F.interpolate(x34, x2.size()[2:], mode='bilinear', align_corners=True)
        x234 = torch.cat((x2, up_x34), 1)
        x234 = self.rf2(x234)       # 128 44 44

        x01 = torch.cat((x0, x1), 1)
        x01 = self.downsample2(x01)
        x01234 = torch.cat((x01, x234), 1)
        x01234 = self.rf3(x01234)   # 64 44 44

        # FAM decoder, coarsest to finest.
        out4 = self.fam0(x4)                # 256 11 11
        out3 = self.fam1(out4, x34)         # 128 22 22
        out2 = self.fam2(out3, x234)        # 64 44 44
        out1 = self.fam3(out2, x01234)      # 64 44 44

        # Fused branch: finest decoder output + mid-level skip features.
        out = torch.cat((out1, x234), dim=1)    # 192 44 44
        out = self.SA(out)
        # F.interpolate replaces the original per-call nn.Upsample
        # construction — identical numerics, no module allocated each pass.
        out = F.interpolate(out, scale_factor=8, mode='bilinear', align_corners=True)
        out = self.famout(out)

        out1 = self.famout1(out1)
        out2 = self.famout2(out2)
        out3 = self.famout3(out3)
        out4 = self.famout4(out4)

        # Bring every side output up to the input resolution.
        out1 = F.interpolate(out1, scale_factor=8, mode='bilinear', align_corners=True)
        out2 = F.interpolate(out2, scale_factor=8, mode='bilinear', align_corners=True)
        out3 = F.interpolate(out3, scale_factor=16, mode='bilinear', align_corners=True)
        out4 = F.interpolate(out4, scale_factor=32, mode='bilinear', align_corners=True)

        # Per-pixel softmax weights over the 5 maps, then weighted sum.
        mask = self.mask_conv(out)
        mask = F.softmax(mask, dim=1)
        out = torch.cat((out, out1, out2, out3, out4), dim=1) * mask
        out = torch.sum(out, dim=1, keepdim=True)

        return out, out1, out2, out3, out4