# -*- coding: utf-8 -*-


import torch
from torchvision import models
import torch.nn as nn
from model.resnet import resnet34, resnet101
from torch.nn import functional as F
import torchsummary
from torch.nn import init
from model.Aspp import build_aspp

# Shared kwargs for F.interpolate: bilinear upsampling with aligned corners.
up_kwargs = {'mode': 'bilinear', 'align_corners': True}


class BACPFNet(nn.Module):
    """Boundary-attention CPF segmentation network.

    A ResNet encoder feeds two DeepLab-style heads: ``head_b`` predicts a
    boundary map whose sigmoid is used as a spatial attention map, and
    ``head`` produces the final segmentation guided by that attention.
    ``forward`` returns ``(segmentation, boundary)``, both sigmoid-activated
    and upsampled back to the input resolution.
    """
    #  'BaseNet': CPFNet(out_planes=args.num_classes)
    def __init__(self, out_planes=1, ccm=True, norm_layer=nn.BatchNorm2d, is_training=True, expansion=2,
                 base_channel=32):
        # NOTE(review): ccm, norm_layer, is_training, expansion and
        # base_channel are accepted but never used in this class.
        super(BACPFNet, self).__init__()
        self.out_chanel = out_planes  # NOTE(review): 'chanel' typo kept; also unused below

        # Backbone choice; per-stage channel widths depend on it.
        self.network = "resnet34"
        # self.network = "resnet101"

        if self.network == "resnet34":
            self.backbone = resnet34(pretrained=True)
            self.in_c = [64, 64, 128, 256, 512]
        elif self.network == "resnet101":
            self.backbone = resnet101(pretrained=True)
            self.in_c = [64, 256, 512, 1024, 2048]

        # Stage channel counts consumed by the heads and fusion blocks.
        c2_channels = self.in_c[-4]
        c3_channel = self.in_c[-3]
        c4_channel = self.in_c[-2]  # NOTE(review): computed but unused
        c5_channels = self.in_c[-1]

        # Attention-guided re-weighting of the encoder features.
        self.fus_head2 = FusHead(inplane=c2_channels)
        self.fus_head5 = FusHead(inplane=c5_channels)
        self.fus_head3 = FusHead(inplane=c3_channel)

        # Main (attention-fused) segmentation head and boundary head.
        self.head = _DeepLabHead_attention(1, c1_channels=c2_channels, c4_channels=c5_channels, c2_channel=c3_channel)
        self.head_b = _DeepLabHead(1, c1_channels=c2_channels, c4_channels=c5_channels)


    def forward(self, x):
        """Return (segmentation, boundary) probability maps at x's resolution."""
        size = x.size()[2:]
        # Encoder stem.
        x = self.backbone.conv1(x)
        x = self.backbone.bn1(x)
        x1 = self.backbone.relu(x)  # 1/2      64, 128, 128
        x = self.backbone.maxpool(x1)

        # Encoder stages (shape comments assume a 256x256 input — confirm).
        x2 = self.backbone.layer1(x)  # 1/4      64, 64, 64
        x3 = self.backbone.layer2(x2)  # 1/8      128, 32, 32
        x4 = self.backbone.layer3(x3)  # 1/8      256, 32, 32
        x5 = self.backbone.layer4(x4)  # 1/8      512, 32, 32

        # Boundary head first; its sigmoid becomes the attention map.
        x_b = self.head_b(x5, x2)
        attention_map = x_b.sigmoid()

        # Re-weight encoder features with the boundary attention.
        x2 = self.fus_head2(x2, attention_map)
        x5 = self.fus_head5(x5, attention_map)
        x3 = self.fus_head3(x3, attention_map)

        x = self.head(x5, x3, x2, attention_map)

        # Upsample both outputs to the input resolution.
        x = F.interpolate(x, size, mode='bilinear', align_corners=True)
        x_b = F.interpolate(x_b, size, mode='bilinear', align_corners=True)

        x = x.sigmoid()
        x_b = x_b.sigmoid()

        # print(x.size())
        # print(x_b.size())

        return x, x_b

    def _initialize_weights(self):
        """Kaiming-uniform init for convs, N(1, 0.02)/zero init for BatchNorm.

        NOTE(review): never called in this file — the constructor relies on
        the pretrained backbone weights instead.
        """
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)
            # m.weight.data.fill_(1)
            # m.bias.data.zero_()=2-0


class _DeepLabHead_attention(nn.Module):
    """DeepLabV3+-style head with two skip fusions and boundary attention.

    Pipeline: ASPP(x) -> upsample to c2 -> concat(reduced c2) -> FusHead ->
    refine -> upsample to c1 -> concat(reduced c1) -> FusHead -> classifier.

    Args:
        nclass: number of output channels of the final 1x1 classifier.
        c1_channels: channels of the shallow skip feature ``c1``.
        c4_channels: channels of the deep feature fed to ASPP.
        c2_channel: channels of the intermediate skip feature ``c2``.
        norm_layer: normalisation layer used inside the conv blocks.
    """

    def __init__(self, nclass, c1_channels=256, c4_channels=2048, c2_channel=512, norm_layer=nn.BatchNorm2d):
        super(_DeepLabHead_attention, self).__init__()
        # Mirrors the original config switches; both stages are always on.
        self.use_aspp = True
        self.use_decoder = True

        aspp_out = 256  # channels produced by the ASPP stage
        c1_red = 48     # width of the reduced c1 skip feature
        c2_red = 24     # width of the reduced c2 skip feature

        if self.use_aspp:
            self.aspp = _ASPP(c4_channels, aspp_out)
        if self.use_decoder:
            self.c1_block = _ConvBNReLU(c1_channels, c1_red, 1, norm_layer=norm_layer)
            self.c2_block = _ConvBNReLU(c2_channel, c2_red, 1, norm_layer=norm_layer)

        # Channel widths after each fusion stage, derived from the constants
        # above. (Fix: the original re-hard-coded 256/24/48 in several places
        # and maintained a separate `last_channels` counter that was never
        # used, so the two could silently diverge.)
        c2_fused = aspp_out + c2_red   # after concatenating reduced c2
        c1_fused = c2_fused + c1_red   # after concatenating reduced c1

        # Final refinement + classifier, applied after the c1 fusion.
        self.block = nn.Sequential(
            SeparableConv2d(c1_fused, 256, 3, norm_layer=norm_layer, relu_first=False),
            SeparableConv2d(256, 256, 3, norm_layer=norm_layer, relu_first=False),
            nn.Conv2d(256, nclass, 1))

        # Intermediate refinement applied after the c2 fusion.
        self.block_c2 = nn.Sequential(
            SeparableConv2d(c2_fused, c2_fused, 3, norm_layer=norm_layer, relu_first=False),
            SeparableConv2d(c2_fused, c2_fused, 3, norm_layer=norm_layer, relu_first=False))

        # Boundary-attention fusion blocks for the two stages.
        self.fus_head_c2 = FusHead(inplane=c2_fused)
        self.fus_head_c1 = FusHead(inplane=c1_fused)

    def forward(self, x, c2, c1, attention_map):
        """x: deep features; c2/c1: skip features; attention_map: boundary map."""
        c1_size = c1.size()[2:]
        c2_size = c2.size()[2:]
        if self.use_aspp:
            x = self.aspp(x)
        if self.use_decoder:
            # Stage 1: fuse with the intermediate skip feature c2.
            x = F.interpolate(x, c2_size, mode='bilinear', align_corners=True)
            c2 = self.c2_block(c2)
            x = torch.cat([x, c2], dim=1)
            x = self.fus_head_c2(x, attention_map)
            x = self.block_c2(x)

            # Stage 2: fuse with the shallow skip feature c1 and classify.
            x = F.interpolate(x, c1_size, mode='bilinear', align_corners=True)
            c1 = self.c1_block(c1)
            x = torch.cat([x, c1], dim=1)
            x = self.fus_head_c1(x, attention_map)
            return self.block(x)

        return self.block(x)


class FusHead(nn.Module):
    """Attention-guided feature fusion with SE-style channel re-weighting.

    The input feature map ``c`` is multiplied by the (resized) attention map,
    concatenated with the original features, compressed back to ``inplane``
    channels, and finally re-weighted channel-wise by a squeeze-and-excitation
    gate.

    Args:
        norm_layer: normalisation layer for the fusion conv.
        inplane: channel width of ``c`` (must be >= 16 for the SE bottleneck).
    """

    def __init__(self, norm_layer=nn.BatchNorm2d, inplane=256):
        super(FusHead, self).__init__()
        # Fuses [c, c * att] (2 * inplane channels) back down to inplane.
        self.conv1 = SeparableConv2d(inplane*2, inplane, 3, norm_layer=norm_layer, relu_first=False)
        # Squeeze-and-excitation bottleneck (reduction factor 16).
        self.fc1 = nn.Conv2d(inplane, inplane // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(inplane // 16, inplane, kernel_size=1)

    def forward(self, c, att_map):
        # Resize the attention map to c's spatial size when they differ.
        if c.size() != att_map.size():
            att_map = F.interpolate(att_map, c.size()[2:], mode='bilinear', align_corners=True)

        atted_c = c * att_map
        x = torch.cat([c, atted_c], 1)  # 2 * inplane channels
        x = self.conv1(x)               # back to inplane channels

        # Global average pooling. Fix: the original F.avg_pool2d(x, x.size(2))
        # assumed square feature maps; adaptive pooling is identical for
        # square inputs and also supports H != W.
        weight = F.adaptive_avg_pool2d(x, 1)
        weight = F.relu(self.fc1(weight))
        weight = torch.sigmoid(self.fc2(weight))
        return x * weight


class _DeepLabHead(nn.Module):
    """DeepLabV3+-style head: ASPP over the deep features, then a single skip
    fusion with the low-level feature map ``c1`` and a 1x1 classifier."""

    def __init__(self, nclass, c1_channels=256, c4_channels=2048, norm_layer=nn.BatchNorm2d):
        super(_DeepLabHead, self).__init__()
        # Both stages are always enabled; the flags mirror config switches
        # from the original DeepLab implementation.
        self.use_aspp = True
        self.use_decoder = True

        head_in = c4_channels
        if self.use_aspp:
            # ASPP projects the deep features down to 256 channels.
            self.aspp = _ASPP(c4_channels, 256)
            head_in = 256
        if self.use_decoder:
            # Low-level skip features are reduced to 48 channels.
            self.c1_block = _ConvBNReLU(c1_channels, 48, 1, norm_layer=norm_layer)
            head_in += 48

        # Two separable-conv refinements followed by the classifier.
        self.block = nn.Sequential(
            SeparableConv2d(head_in, 256, 3, norm_layer=norm_layer, relu_first=False),
            SeparableConv2d(256, 256, 3, norm_layer=norm_layer, relu_first=False),
            nn.Conv2d(256, nclass, 1))

    def forward(self, x, c1):
        feats = self.aspp(x) if self.use_aspp else x
        if not self.use_decoder:
            return self.block(feats)
        # Upsample the deep features to c1's resolution and fuse.
        feats = F.interpolate(feats, c1.size()[2:], mode='bilinear', align_corners=True)
        low = self.c1_block(c1)
        return self.block(torch.cat([feats, low], dim=1))


# -----------------------------------------------------------------
#                      For deeplab
# -----------------------------------------------------------------
class _ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling (DeepLab).

    Five parallel branches — a 1x1 conv, three dilated separable 3x3 convs
    and a global image-pooling branch — are concatenated and projected back
    to ``out_channels``.

    Args:
        in_channels: channels of the input feature map.
        out_channels: channels of every branch and of the output.
        output_stride: backbone output stride selecting the dilation rates.
            Generalized: this was a hard-coded local constant (8); the
            default preserves the original behavior.
    """

    def __init__(self, in_channels=2048, out_channels=256, output_stride=8):
        super().__init__()
        if output_stride == 8:
            dilations = [12, 24, 36]
        elif output_stride in (16, 32):
            # The original listed 16 and 32 separately with identical rates.
            dilations = [6, 12, 18]
        else:
            raise NotImplementedError('unsupported output_stride: {}'.format(output_stride))

        # Branch 0: plain 1x1 projection.
        self.aspp0 = nn.Sequential(nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                   nn.BatchNorm2d(out_channels),
                                   nn.ReLU(inplace=True))
        # Branches 1-3: dilated separable convs at increasing rates.
        self.aspp1 = SeparableConv2d(in_channels, out_channels, dilation=dilations[0], relu_first=False)
        self.aspp2 = SeparableConv2d(in_channels, out_channels, dilation=dilations[1], relu_first=False)
        self.aspp3 = SeparableConv2d(in_channels, out_channels, dilation=dilations[2], relu_first=False)

        # Branch 4: global context via image pooling.
        self.image_pooling = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),
                                           nn.Conv2d(in_channels, out_channels, 1, bias=False),
                                           nn.BatchNorm2d(out_channels),
                                           nn.ReLU(inplace=True))

        # Projection of the 5-way concatenation back to out_channels.
        self.conv = nn.Conv2d(out_channels*5, out_channels, 1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.dropout = nn.Dropout2d(p=0.1)

    def forward(self, x):
        # Broadcast the pooled global context back to x's spatial size.
        pool = self.image_pooling(x)
        pool = F.interpolate(pool, size=x.shape[2:], mode='bilinear', align_corners=True)

        x0 = self.aspp0(x)
        x1 = self.aspp1(x)
        x2 = self.aspp2(x)
        x3 = self.aspp3(x)
        x = torch.cat((pool, x0, x1, x2, x3), dim=1)

        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.dropout(x)

        return x


class _ConvBNReLU(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu6=False, norm_layer=nn.BatchNorm2d):
        super(_ConvBNReLU, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias=False)
        self.bn = norm_layer(out_channels)
        self.relu = nn.ReLU6(True) if relu6 else nn.ReLU(True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x


class SAPblock(nn.Module):
    """Scale-Aware Pyramid block (CPFNet).

    Three 3x3 branches share a single kernel (``self.conv3x3``) applied at
    dilations 1, 2 and 4, then are merged pairwise by learned 2-channel
    spatial attention maps. A learned scalar ``gamma`` blends the attended
    result with the input before a final 1x1 ConvBnRelu.
    """

    def __init__(self, in_channels):
        super(SAPblock, self).__init__()
        # Single shared 3x3 kernel; branches 2/3 reuse its weight at larger
        # dilations (the bias is only applied in branch 1 via the module call).
        self.conv3x3 = nn.Conv2d(in_channels=in_channels, out_channels=in_channels, dilation=1, kernel_size=3,
                                 padding=1)

        # One BatchNorm per dilation branch.
        self.bn = nn.ModuleList([nn.BatchNorm2d(in_channels), nn.BatchNorm2d(in_channels), nn.BatchNorm2d(in_channels)])
        # Two attention stacks: [0] fuses branches 1+2, [1] fuses the result
        # with branch 3.
        self.conv1x1 = nn.ModuleList(
            [nn.Conv2d(in_channels=2 * in_channels, out_channels=in_channels, dilation=1, kernel_size=1, padding=0),
             nn.Conv2d(in_channels=2 * in_channels, out_channels=in_channels, dilation=1, kernel_size=1, padding=0)])
        self.conv3x3_1 = nn.ModuleList(
            [nn.Conv2d(in_channels=in_channels, out_channels=in_channels // 2, dilation=1, kernel_size=3, padding=1),
             nn.Conv2d(in_channels=in_channels, out_channels=in_channels // 2, dilation=1, kernel_size=3, padding=1)])
        self.conv3x3_2 = nn.ModuleList(
            [nn.Conv2d(in_channels=in_channels // 2, out_channels=2, dilation=1, kernel_size=3, padding=1),
             nn.Conv2d(in_channels=in_channels // 2, out_channels=2, dilation=1, kernel_size=3, padding=1)])
        self.conv_last = ConvBnRelu(in_planes=in_channels, out_planes=in_channels, ksize=1, stride=1, pad=0, dilation=1)

        # Learned residual gate, initialised to 0 so the block starts close
        # to an identity mapping (up to conv_last).
        self.gamma = nn.Parameter(torch.zeros(1))

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Three dilated branches sharing conv3x3's kernel.
        branches_1 = self.bn[0](self.conv3x3(x))
        branches_2 = self.bn[1](F.conv2d(x, self.conv3x3.weight, padding=2, dilation=2))  # shared weight
        branches_3 = self.bn[2](F.conv2d(x, self.conv3x3.weight, padding=4, dilation=4))  # shared weight

        # Stage 1: 2-way spatial attention over branches 1 and 2.
        feat = torch.cat([branches_1, branches_2], dim=1)
        feat = self.relu(self.conv1x1[0](feat))
        feat = self.relu(self.conv3x3_1[0](feat))
        att = F.softmax(self.conv3x3_2[0](feat), dim=1)
        fusion_1_2 = att[:, 0:1] * branches_1 + att[:, 1:2] * branches_2

        # Stage 2: attention between the stage-1 fusion and branch 3.
        # Fix: use the second attention stack (index 1). The original reused
        # index 0, leaving conv1x1[1] / conv3x3_1[1] / conv3x3_2[1]
        # constructed but never exercised.
        feat1 = torch.cat([fusion_1_2, branches_3], dim=1)
        feat1 = self.relu(self.conv1x1[1](feat1))
        feat1 = self.relu(self.conv3x3_1[1](feat1))
        att1 = F.softmax(self.conv3x3_2[1](feat1), dim=1)

        fused = att1[:, 0:1] * fusion_1_2 + att1[:, 1:2] * branches_3
        ax = self.relu(self.gamma * fused + (1 - self.gamma) * x)
        return self.conv_last(ax)


class Decoder(nn.Module):
    """Top-down decoder: fuses the encoder features (deepest first) with the
    boundary feature ``bos5`` through a chain of DecoderUnit steps.

    Args:
        in_channels: per-stage encoder channel widths (indexed from the end).
        bos_c: channels of the boundary feature fused at the deepest level.
        in_de_c: passed through to DecoderUnit.
        task_type: kept for interface compatibility; it currently does not
            change the returned value (see forward()).
    """

    def __init__(self, in_channels, bos_c, in_de_c=256, task_type=False):
        super(Decoder, self).__init__()
        self.task_type = task_type

        self.fuse_x5_bos5 = DecoderUnit(in_channels[-1], bos_c, in_de_c, 32)
        self.fuse_x4_x5 = DecoderUnit(in_channels[-2], in_channels[-1], in_de_c, 32)
        self.fuse_x3_x4 = DecoderUnit(in_channels[-3], in_channels[-2], in_de_c, 32)
        self.fuse_x2_x3 = DecoderUnit(in_channels[-4], in_channels[-3], in_de_c, 64)

    # self.fuse_x1_x2 = DecoderUnit(in_channels[-5], in_channels[-4], in_de_c)

    def forward(self, bos5, *inputs):
        # inputs = [ed1 ed2 ed3 ed4 ed5]   bos5
        d5 = self.fuse_x5_bos5(inputs[-1], bos5)
        d4 = self.fuse_x4_x5(inputs[-2], d5)
        d3 = self.fuse_x3_x4(inputs[-3], d4)
        d2 = self.fuse_x2_x3(inputs[-4], d3)
        # d1 = self.fuse_x1_x2(inputs[-5], d2)

        # Fix: the original if/elif returned the identical tuple on both
        # branches of task_type; collapsed to one unconditional return.
        return d2, d3, d4, d5


class DecoderUnit(nn.Module):
    """One top-down decoder step.

    Aligns the deeper feature ``back`` with the shallower ``pre`` in channels
    (1x1 conv) and resolution (bilinear upsampling), fuses them with a 3x3
    conv, and gates ``pre`` with a channel-attention vector before adding the
    upsampled ``back`` as a skip.

    Args:
        pre_c: channels of the shallower (higher-resolution) feature; also
            the output width.
        back_c: channels of the deeper feature.
        in_de_c: accepted for interface compatibility; not used here.
        pool_size: legacy average-pool kernel size; the attention path now
            uses global adaptive pooling (see forward()), so this only
            configures the retained (unused) ``self.pool`` module.
    """

    def __init__(self, pre_c, back_c, in_de_c, pool_size):
        super(DecoderUnit, self).__init__()
        self.out_c = pre_c
        # Channel projection for the deeper feature.
        self.conv_1x1_back = nn.Sequential(
            nn.Conv2d(back_c, self.out_c, 1, bias=False),
            nn.BatchNorm2d(self.out_c),
            nn.ReLU(inplace=True))

        # Channel projection for the shallower feature (applied only when
        # its width differs from out_c).
        self.conv_1x1_pre = nn.Sequential(
            nn.Conv2d(pre_c, self.out_c, 1, bias=False),
            nn.BatchNorm2d(self.out_c),
            nn.ReLU(inplace=True))

        # Fusion of the concatenated pair back to out_c channels.
        self.conv3x3 = nn.Sequential(
            nn.Conv2d(2 * self.out_c, self.out_c, 3, padding=1, bias=False),
            nn.BatchNorm2d(self.out_c),
            nn.ReLU(inplace=True))

        self.sigmoid = torch.nn.Sigmoid()

        # Kept for checkpoint/attribute compatibility; no longer used in
        # forward() (replaced by global adaptive pooling).
        self.pool = nn.AvgPool2d(pool_size)

        # Channel-attention bottleneck (reduction 16).
        self.att_1x1_1 = nn.Sequential(
            nn.Conv2d(self.out_c, int(self.out_c / 16), 1, padding=0, bias=False),
            nn.ReLU(inplace=True))
        self.att_1x1_2 = nn.Sequential(
            nn.Conv2d(int(self.out_c / 16), self.out_c, 1, padding=0, bias=False))

        # Kaiming-uniform for convs, N(1, 0.02)/zero for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, pre, back):
        # Project the deeper feature to out_c channels if needed.
        if back.size()[1] != self.out_c:
            back = self.conv_1x1_back(back)

        # Upsample the deeper feature to pre's spatial size if needed.
        if back.size()[2:] != pre.size()[2:]:
            back = F.interpolate(back, pre.size()[2:], mode='bilinear', align_corners=True)

        if pre.size()[1] != self.out_c:
            pre = self.conv_1x1_pre(pre)

        fusion = torch.cat([pre, back], dim=1)
        fusion = self.conv3x3(fusion)
        # Channel attention. Generalized: the original used a fixed
        # AvgPool2d(pool_size), which only yields the intended 1x1 attention
        # vector when the fused map is exactly pool_size x pool_size; global
        # adaptive pooling matches that case and supports any input size.
        pool_att = F.adaptive_avg_pool2d(fusion, 1)
        pool_att = self.att_1x1_1(pool_att)
        pool_att = self.att_1x1_2(pool_att)
        pool_att = self.sigmoid(pool_att)
        return pre * pool_att + back


class BaSm(nn.Module):
    """Boundary-aware smoothing: modulates features by a 1-channel edge map
    and re-weights the fused result with an SE-style channel gate.

    Args:
        inchannels: input width of ``conv1x1``. NOTE(review): ``conv1x1`` is
            built but never used in forward(); it is kept so existing
            checkpoints still load.
        in_de_c: channel width of the feature map ``x`` (must be >= 16 for
            the SE bottleneck).
    """

    def __init__(self, inchannels, in_de_c):
        super(BaSm, self).__init__()
        # NOTE(review): unused in forward(); retained for compatibility.
        self.conv1x1 = nn.Sequential(
            nn.Conv2d(inchannels, in_de_c, 3, padding=1, bias=False),
            nn.BatchNorm2d(in_de_c),
            nn.ReLU(inplace=True))

        self.sigmoid = nn.Sigmoid()
        # Fuses [edge * x, edge] (in_de_c + 1 channels) down to in_de_c.
        self.conv3x3 = nn.Conv2d(in_de_c + 1, in_de_c, 3, padding=1, bias=False)
        self.dropout = nn.Dropout(0.1)
        # SE bottleneck (reduction 16).
        self.fc1 = nn.Conv2d(in_de_c, in_de_c // 16, kernel_size=1)
        self.fc2 = nn.Conv2d(in_de_c // 16, in_de_c, kernel_size=1)

        # Kaiming-uniform for convs, N(1, 0.02)/zero for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight.data, 1.0, 0.02)
                init.constant_(m.bias.data, 0.0)

    def forward(self, edge, x):
        # Bring the edge map to x's spatial resolution.
        edge = F.interpolate(edge, x.size()[2:], mode='bilinear', align_corners=True)
        x2 = edge * x  # edge (1 channel) broadcasts across x's channels
        fusion = torch.cat([x2, edge], dim=1)
        fusion = self.conv3x3(fusion)
        # Global average pool. Fix: F.avg_pool2d(fusion, fusion.size(2))
        # assumed square maps; adaptive pooling is identical for square
        # inputs and supports H != W.
        w = F.adaptive_avg_pool2d(fusion, 1)
        w = F.relu(self.fc1(w))
        # Fix: F.sigmoid is deprecated; torch.sigmoid is the supported API.
        w = torch.sigmoid(self.fc2(w))
        return w * fusion


class BaseNetHead(nn.Module):
    """Prediction head: optional bilinear upscaling, a 1x1 -> 3x3 refinement
    stack (64 channels wide when ``is_aux`` else 32), then a 1x1 classifier.
    """
    #                  64            1              是否中间缩一下通道数到32
    # BaseNetHead(spatial_ch[0], out_planes, 2, is_aux=False, norm_layer=norm_layer)
    def __init__(self, in_planes, out_planes, scale, is_aux=False, norm_layer=nn.BatchNorm2d):
        super(BaseNetHead, self).__init__()
        # Auxiliary heads use a wider (64-ch) intermediate stack.
        mid = 64 if is_aux else 32
        self.conv_1x1_3x3 = nn.Sequential(
            ConvBnRelu(in_planes, mid, 1, 1, 0, has_bn=True, norm_layer=norm_layer,
                       has_relu=True, has_bias=False),
            ConvBnRelu(mid, mid, 3, 1, 1, has_bn=True, norm_layer=norm_layer,
                       has_relu=True, has_bias=False))
        self.dropout = nn.Dropout(0.1)
        self.conv_1x1_2 = nn.Conv2d(mid, out_planes, kernel_size=1,
                                    stride=1, padding=0)
        self.scale = scale

        # Kaiming-uniform for convs, N(1, 0.02)/zero for BatchNorm.
        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_uniform_(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                init.normal_(mod.weight.data, 1.0, 0.02)
                init.constant_(mod.bias.data, 0.0)

    def forward(self, x):
        if self.scale > 1:
            x = F.interpolate(x, scale_factor=self.scale, mode='bilinear', align_corners=True)
        # Dropout is defined but deliberately not applied here, matching the
        # original implementation.
        return self.conv_1x1_2(self.conv_1x1_3x3(x))


class ConvBnRelu(nn.Module):
    """Conv2d with optional BatchNorm and ReLU, initialised on construction
    (kaiming-uniform convs, N(1, 0.02)/zero BatchNorm)."""

    def __init__(self, in_planes, out_planes, ksize, stride, pad, dilation=1,
                 groups=1, has_bn=True, norm_layer=nn.BatchNorm2d,
                 has_relu=True, inplace=True, has_bias=False):
        super(ConvBnRelu, self).__init__()

        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=ksize,
                              stride=stride, padding=pad,
                              dilation=dilation, groups=groups, bias=has_bias)
        self.has_bn = has_bn
        self.has_relu = has_relu
        # NOTE: norm_layer is accepted but nn.BatchNorm2d is used directly,
        # matching the original implementation.
        if has_bn:
            self.bn = nn.BatchNorm2d(out_planes)
        if has_relu:
            self.relu = nn.ReLU(inplace=inplace)

        for mod in self.modules():
            if isinstance(mod, nn.Conv2d):
                nn.init.kaiming_uniform_(mod.weight.data)
                if mod.bias is not None:
                    mod.bias.data.zero_()
            elif isinstance(mod, nn.BatchNorm2d):
                init.normal_(mod.weight.data, 1.0, 0.02)
                init.constant_(mod.bias.data, 0.0)

    def forward(self, x):
        out = self.conv(x)
        if self.has_bn:
            out = self.bn(out)
        if self.has_relu:
            out = self.relu(out)
        return out


class SeparableConv2d(nn.Module):
    """Depthwise-separable convolution: a grouped (depthwise) conv followed
    by a 1x1 pointwise conv, each normalised; ReLU placement depends on
    ``relu_first``."""

    def __init__(self, inplanes, planes, kernel_size=3, stride=1, dilation=1, relu_first=True,
                 bias=False, norm_layer=nn.BatchNorm2d):
        super().__init__()
        dw = nn.Conv2d(inplanes, inplanes, kernel_size,
                       stride=stride, padding=dilation,
                       dilation=dilation, groups=inplanes, bias=bias)
        pw = nn.Conv2d(inplanes, planes, 1, bias=bias)
        if relu_first:
            # Pre-activation: ReLU -> depthwise -> BN -> pointwise -> BN.
            stages = [nn.ReLU(), dw, norm_layer(inplanes), pw, norm_layer(planes)]
        else:
            # Post-activation: a ReLU after each conv+BN pair.
            stages = [dw, norm_layer(inplanes), nn.ReLU(inplace=True),
                      pw, norm_layer(planes), nn.ReLU(inplace=True)]
        self.block = nn.Sequential(*stages)

    def forward(self, x):
        return self.block(x)


# Module-level version tag (re-assigned to "V1" in the smoke test below).
BaseNet_version = "V12"

if __name__ == '__main__':
    # Smoke test: build the model on GPU and print a layer summary.
    import os
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'

    BaseNet_version = "V1"
    print(BaseNet_version)
    print(" Using V1 : transparent")

    model = BACPFNet()
    # print(model)
    # model = torch.nn.DataParallel(model).cuda()
    model.cuda()
    with torch.no_grad():
        model.eval()
        # Summary for a single 3x256x256 input.
        torchsummary.summary(model, (3, 256, 256))