import torch
import torch.nn as nn
from torch.nn import functional as F
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .cascade_decode_head import BaseCascadeDecodeHead
from torch.nn import init
import math

class ConvX(nn.Module):
    """Convolution followed by batch normalization and an in-place ReLU."""

    def __init__(self, in_planes, out_planes, kernel=3, stride=1, sync=False):
        super(ConvX, self).__init__()
        # `padding=kernel // 2` keeps the spatial size for odd kernels
        # (up to the stride).
        self.conv = nn.Conv2d(
            in_planes,
            out_planes,
            kernel_size=kernel,
            stride=stride,
            padding=kernel // 2,
            bias=False,
        )
        # SyncBatchNorm is only meaningful under distributed training.
        norm_cls = nn.SyncBatchNorm if sync else nn.BatchNorm2d
        self.bn = norm_cls(out_planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Apply conv -> bn -> relu."""
        return self.relu(self.bn(self.conv(x)))



class AddBottleneck(nn.Module):
    """STDC-style bottleneck: branch outputs are concatenated, then a skip
    connection is added.

    When ``stride == 2`` the first branch output is downsampled with a
    depthwise stride-2 conv and the skip path is downsampled and projected
    to ``out_planes`` so the addition is shape-compatible.

    Args:
        in_planes (int): input channel count.
        out_planes (int): output channel count, split across branches in
            powers of two.
        block_num (int): number of ConvX stages; must be > 1.
        stride (int): 1 keeps resolution, 2 halves it.
    """

    def __init__(self, in_planes, out_planes, block_num=3, stride=1):
        super(AddBottleneck, self).__init__()
        # Fix: the original used `assert cond, print(...)`, which evaluates
        # to an assertion message of None; use a plain string message.
        assert block_num > 1, "block number should be larger than 1."
        self.conv_list = nn.ModuleList()
        self.stride = stride
        if stride == 2:
            # Depthwise stride-2 conv downsampling the first branch output.
            self.avd_layer = nn.Sequential(
                nn.Conv2d(out_planes // 2, out_planes // 2, kernel_size=3, stride=2, padding=1, groups=out_planes // 2,
                          bias=False),
                nn.BatchNorm2d(out_planes // 2),
            )
            # Skip path: depthwise downsample then 1x1 channel projection.
            self.skip = nn.Sequential(
                nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=2, padding=1, groups=in_planes, bias=False),
                nn.BatchNorm2d(in_planes),
                nn.Conv2d(in_planes, out_planes, kernel_size=1, bias=False),
                nn.BatchNorm2d(out_planes),
            )
            stride = 1

        # Channel plan: out/2, out/4, ... halving per stage; the final stage
        # repeats the previous width so the concatenation totals out_planes.
        for idx in range(block_num):
            if idx == 0:
                self.conv_list.append(ConvX(in_planes, out_planes // 2, kernel=1))
            elif idx == 1 and block_num == 2:
                self.conv_list.append(ConvX(out_planes // 2, out_planes // 2, stride=stride))
            elif idx == 1 and block_num > 2:
                self.conv_list.append(ConvX(out_planes // 2, out_planes // 4, stride=stride))
            elif idx < block_num - 1:
                self.conv_list.append(
                    ConvX(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx + 1))))
            else:
                self.conv_list.append(ConvX(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx))))

    def forward(self, x):
        """Concatenate branch outputs and add the (possibly downsampled) skip."""
        out_list = []
        out = x

        for idx, conv in enumerate(self.conv_list):
            if idx == 0 and self.stride == 2:
                # Downsample right after the 1x1 entry conv.
                out = self.avd_layer(conv(out))
            else:
                out = conv(out)
            out_list.append(out)

        if self.stride == 2:
            x = self.skip(x)

        return torch.cat(out_list, dim=1) + x



class CatBottleneck(nn.Module):
    """STDC-style bottleneck whose branch outputs are concatenated.

    When ``stride == 2`` the first branch output is downsampled with a
    depthwise stride-2 conv before feeding later branches, and its copy in
    the concatenation is downsampled with average pooling instead.

    Args:
        in_planes (int): input channel count.
        out_planes (int): output channel count, split across branches in
            powers of two.
        block_num (int): number of ConvX stages; must be > 1.
        stride (int): 1 keeps resolution, 2 halves it.
    """

    def __init__(self, in_planes, out_planes, block_num=3, stride=1):
        super(CatBottleneck, self).__init__()
        # Fix: the original used `assert cond, print(...)`, which evaluates
        # to an assertion message of None; use a plain string message.
        assert block_num > 1, "block number should be larger than 1."
        self.conv_list = nn.ModuleList()
        self.stride = stride
        if stride == 2:
            # Depthwise stride-2 conv downsampling the first branch output.
            self.avd_layer = nn.Sequential(
                nn.Conv2d(out_planes // 2, out_planes // 2, kernel_size=3, stride=2, padding=1, groups=out_planes // 2,
                          bias=False),
                nn.BatchNorm2d(out_planes // 2),
            )
            # Parameter-free downsampling for the first branch's concat copy.
            self.skip = nn.AvgPool2d(kernel_size=3, stride=2, padding=1)
            stride = 1

        # Channel plan: out/2, out/4, ... halving per stage; the final stage
        # repeats the previous width so the concatenation totals out_planes.
        for idx in range(block_num):
            if idx == 0:
                self.conv_list.append(ConvX(in_planes, out_planes // 2, kernel=1))
            elif idx == 1 and block_num == 2:
                self.conv_list.append(ConvX(out_planes // 2, out_planes // 2, stride=stride))
            elif idx == 1 and block_num > 2:
                self.conv_list.append(ConvX(out_planes // 2, out_planes // 4, stride=stride))
            elif idx < block_num - 1:
                self.conv_list.append(
                    ConvX(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx + 1))))
            else:
                self.conv_list.append(ConvX(out_planes // int(math.pow(2, idx)), out_planes // int(math.pow(2, idx))))

    def forward(self, x):
        """Concatenate all branch outputs along the channel axis."""
        out_list = []
        out1 = self.conv_list[0](x)

        for idx, conv in enumerate(self.conv_list[1:]):
            if idx == 0:
                if self.stride == 2:
                    # Downsample before the second branch when striding.
                    out = conv(self.avd_layer(out1))
                else:
                    out = conv(out1)
            else:
                out = conv(out)
            out_list.append(out)

        if self.stride == 2:
            # Match the first branch's resolution to the later branches.
            out1 = self.skip(out1)
        out_list.insert(0, out1)

        out = torch.cat(out_list, dim=1)
        return out


class GlobalContextNet(nn.Module):
    """STDC-style backbone producing features at output strides 2/4/8/16.

    For the defaults (base=64, layers=[2, 2], block_num=4) and a
    (B, 3, 224, 224) input, `forward` returns features of shape
    (B, 32, 112, 112), (B, 64, 56, 56), (B, 256, 28, 28), (B, 512, 14, 14).

    Args:
        base (int): base channel width.
        in_channels (int): input image channel count.
        layers (list[int]): number of bottleneck blocks per stage.
        block_num (int): ConvX stages inside each bottleneck block.
        type (str): bottleneck variant, "cat" or "add".
        dropout (float): unused here; kept for config compatibility.
        pretrain_model (str): optional checkpoint path to initialise from.
    """

    def __init__(self, base=64, in_channels=3, layers=[2, 2], block_num=4, type="cat", dropout=0.20, pretrain_model=''):
        super(GlobalContextNet, self).__init__()
        if type == "cat":
            block = CatBottleneck
        elif type == "add":
            block = AddBottleneck
        else:
            # Fix: the original silently fell through and later raised an
            # unhelpful NameError on `block`; fail fast with a clear message.
            raise ValueError('type must be "cat" or "add", got {!r}'.format(type))
        self.in_channels = in_channels

        self.features = self._make_layers(base, layers, block_num, block)

        # Split the feature stack into stages grouped by output stride.
        self.x2 = nn.Sequential(self.features[:1])
        self.x4 = nn.Sequential(self.features[1:2])
        self.x8 = nn.Sequential(self.features[2:4])
        self.x16 = nn.Sequential(self.features[4:6])
        if pretrain_model:
            print('use pretrain model {}'.format(pretrain_model))
            self.init_weight(pretrain_model)
        else:
            self.init_params()

    def init_weight(self, pretrain_model):
        """Load backbone weights from a checkpoint file (non-strict).

        The stem conv's weight is duplicated along the input-channel axis
        when the model takes more than 3 input channels.
        """
        # Fix: map_location='cpu' so CUDA-saved checkpoints also load on
        # CPU-only hosts; load_state_dict copies onto the module's device.
        state_dict = torch.load(pretrain_model, map_location='cpu')["state_dict"]
        self_state_dict = self.state_dict()
        for k, v in state_dict.items():
            if k == 'features.0.conv.weight' and self.in_channels != 3:
                # NOTE(review): doubling the weight assumes in_channels == 6;
                # other channel counts would still mismatch — confirm.
                v = torch.cat([v, v], dim=1)
            self_state_dict.update({k: v})
        self.load_state_dict(self_state_dict, strict=False)

    def init_params(self):
        """Kaiming init for convs, constant init for norms and linear biases."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def _make_layers(self, base, layers, block_num, block):
        """Build stem convs plus the bottleneck stages as one Sequential."""
        features = []
        # Two stride-2 stem convs: in_channels -> base/2 -> base.
        features += [ConvX(self.in_channels, base // 2, 3, 2)]
        features += [ConvX(base // 2, base, 3, 2)]

        for i, layer in enumerate(layers):
            for j in range(layer):
                if i == 0 and j == 0:
                    # First block of stage 0 expands base -> 4*base, stride 2.
                    features.append(block(base, base * 4, block_num, 2))
                elif j == 0:
                    # First block of each later stage doubles width, stride 2.
                    features.append(block(base * int(math.pow(2, i + 1)), base * int(math.pow(2, i + 2)), block_num, 2))
                else:
                    # Remaining blocks keep width and resolution.
                    features.append(block(base * int(math.pow(2, i + 2)), base * int(math.pow(2, i + 2)), block_num, 1))

        return nn.Sequential(*features)

    def forward(self, x):
        """Return features at strides 2, 4, 8 and 16."""
        feat2 = self.x2(x)
        feat4 = self.x4(feat2)
        feat8 = self.x8(feat4)
        feat16 = self.x16(feat8)

        return feat2, feat4, feat8, feat16
#################################################################################################################

class SegmentationHead(nn.Module):
    """Projects features to per-class logits: a configurable 3x3
    conv-norm-act followed by a plain 1x1 conv.
    """

    def __init__(self, conv_cfg, norm_cfg, act_cfg, in_channels, mid_channels, n_classes, *args, **kwargs):
        super(SegmentationHead, self).__init__()

        # 3x3 conv + norm + activation, configured via the mmcv cfg dicts.
        self.conv_bn_relu = ConvModule(
            in_channels,
            mid_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg,
        )

        # 1x1 projection to class scores; bias kept, no norm or activation.
        self.conv_out = nn.Conv2d(mid_channels, n_classes, kernel_size=1, bias=True)

    def forward(self, x):
        """Return raw class logits for the feature map ``x``."""
        return self.conv_out(self.conv_bn_relu(x))

class Lap_Pyramid_Conv(nn.Module):
    """Laplacian pyramid decomposition using a fixed 5x5 Gaussian kernel.

    Args:
        num_high (int): number of high-frequency levels to extract.
        gauss_chl (int): image channel count blurred depthwise.
    """

    def __init__(self, num_high=3, gauss_chl=3):
        super(Lap_Pyramid_Conv, self).__init__()

        self.num_high = num_high
        self.gauss_chl = gauss_chl
        self.kernel = self.gauss_kernel()

    def gauss_kernel(self, device=torch.device('cpu')):
        """Return a (gauss_chl, 1, 5, 5) normalized Gaussian kernel.

        Fix: the default device was hard-coded to CUDA, which crashed on
        CPU-only hosts at import/construction time; the kernel is now built
        on CPU and conv_gauss moves it to the input's device on demand.
        """
        kernel = torch.tensor([[1., 4., 6., 4., 1.],
                               [4., 16., 24., 16., 4.],
                               [6., 24., 36., 24., 6.],
                               [4., 16., 24., 16., 4.],
                               [1., 4., 6., 4., 1.]])
        kernel /= 256.
        # One copy per channel for depthwise (grouped) convolution.
        kernel = kernel.repeat(self.gauss_chl, 1, 1, 1)
        kernel = kernel.to(device)
        return kernel

    def downsample(self, x):
        """Drop every other row and column (stride-2 decimation)."""
        return x[:, :, ::2, ::2]

    def upsample(self, x):
        """Double the spatial size by zero-insertion, then Gaussian-smooth.

        The kernel is scaled by 4 to compensate for the inserted zeros.
        """
        cc = torch.cat([x, torch.zeros(x.shape[0], x.shape[1], x.shape[2], x.shape[3], device=x.device)], dim=3)
        cc = cc.view(x.shape[0], x.shape[1], x.shape[2] * 2, x.shape[3])
        cc = cc.permute(0, 1, 3, 2)
        cc = torch.cat([cc, torch.zeros(x.shape[0], x.shape[1], x.shape[3], x.shape[2] * 2, device=x.device)], dim=3)
        cc = cc.view(x.shape[0], x.shape[1], x.shape[3] * 2, x.shape[2] * 2)
        x_up = cc.permute(0, 1, 3, 2)
        return self.conv_gauss(x_up, 4 * self.kernel)

    def conv_gauss(self, img, kernel):
        """Depthwise 5x5 Gaussian blur with reflect padding (size-preserving)."""
        img = torch.nn.functional.pad(img, (2, 2, 2, 2), mode='reflect')
        # Fix: follow the input's device so the module works for both CPU
        # and GPU tensors regardless of where the kernel was created.
        out = torch.nn.functional.conv2d(img, kernel.to(img.device), groups=img.shape[1])
        return out

    def pyramid_decom(self, img):
        """Return the num_high high-frequency residual levels of ``img``.

        Each level is current - upsample(downsample(blur(current))); the
        upsampled image is resized when odd sizes cause a shape mismatch.
        """
        current = img
        pyr = []
        for _ in range(self.num_high):
            filtered = self.conv_gauss(current, self.kernel)
            down = self.downsample(filtered)
            up = self.upsample(down)
            if up.shape[2] != current.shape[2] or up.shape[3] != current.shape[3]:
                up = nn.functional.interpolate(up, size=(current.shape[2], current.shape[3]))
            diff = current - up
            pyr.append(diff)
            current = down
        return pyr



class PreAtt(nn.Module):
    """Spatial attention followed by a channel descriptor.

    Returns the spatially re-weighted feature map together with a
    (B, C, 1, 1) channel-attention vector derived from it.
    """

    def __init__(self, in_channels, out_channels):
        super(PreAtt, self).__init__()
        # Projection: 3x3 conv + BN + ReLU.
        self.conv_bn_relu = nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=out_channels),
            nn.ReLU()
        )
        # 1x1 conv + BN applied to the pooled channel descriptor.
        self.conv_1x1 = nn.Sequential(
            nn.Conv2d(in_channels=out_channels, out_channels=out_channels, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(num_features=out_channels),
        )
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Mixes the 2-channel (max, mean) map into one spatial gate.
        self.conv = nn.Conv2d(2, 1, kernel_size=3, stride=1, padding=1, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Return (gated feature map, (B, C, 1, 1) channel descriptor)."""
        projected = self.conv_bn_relu(x)

        # Spatial gate from channel-wise max and mean maps (B, 1, H, W).
        channel_max = projected.max(dim=1, keepdim=True).values
        channel_mean = projected.mean(dim=1, keepdim=True)
        gate = self.sigmoid(self.conv(torch.cat((channel_max, channel_mean), dim=1)))

        weighted = gate * projected

        # Channel descriptor (B, C, 1, 1) from the gated features.
        channel_att = self.conv_1x1(self.avg_pool(weighted))

        return weighted, channel_att


class CAM(nn.Module):
    """Cross-attention module fusing a local feature with a context feature.

    Channel descriptors from both branches (via PreAtt) are compared with a
    small bmm attention over ``v`` groups; the resulting MLP gate
    re-weights both feature maps before they are summed.

    NOTE(review): the context branch input is hard-coded to 640 channels —
    presumably matching the concatenation built by the caller (Fusion);
    confirm. ``channels`` must be divisible by ``v``.
    """

    def __init__(self, channels,  ext=2, v=16):
        super(CAM, self).__init__()
        # Number of groups the (B, C) channel descriptor is reshaped into.
        self.v = v
        # self.g1 = nn.Parameter(torch.zeros(1))

        # Bottleneck MLP producing the per-channel gate.
        self.out_mlp = nn.Sequential(nn.Linear(channels, channels//2), nn.ReLU(), nn.Linear(channels//2, channels))
        # Attention branches: local/global feature (channels*ext in) and
        # context feature (fixed 640 in).
        self.global_att = PreAtt(channels*ext, channels)
        self.context_att = PreAtt(640, channels)

        # Refines the upsampled context feature.
        self.context_head = nn.Sequential(
            nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=channels),
            nn.ReLU(inplace=True)
        )

        # Smooths the summed output (no activation).
        self.smooth = nn.Sequential(
            nn.Conv2d(in_channels=channels, out_channels=channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(num_features=channels),
        )

        self.q_norm = nn.LayerNorm(channels // v, eps=1e-6)
        self.kv_norm = nn.LayerNorm(channels // v, eps=1e-6)
        self.ffn_norm = nn.LayerNorm(channels, eps=1e-6)
        self.soft = nn.Softmax(dim=-1)

    def forward(self, g_feat, c_feat):  #local  global    a1,a2
        """Fuse ``g_feat`` (local branch) with ``c_feat`` (context branch).

        Returns the gated local feature, the upsampled-and-refined context
        feature, and their smoothed sum.
        """

        g_feat, g_att = self.global_att(g_feat)
        c_feat, c_att = self.context_att(c_feat)

        b, c, h, w = g_att.size()  # （B,N,1,1)
        # Reshape the channel descriptors into v groups of c // v values.
        g_att_rs = g_att.view(b, self.v, c // self.v)  # （B,16,8)
        c_att_rs = c_att.view(b, self.v, c // self.v)

        c_att_rs = self.q_norm(c_att_rs)
        g_att_rs = self.kv_norm(g_att_rs)

        # Scaled dot-product attention between the two descriptor groups.
        energy = torch.bmm(g_att_rs, c_att_rs.permute(0, 2, 1))   # (B,16,16)
        energy = energy/math.sqrt(self.v)
        energy = self.soft(energy)     # (B,16,16)        #$###########
        v = torch.bmm(energy, g_att_rs)  # (B,16,8)
        # Residual add back onto the g-branch descriptor (variant "a1").
        energy = v + g_att_rs             # (B,16,8)
        #energy = v + c_att_rs  # (B,16,8)                #$###########   a2

        # Flatten back to (B, channels) and produce a non-negative gate.
        energy = self.ffn_norm(energy.view(b, -1))   # (B,16*8)
        mlp_out = F.relu(self.out_mlp(energy)).unsqueeze(-1).unsqueeze(-1)

        # The same gate re-weights both feature maps.
        g_feat = torch.mul(g_feat, mlp_out)
        c_feat = torch.mul(c_feat, mlp_out)

        # big = self.spa1(s_feat)
        # s_feat =s_feat+ big
        # small = self.spa2(c_feat)
        # c_feat = c_feat+ small

        # Bring the context feature up to the local feature's resolution.
        c_feat = F.interpolate(c_feat, g_feat.size()[2:], mode='bilinear', align_corners=False)
        c_feat = self.context_head(c_feat)  ###### c_feat_upsample

        out = self.smooth(g_feat + c_feat)
        #out = self.smooth(torch.cat([s_feat , c_feat], dim=1))

        return g_feat, c_feat, out


class Fusion(nn.Module):
    """Fuses the previous head's deep feature with the backbone's
    feat16/feat8/feat4 features via the CAM cross-attention module.
    """

    def __init__(self, channels):
        super(Fusion, self).__init__()
        self.fuse = CAM(channels, 2)
        # 1x1 conv squeezing the (up8, feat4) concat: 192 -> 128 channels.
        self.conv1 = nn.Conv2d(192, 128, kernel_size=1)

    def forward(self, deep_feat, feat16, feat8, feat4):
        """Return (context feature, stride-8 fusion, stride-4 fusion)."""
        # Context input: deep feature upsampled to feat16's size, then
        # concatenated with feat16 along channels.
        deep_up = F.interpolate(deep_feat, feat16.size()[2:], mode='bilinear', align_corners=False)
        context = torch.cat([deep_up, feat16], dim=1)

        # Cross-attention fusion at stride 8; the first return is unused.
        _, c_feat, out8 = self.fuse(feat8, context)

        # Merge the stride-8 result with feat4 and squeeze channels.
        out8_up = F.interpolate(out8, feat4.size()[2:], mode='bilinear', align_corners=False)
        out4 = self.conv1(torch.cat([out8_up, feat4], dim=1))

        return c_feat, out8, out4

class SrHead(nn.Module):
    """Super-resolution head: three upsample+conv stages that grow the
    resolution while shrinking the channel count, followed by a 3-channel
    reconstruction head.
    """

    def __init__(self, conv_cfg, norm_cfg, act_cfg, channels=128, up_lists=[4,2,2]):
        super(SrHead, self).__init__()
        # self.iwt = IWT()

        # Stage 1: x up_lists[0] upsample, channels -> channels/4.
        self.up1 = nn.Upsample(scale_factor=up_lists[0])
        self.conv1 = ConvModule(channels, channels // 4, 3, stride=1, padding=1,
                                conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Stage 2: x up_lists[1] upsample, channels/4 -> channels/8.
        self.up2 = nn.Upsample(scale_factor=up_lists[1])
        self.conv2 = ConvModule(channels // 4, channels // 8, 3, stride=1, padding=1,
                                conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Stage 3: x up_lists[2] upsample, channels/8 -> channels/16.
        self.up3 = nn.Upsample(scale_factor=up_lists[2])
        self.conv3 = ConvModule(channels // 8, channels // 16, 3, stride=1, padding=1,
                                conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)

        # Final projection to a 3-channel reconstruction.
        self.sr_head = SegmentationHead(conv_cfg, norm_cfg, act_cfg, channels // 16, channels // 32, 3, kernel_size=1)

    def forward(self, x):
        """Return the reconstructed 3-channel image."""
        stages = ((self.up1, self.conv1), (self.up2, self.conv2), (self.up3, self.conv3))
        for upsample, refine in stages:
            x = refine(upsample(x))
        return self.sr_head(x)





@HEADS.register_module()
class SunHead(BaseCascadeDecodeHead):
    """Cascade decode head: sharpens the input with a one-level Laplacian
    pyramid, extracts multi-scale features with GlobalContextNet, fuses
    them with the previous decode head's output, and adds a
    super-resolution reconstruction branch as an auxiliary training signal.
    """

    def __init__(self,  prev_channels,  **kwargs):
        super(SunHead, self).__init__(**kwargs)
        # NOTE(review): prev_channels is accepted but never used — confirm
        # whether it is required by the config interface only.

        # self.dwt = DWT()
        # self.conv_bn_relu = nn.Sequential(
        #     nn.Conv2d(in_channels=12, out_channels=6, kernel_size=3, stride=1, padding=1),
        #     nn.BatchNorm2d(num_features=6),
        #     nn.ReLU()
        # )

        # One-level Laplacian decomposition for high-frequency enhancement.
        self.laplace = Lap_Pyramid_Conv(num_high=1)
        # NOTE(review): pretrain_model expects a path string; False is passed
        # and is merely falsy, so random init_params() is used — confirm.
        self.gc_net = GlobalContextNet(in_channels=3, pretrain_model=False)
        self.fusion = Fusion(self.channels)

        # Main segmentation head plus two auxiliary heads used only for
        # deep supervision during training.
        self.conv_seg = SegmentationHead(self.conv_cfg, self.norm_cfg, self.act_cfg, self.channels,
                                         self.channels // 4, self.num_classes, kernel_size=1)
        self.conv_seg1 = SegmentationHead(self.conv_cfg, self.norm_cfg, self.act_cfg, self.channels,
                                         self.channels // 4, self.num_classes, kernel_size=1)
        self.conv_seg2 = SegmentationHead(self.conv_cfg, self.norm_cfg, self.act_cfg, self.channels,
                                            self.channels // 4, self.num_classes, kernel_size=1)

        # self.deconv = nn.ConvTranspose2d(128, 128, kernel_size=3, stride=2, padding=1, dilation=1, output_padding=1)
        # self.bound = BoundaryExtractor(7)
        # Reconstructs a 3-channel image from the deep feature.
        self.sr_head = SrHead(self.conv_cfg, self.norm_cfg, self.act_cfg, channels=128,up_lists=[4,2, 2])

        # Fixed mixing weights: lp_input = a * high-frequency + b * image.
        self.a = 0.1
        self.b = 0.9
        #self.a = nn.Parameter(torch.zeros(1))
        #self.b = nn.Parameter(torch.zeros(1))

    def forward(self, inputs, prev_output, is_train=True):
        """Forward function.

        Returns (main logits, aux logits 1, aux logits 2, recon loss vs
        input, recon loss vs enhanced input) in training mode; only the
        main logits otherwise.
        """
        #Stem block

        # Blend the input with its Laplacian high-frequency residual.
        lp_results = self.laplace.pyramid_decom(inputs)
        lp_1 = lp_results[0]
        lp_input = (lp_1*self.a)+(inputs*self.b)

        # lp_results = self.laplace.pyramid_decom(inputs)
        # lp_1 = lp_results[0]
        # lp_2 = F.interpolate(lp_results[1], lp_results[0].size()[2:], mode='bilinear',align_corners=False)
        # lp_input = torch.cat([lp_1, lp_2], dim=1)

        #dwt_results = self.dwt(inputs)  #b,3*4,h/2,w/2
        #print(dwt_results.shape)
        #dwt_results = self.conv_bn_relu(dwt_results)  #b,3*4/2,h/2,w/2
        #print(dwt_results.shape)

        #_, feat4, feat8, feat16 = self.gc_net(inputs)

        _, feat4, feat8, feat16 = self.gc_net(lp_input)
        # Output of the previous decode head in the cascade.
        deep_feat = prev_output[0]

        c_feat, out8, out4 = self.fusion(deep_feat, feat16,feat8, feat4)
        output = self.conv_seg(out4)

        if is_train:
            # Deep supervision on the intermediate fusion outputs.
            output_1 = self.conv_seg1(out8)
            output_2 = self.conv_seg2(c_feat)

            #srhead
            # Reconstruction compared against both the raw input and the
            # Laplacian-enhanced input.
            recons_out = self.sr_head(deep_feat)
            losses_re1 = self.recons_loss(inputs, recons_out, re_weight=1.0)
            losses_re2 = self.recons_loss(lp_input, recons_out, re_weight=1.0)

            return output, output_1, output_2, losses_re1, losses_re2

        else:
            return output

    def recons_loss(self, img, pred, re_weight=0.5):
        """Weighted MSE between the reconstruction and the target image.

        ``pred`` is bilinearly resized to ``img``'s spatial size when they
        differ. Returns a dict with a single 'Sr' entry.
        """
        loss = dict()
        if pred.size()[2:] != img.size()[2:]:
            pred = F.interpolate(pred, img.size()[2:], mode='bilinear', align_corners=False)
        recon_loss = F.mse_loss(pred, img) * re_weight
        loss['Sr'] = recon_loss
        return loss



    def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, train_cfg):
        """Compute segmentation losses for the main and two auxiliary
        outputs, plus the two reconstruction-loss dicts."""
        seg_out,seg_aux1, seg_aux2, loss_re, loss_re2 = self.forward(inputs, prev_output)

        #print( gt_semantic_seg.shape)
        #bound = self.bound(gt_semantic_seg)

        losses = self.losses(seg_out, gt_semantic_seg)
        losses_aux1 = self.losses(seg_aux1, gt_semantic_seg)
        losses_aux2 = self.losses(seg_aux2, gt_semantic_seg)
        #losses_aux3 = self.losses(logits_3, gt_semantic_seg)

        return losses, losses_aux1, losses_aux2, loss_re, loss_re2

    def forward_test(self, inputs, prev_output, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            prev_output (Tensor): The output of previous decode head.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """

        return self.forward(inputs, prev_output, False)


