import torch
import torch.nn as nn
from torch.nn import functional as F
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .cascade_decode_head import BaseCascadeDecodeHead
from .vmunet import VMUNet

class SegmentationHead(nn.Module):
    """A 3x3 conv-norm-act block followed by a 1x1 classifier convolution.

    NOTE(review): extra positional/keyword arguments (e.g. ``kernel_size=1``
    at the call sites in this file) are accepted but intentionally ignored.
    """

    def __init__(self, conv_cfg, norm_cfg, act_cfg, in_channels, mid_channels, n_classes, *args, **kwargs):
        super(SegmentationHead, self).__init__()

        # Feature refinement: 3x3 conv with configurable norm/activation.
        self.conv_bn_relu = ConvModule(
            in_channels,
            mid_channels,
            3,
            stride=1,
            padding=1,
            conv_cfg=conv_cfg,
            norm_cfg=norm_cfg,
            act_cfg=act_cfg)
        # Per-pixel classification: plain 1x1 conv with bias.
        self.conv_out = nn.Conv2d(mid_channels, n_classes, kernel_size=1, bias=True)

    def forward(self, x):
        """Refine ``x`` then project to ``n_classes`` channels."""
        return self.conv_out(self.conv_bn_relu(x))

class SrHead(nn.Module):
    """Super-resolution head.

    Upsamples a deep feature map through three upsample+conv stages (total
    scale factor is the product of ``up_lists``) and reconstructs a
    3-channel image via a small :class:`SegmentationHead`.

    Args:
        conv_cfg, norm_cfg, act_cfg: Layer configs forwarded to ConvModule.
        channels (int): Input channel count; progressively reduced to
            ``channels // 16`` across the stages.
        up_lists (sequence[int]): Per-stage upsampling factors.
            Default changed from a mutable list to a tuple (mutable default
            arguments are shared across calls); callers passing lists still
            work since only indexing is used.
    """

    def __init__(self, conv_cfg, norm_cfg, act_cfg, channels=128, up_lists=(4, 2, 2)):
        super(SrHead, self).__init__()
        # self.iwt = IWT()
        # nn.Upsample with no mode argument uses 'nearest' interpolation.
        self.up1 = nn.Upsample(scale_factor=up_lists[0])
        self.conv1 = ConvModule(channels, channels // 4, 3, stride=1, padding=1,
                                conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.up2 = nn.Upsample(scale_factor=up_lists[1])
        self.conv2 = ConvModule(channels // 4, channels // 8, 3, stride=1, padding=1,
                                conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        self.up3 = nn.Upsample(scale_factor=up_lists[2])
        self.conv3 = ConvModule(channels // 8, channels // 16, 3, stride=1, padding=1,
                                conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
        # Final reconstruction to 3 channels (an RGB-like image); the
        # trailing kernel_size kwarg is swallowed by SegmentationHead.
        self.sr_head = SegmentationHead(conv_cfg, norm_cfg, act_cfg, channels // 16,
                                        channels // 32, 3, kernel_size=1)

    def forward(self, x):
        """Upsample ``x`` by prod(up_lists) and reconstruct a 3-channel image."""
        x = self.conv1(self.up1(x))
        x = self.conv2(self.up2(x))
        # y = self.iwt(x)  # b,8,h/2,w/2
        x = self.conv3(self.up3(x))
        recons_out = self.sr_head(x)
        return recons_out

class Decode(nn.Module):
    """Fuse the deepest backbone feature with VM-UNet pyramid features.

    Expects ``logit`` at 768 channels, ``res32`` at 2048 channels, ``vm[2]``
    at 384 channels and ``vm[0]`` at 96 channels; produces a 512-channel map
    at ``vm[0]``'s spatial resolution.
    """

    def __init__(self):
        super(Decode, self).__init__()
        #self.fuse = CAM(channels, 2)
        # 1x1 projections: 1248 (= 768 + 384 + 96) -> 512 after both
        # concatenations, and 2048 -> 768 for the deep backbone feature.
        self.conv1 = nn.Conv2d(1248, 512, kernel_size=1)
        self.conv = nn.Conv2d(2048, 768, kernel_size=1)

    def forward(self, logit, res32, vm):
        """Return the fused 512-channel feature at ``vm[0]``'s resolution."""
        # Project the 2048-channel deep feature to 768 and add the logits.
        fused = self.conv(res32) + logit
        # Upsample to vm[2]'s size and concatenate: 768 + 384 channels.
        mid = F.interpolate(fused, vm[2].size()[2:], mode='bilinear',
                            align_corners=False)
        mid = torch.cat([mid, vm[2]], dim=1)
        # Upsample to vm[0]'s size and concatenate: 1152 + 96 = 1248 channels.
        high = F.interpolate(mid, vm[0].size()[2:], mode='bilinear',
                             align_corners=False)
        high = torch.cat([high, vm[0]], dim=1)
        return self.conv1(high)

class AdaptiveInteractionModule(nn.Module):
    """Project each feature map in a list to a target channel count and
    scale it with a learnable per-level weight.

    Outputs are permuted to channel-last (B, H, W, C) layout, as consumed
    by the VM-UNet in this file. Unlike the original implementation, the
    input list is not mutated in place.
    """

    def __init__(self):
        super(AdaptiveInteractionModule, self).__init__()
        # Per-level learnable scaling factors. Registered via ParameterList:
        # a plain Python list would hide them from .parameters(), the
        # optimizer, the state_dict and .to()/.cuda().
        self.a = nn.ParameterList(nn.Parameter(torch.ones(1)) for _ in range(4))
        # 1x1 projection convs, created lazily per (in_ch, out_ch) pair and
        # cached so the same trainable weights are reused across forward
        # passes. (The original built a freshly-randomized nn.Conv2d on
        # every call, so features were projected by untrained random
        # weights that changed each forward.)
        self.projs = nn.ModuleDict()

    def forward(self, net1, net2):
        """Adapt each tensor in ``net1`` to the channel count in ``net2``.

        Args:
            net1 (list[Tensor]): Feature maps in (B, C, H, W) layout.
            net2 (list[int]): Target channel count per level (at most 4
                levels, matching the number of scaling factors).

        Returns:
            list[Tensor]: Scaled maps in (B, H, W, C) layout.
        """
        res_list = []
        for i, (feat, out_channels) in enumerate(zip(net1, net2)):
            feat = self.adjust_channels(feat, out_channels)
            # Scale by the learnable per-level factor (broadcasts over BHWC).
            res_list.append(self.a[i] * feat)
        return res_list

    def adjust_channels(self, tensor, new_channels):
        """Project ``tensor`` to ``new_channels`` with a cached 1x1 conv,
        then permute to channel-last layout."""
        key = '{}to{}'.format(tensor.shape[1], new_channels)
        if key not in self.projs:
            # Build on first use, on the input's device (the original
            # hard-coded .cuda(), which broke CPU execution).
            self.projs[key] = nn.Conv2d(
                tensor.shape[1], new_channels, kernel_size=1, stride=1,
                padding=0).to(tensor.device)
        tensor = self.projs[key](tensor)
        return tensor.permute(0, 2, 3, 1)

@HEADS.register_module()
class MambaHead(BaseCascadeDecodeHead):
    """Cascade decode head fusing backbone features with VM-UNet features.

    The previous-stage backbone features (``prev_output``) are channel-
    adapted by :class:`AdaptiveInteractionModule`, injected into a VM-UNet
    run on the raw image, and the resulting pyramid is fused with the
    deepest backbone feature by :class:`Decode` before the final
    segmentation head.
    """

    def __init__(self,  prev_channels,  **kwargs):
        # NOTE(review): ``prev_channels`` is accepted but never used in this
        # class — confirm whether it can be dropped from configs.
        # ``**kwargs`` is forwarded to BaseCascadeDecodeHead, which supplies
        # conv_cfg / norm_cfg / act_cfg / channels / num_classes used below.
        super(MambaHead, self).__init__(**kwargs)
        # Auxiliary VM-UNet feature extractor. ("test" is a misleading name;
        # this is a trained sub-network, not test-only code.)
        # NOTE(review): hard-coded absolute checkpoint path — should come
        # from config so the model is usable outside this machine.
        self.test = VMUNet(
            num_classes=2,
            input_channels=3,
            depths=[2, 2, 2, 2],
            #depths_decoder=[2,2,2,1],
            depths_decoder=[],
            drop_path_rate=0.2,
            load_ckpt_path="/home/songst/VM-UNet-main/pre_trained_weights/vmamba_small_e238_ema.pth"
        )
        self.test.load_from()
        #self.fusion = Fusion(self.channels)
        self.decode = Decode()

        # Final classifier on the fused feature. The kernel_size kwarg is
        # swallowed by SegmentationHead's **kwargs and has no effect.
        self.conv_seg = SegmentationHead(self.conv_cfg, self.norm_cfg, self.act_cfg, self.channels,
                                         self.channels // 4, self.num_classes, kernel_size=1)
        # self.conv_seg1 = SegmentationHead(self.conv_cfg, self.norm_cfg, self.act_cfg, self.channels,
        #                                  self.channels // 4, self.num_classes, kernel_size=1)

        # Super-resolution reconstruction head.
        # NOTE(review): built but unused in the active forward path (the
        # reconstruction branch in forward() is commented out).
        self.sr_head = SrHead(self.conv_cfg, self.norm_cfg, self.act_cfg, channels=128,up_lists=[4,2, 2])
        # self.a = 0.1
        # self.b = 0.9
        # self.a = nn.Parameter(torch.zeros(1))
        # self.b = nn.Parameter(torch.zeros(1))
        self.ai = AdaptiveInteractionModule()

    def forward(self, inputs, prev_output, is_train=True):
        """Forward function.

        Args:
            inputs (Tensor): Input image batch, fed directly to the VM-UNet.
            prev_output (sequence[Tensor]): Multi-scale backbone features.
                Observed shapes from debug logging (strides 4/8/16/32):
                (B, 256, H/4, W/4), (B, 512, H/8, W/8),
                (B, 1024, H/16, W/16), (B, 2048, H/32, W/32).
            is_train (bool): NOTE(review): currently unused — the
                deep-supervision / reconstruction branch that consumed it is
                commented out below.

        Returns:
            Tensor: Segmentation logits from ``conv_seg``.
        """
        #Stem block

        res_output = list(prev_output)

        # --- earlier Laplacian-pyramid / DWT input experiments, kept for
        # --- reference:
        # lp_results = self.laplace.pyramid_decom(inputs)
        # lp_1 = lp_results[0]
        # lp_input = (lp_1*self.a)+(inputs*self.b)
        # lp_2 = F.interpolate(lp_results[1], lp_results[0].size()[2:], mode='bilinear',align_corners=False)
        # lp_input = torch.cat([lp_1, lp_2], dim=1)
        # dwt_results = self.dwt(inputs)  # b,3*4,h/2,w/2
        # dwt_results = self.conv_bn_relu(dwt_results)  # b,3*4/2,h/2,w/2
        # _, feat4, feat8, feat16 = self.gb_net(lp_input)
        # logits,vm = self.test(lp_input)

        # Deepest backbone feature, captured before channel adaptation for
        # the fusion step below.
        res32 = res_output[3]
        # Adapt backbone channels to the VM-UNet stage widths; outputs are
        # channel-last (B, H, W, C) as expected by the VM-UNet.
        res2 = self.ai(res_output, [96, 192, 384, 768])
        logits,vm = self.test(inputs,res2)

        # VM-UNet returns channel-last tensors; convert back to NCHW.
        logits = logits.permute(0, 3,1,2)
        for i in range(len(vm)):
            vm[i] = vm[i].permute(0, 3,1,2)

        # NOTE(review): the original (pinyin) developer notes here said
        # roughly: adjust the backbone channels first, then permute and fuse
        # with vm (similar to ST-UNet's fusion); simplify the fusion a bit;
        # consider frequency-domain input and direct interpolation to the
        # original size.

        # Fuse the VM-UNet pyramid with the deep backbone feature, then
        # classify.
        out4 = self.decode(logits,res32,vm)
        #c_feat, out8, out4 = self.fusion(deep_feat, vm[2],vm[1],vm[0])
        #c_feat, out8, out4 = self.fusion(deep_feat, feat16,feat8, feat4)
        output = self.conv_seg(out4)

        # Disabled deep-supervision / reconstruction branch:
        # if is_train:
        #     output_1 = self.conv_seg1(out8)
        #     output_2 = self.conv_seg2(c_feat)
        #     recons_out =  self.sr_head(deep_feat)
        #     losses_re1 = self.recon_loss(inputs, recons_out, re_weight=1.0)
        #     losses_re2 = self.recon_loss(lp_input, recons_out, re_weight=1.0)
        #
        #     return output, output_1, output_2, losses_re1, losses_re2
        #
        # else:
        return output

    def recon_loss(self, img, pred, re_weight=0.5):
        """Weighted MSE reconstruction loss between ``pred`` and ``img``.

        ``pred`` is bilinearly resized to ``img``'s spatial size if they
        differ. Returns a loss dict with key ``'Sr'`` (mmseg convention).
        """
        loss = dict()
        if pred.size()[2:] != img.size()[2:]:
            pred = F.interpolate(pred, img.size()[2:], mode='bilinear', align_corners=False)
        recon_loss = F.mse_loss(pred, img) * re_weight
        loss['Sr'] = recon_loss
        return loss

    def forward_train(self, inputs, prev_output, img_metas, gt_semantic_seg, train_cfg):
        """Run forward and compute the segmentation loss (cascade-head API)."""
        seg_out = self.forward(inputs, prev_output)
        losses = self.losses(seg_out, gt_semantic_seg)
        #print( gt_semantic_seg.shape)
        #bound = self.bound(gt_semantic_seg)
        # losses_aux1 = self.losses(seg_aux1, gt_semantic_seg)
        # losses_aux2 = self.losses(seg_aux2, gt_semantic_seg)
        #losses_aux3 = self.losses(logits_3, gt_semantic_seg)

        return losses# losses_aux1, losses_aux2, loss_re, loss_re2

    def forward_test(self, inputs, prev_output, img_metas, test_cfg):
        """Forward function for testing.

        Args:
            inputs (list[Tensor]): List of multi-level img features.
            prev_output (Tensor): The output of previous decode head.
            img_metas (list[dict]): List of image info dict where each dict
                has: 'img_shape', 'scale_factor', 'flip', and may also contain
                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
                For details on the values of these keys see
                `mmseg/datasets/pipelines/formatting.py:Collect`.
            test_cfg (dict): The testing config.

        Returns:
            Tensor: Output segmentation map.
        """
        return self.forward(inputs, prev_output, False)


