import torch
import torch.nn as nn
import torch.nn.functional as F

from .swin_transformer import SwinTransformer
# from swin_transformerv2 import SwinTransformerV2
from .loss import VarLoss, SILogLoss
from pytorch_wavelets import DWTForward, DWTInverse

# from .modules.HighFreqEnhanced2WTConv import HighFreqEnhancedWTConv2d
from .modules.AFEBlock import AFE
from .modules.WTConv import WTConv2d
from .modules.FreqFusion import FreqFusion


# from .modules.HighFreqFocusWTConv import HighFreqFocusWTConv2d


########################################################################################################################

class DoubleConv(nn.Module):
    """Two 3x3 convolution stages (grouped conv -> plain conv), each followed
    by InstanceNorm2d and LeakyReLU, plus a 3x3 convolutional skip projection.

    Channels are shuffled before the grouped convolution so information mixes
    across groups (ShuffleNet-style shuffle).

    Args:
        in_channels:  input channel count; must be divisible by ``groups``.
        out_channels: output channel count.
        mid_channels: hidden channel count between the two convs; defaults to
                      ``out_channels``. Must be divisible by ``groups``.
        groups:       group count for the shuffle and the first conv.
                      Defaults to 4, matching the previously hard-coded value,
                      so existing callers are unaffected.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None, groups=4):
        super().__init__()

        if not mid_channels:
            mid_channels = out_channels

        self.groups = groups

        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1, groups=groups),
            nn.InstanceNorm2d(mid_channels),
            nn.LeakyReLU(),
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.InstanceNorm2d(out_channels),
            nn.LeakyReLU(),
        )

        # Residual projection so the output is conv2(conv1(shuffle(x))) + bt(x).
        self.bt = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)

    def forward(self, x):
        skip = self.bt(x)

        x = self.channel_shuffle(x, self.groups)
        x = self.conv1(x)
        x = self.conv2(x)

        return x + skip

    def channel_shuffle(self, x, groups):
        """Interleave channels across ``groups`` (ShuffleNet channel shuffle)."""
        batchsize, num_channels, height, width = x.shape
        channels_per_group = num_channels // groups

        # (N, C, H, W) -> (N, g, C/g, H, W) -> swap group/channel axes -> flatten.
        x = x.view(batchsize, groups, channels_per_group, height, width)
        x = torch.transpose(x, 1, 2).contiguous()
        x = x.view(batchsize, -1, height, width)

        return x


class Up(nn.Module):
    """Bilinear 2x upsampling followed by optional skip concatenation and a
    DoubleConv fusion block."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.up = nn.Upsample(
            scale_factor=2, mode='bilinear', align_corners=True)
        self.conv = DoubleConv(
            in_channels, out_channels, in_channels)

    def forward(self, x1, x2=None):
        upsampled = self.up(x1)
        if x2 is None:
            return self.conv(upsampled)

        # Pad the upsampled map so its spatial size matches the skip feature.
        dh = x2.size()[2] - upsampled.size()[2]
        dw = x2.size()[3] - upsampled.size()[3]
        if dw > 0 or dh > 0:
            upsampled = F.pad(upsampled, [dw // 2, dw - dw // 2,
                                          dh // 2, dh - dh // 2])

        fused = torch.cat([x2, upsampled], dim=1)
        return self.conv(fused)

class FreqFusionUp(nn.Module):
    """Upsampling block that fuses features with FreqFusion instead of plain
    interpolation.

    ``hf`` denotes the high-frequency feature (fewer channels); ``lf`` the
    low-frequency feature (more channels).
    """

    def __init__(self, lf_in_channels, hf_in_channels, out_channels):
        super().__init__()
        self.hfc = hf_in_channels
        self.lfc = lf_in_channels
        self.upFreqFusion = FreqFusion(hr_channels=self.hfc, lr_channels=self.lfc)
        merged_channels = self.hfc + self.lfc
        self.conv = DoubleConv(merged_channels, out_channels, merged_channels)

    def forward(self, x1, x2=None):
        # x2 has fewer channels (high-frequency); x1 has more (low-frequency).
        # The low-resolution mask returned by FreqFusion is not used here.
        _mask_lr, hf_feat, lf_feat = self.upFreqFusion(hr_feat=x2, lr_feat=x1)
        merged = torch.cat([hf_feat, lf_feat], dim=1)
        return self.conv(merged)


class OutConv(nn.Module):
    """Final 3x3 convolution that maps decoder features to a strictly positive
    depth map via ``exp``, offset by a log-space prior mean."""

    def __init__(self, in_channels, out_channels, prior_mean = 1.54):
        super().__init__()
        self.prior_mean = prior_mean
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)

    def forward(self, x):
        log_depth = self.conv(x) + self.prior_mean
        return torch.exp(log_depth)


class VarLayer(nn.Module):
    """Variational layer: turns per-pixel predicted gradients into a dense map
    by solving an attention-weighted least-squares system per group.

    For each of ``gr`` groups, 4 directional gradients are predicted (steps of
    1 and 2 horizontally, ``w`` and ``2w`` vertically on the flattened h*w
    grid) together with sigmoid attention weights. The normal equations
    ``(A^T A) x = A^T B`` are then solved, where A is the fixed difference
    operator scaled by attention and B the attention-weighted gradients.
    The spatial size (h, w) is fixed at construction time.
    """

    def __init__(self, in_channels, h, w):
        super(VarLayer, self).__init__()

        self.gr = 16  # number of independent groups solved in parallel

        # Predicts 4 directional gradients per group.
        self.grad = nn.Sequential(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
                #nn.BatchNorm2d(in_channels),
                nn.LeakyReLU(),
                nn.Conv2d(in_channels, 4*self.gr, kernel_size=3, padding=1))

        # Predicts per-direction confidence weights in (0, 1).
        self.att = nn.Sequential(
                nn.Conv2d(in_channels, in_channels, kernel_size=3, padding=1),
                #nn.BatchNorm2d(in_channels),
                nn.LeakyReLU(),
                nn.Conv2d(in_channels, 4*self.gr, kernel_size=3, padding=1),
                nn.Sigmoid())


        num = h * w

        # Fixed finite-difference operator over the flattened grid: one row per
        # pixel, 4 difference directions.  float16 halves the memory of this
        # (num, 4, num) buffer; presumably promoted to float32 when multiplied
        # by the attention tensor in forward — TODO confirm.
        a = torch.zeros(num, 4, num, dtype=torch.float16)

        for i in range(num):

            #a[i, 0, i] = 1.0
            #if i + 1 < num:
            # Direction 0: horizontal difference, step 1 (skipped at row ends).
            if (i+1) % w != 0 and (i+1) < num:
                a[i, 0, i] = 1.0
                a[i, 0, i+1] = -1.0

            #a[i, 1, i] = 1.0
            # Direction 1: vertical difference, step w (one row down).
            if i + w < num:
                a[i, 1, i] = 1.0
                a[i, 1, i+w] = -1.0

            # Direction 2: horizontal difference, step 2.
            if (i+2) % w != 0 and (i+2) < num:
                a[i, 2, i] = 1.0
                a[i, 2, i+2] = -1.0

            # Direction 3: vertical difference, step 2w (two rows down).
            if i + w + w < num:
                a[i, 3, i] = 1.0
                a[i, 3, i+w+w] = -1.0

        # Anchor the last pixel so the system is not purely differential.
        a[-1, 0, -1] = 1.0
        a[-1, 1, -1] = 1.0

        a[-1, 2, -1] = 1.0
        a[-1, 3, -1] = 1.0

        # Buffer (not a parameter): moves with .to(device), never trained.
        self.register_buffer('a', a.unsqueeze(0))

        self.ins = nn.GroupNorm(1, self.gr)

        # Squeeze-and-excitation style gate over the gr solved channels.
        self.se = nn.Sequential(
                nn.AdaptiveAvgPool2d((1, 1)),
                nn.Conv2d(in_channels, in_channels//2, kernel_size=1, padding=0),
                nn.LeakyReLU(),
                nn.Conv2d(in_channels//2, self.gr, kernel_size=1, padding=0),
                nn.Sigmoid())

        self.post = nn.Sequential(
                nn.Conv2d(self.gr, 8*self.gr, kernel_size=3, padding=1))

    def forward(self, x):
        # x: (n, in_channels, h, w); (h, w) must match the constructor args.
        # NOTE(review): `skip` is computed but never used below — dead code?
        skip = x.clone()
        att = self.att(x)
        grad = self.grad(x)


        se = self.se(x)

        n, c, h, w = x.shape

        # Reshape to (n*gr, h*w, 4, 1): each group becomes an independent system.
        att = att.reshape(n*self.gr, 4, h*w, 1).permute(0, 2, 1, 3)
        grad = grad.reshape(n*self.gr, 4, h*w, 1).permute(0, 2, 1, 3)

        # Scale the operator rows and targets by the predicted attention.
        A = self.a * att
        B = grad * att

        A = A.reshape(n*self.gr, h*w*4, h*w)
        B = B.reshape(n*self.gr, h*w*4, 1)

        AT = A.permute(0, 2, 1)

        # Normal equations of the weighted least-squares problem.
        ATA = torch.bmm(AT, A)
        ATB = torch.bmm(AT, B)

        # Add diagonal jitter for numerical stability.
        jitter_value = 1e-6  # raised from 1e-12
        jitter = torch.eye(h * w, dtype=x.dtype, device=x.device).unsqueeze(0) * jitter_value
        ATA_jitter = ATA + jitter

        # jitter = torch.eye(n=h*w, dtype=x.dtype, device=x.device).unsqueeze(0) * 1e-12
        # x, _ = torch.solve(ATB, ATA+jitter)

        # Deprecated torch.solve replaced by torch.linalg.solve.
        try:
            # torch.linalg.solve expects ATA_jitter of shape (..., n, n), ATB (..., n, m).
            x_solved = torch.linalg.solve(ATA_jitter, ATB)
        except RuntimeError as e:
            # If ATA_jitter is still singular, fall back to a least-squares solve.
            print("torch.linalg.solve 由于矩阵奇异而失败，回退到 torch.linalg.lstsq。")
            x_solved = torch.linalg.lstsq(ATA_jitter, ATB).solution

        x = x_solved.reshape(n, self.gr, h, w)

        x = self.ins(x)

        x = se * x

        x = self.post(x)

        return x



class Refine(nn.Module):
    """Jointly refines a feature map (c1 channels) and a depth map (c2
    channels) from their channel-wise concatenation, returning both."""

    def __init__(self, c1, c2):
        super(Refine, self).__init__()

        joint = c1 + c2
        self.fw = nn.Sequential(
                nn.Conv2d(joint, joint, kernel_size=3, padding=1),
                nn.LeakyReLU(),
                nn.Conv2d(joint, c1, kernel_size=3, padding=1))

        self.dw = nn.Sequential(
                nn.Conv2d(joint, joint, kernel_size=3, padding=1),
                nn.LeakyReLU(),
                nn.Conv2d(joint, c2, kernel_size=3, padding=1))

    def forward(self, feat, depth):
        joined = torch.cat([feat, depth], 1)
        # Two heads share the concatenated input: one per refined output.
        return self.fw(joined), self.dw(joined)


class MetricLayer(nn.Module):
    """Maps a pooled (n, c, 1, 1) feature to two scalars per sample, returned
    as (n, 2, 1, 1) so they broadcast over spatial maps."""

    def __init__(self, c):
        super(MetricLayer, self).__init__()

        self.ln = nn.Sequential(
                nn.Linear(c, c//4),
                nn.LeakyReLU(),
                nn.Linear(c//4, 2))

    def forward(self, x):
        # Drop the trailing singleton spatial dims: (n, c, 1, 1) -> (n, c).
        flat = x.squeeze(-1).squeeze(-1)
        scalars = self.ln(flat)
        # Restore singleton spatial dims: (n, 2) -> (n, 2, 1, 1).
        return scalars.unsqueeze(-1).unsqueeze(-1)


class VADepthNet(nn.Module):
    """Depth estimation network: Swin-Large backbone, wavelet (WTConv) + AFE
    skip-feature enhancement, U-Net-style decoder, and a variational layer
    whose multi-scale refinements are combined into one metric depth map.

    Args:
        pretrained: path to pretrained backbone weights, or None.
        max_depth:  maximum depth passed to the SILog loss.
        prior_mean: log-depth prior added inside OutConv.
        si_lambda:  lambda of the scale-invariant log loss.
        img_size:   input image size; VarLayer is built for img_size/16.
    """

    def __init__(self, pretrained=None, max_depth=10.0, prior_mean=1.54, si_lambda=0.85, img_size=(512, 640)):
        super().__init__()

        self.prior_mean = prior_mean
        self.SI_loss_lambda = si_lambda
        self.max_depth = max_depth

        # Swin-L configuration (embed_dim 192, depths 2/2/18/2, heads 6/12/24/48).
        pretrain_img_size = img_size
        patch_size = (4, 4)
        in_chans = 3  # NOTE(review): defined but not included in backbone_cfg.
        embed_dim = 192
        depths = [2, 2, 18, 2]
        num_heads = [6, 12, 24, 48]
        window_size = 12

        backbone_cfg = dict(
            pretrain_img_size=pretrain_img_size,
            patch_size=patch_size,
            embed_dim=embed_dim,
            depths=depths,
            num_heads=num_heads,
            window_size=window_size,
            # ape=True,
            ape=False,
            drop_rate=0.
        )

        # self.enhanceHighFreqImage = HighFreqEnhancedWTConv2d(in_channels=3, out_channels=3)
        self.backbone = SwinTransformer(**backbone_cfg)
        
        self.backbone.init_weights(pretrained=pretrained)
        # self.backbone.init_weights(pretrained=pretrained)

        # Skip features are first enhanced by wavelet convolution (WTConv),
        # then by the AFE attention block — one pair per pyramid level.
        self.wtc5 = WTConv2d(in_channels=1536, out_channels=1536)
        self.wtc4 = WTConv2d(in_channels=768, out_channels=768)
        self.wtc3 = WTConv2d(in_channels=384, out_channels=384)
        self.wtc2 = WTConv2d(in_channels=192, out_channels=192)

        self.afe5 = AFE(1536)
        self.afe4 = AFE(768)
        self.afe3 = AFE(384)
        self.afe2 = AFE(192)


        # Decoder: plain bilinear Up blocks are used in forward; the FreqFusion
        # variants below are constructed but currently commented out there.
        self.up_4 = Up(1536 + 768, 512)
        self.up_3 = Up(512 + 384, 256)
        self.up_2 = Up(256 + 192, 64)
        self.freFusionUp_4 = FreqFusionUp(1536,768,512)
        self.freFusionUp_3 = FreqFusionUp(512,384,256)
        self.freFusionUp_2 = FreqFusionUp(256,192,64)
        # self.highEnhance4 = HighFreqFocusWTConv2d(512,512)
        # self.highEnhance3 = HighFreqFocusWTConv2d(256,256)
        # self.highEnhance2 = HighFreqFocusWTConv2d(64,64)

        self.outc = OutConv(128, 1, self.prior_mean)

        # Variational layer operates at 1/16 resolution of the input image.
        self.vlayer = VarLayer(512, img_size[0]//16, img_size[1]//16)

        self.ref_4 = Refine(512, 128)
        self.ref_3 = Refine(256, 128)
        self.ref_2 = Refine(64, 128)

        self.var_loss = VarLoss(128, 512)
        self.si_loss = SILogLoss(self.SI_loss_lambda, self.max_depth)

        # Global scale/shift predictor from the deepest backbone feature.
        self.mlayer = nn.Sequential(
                nn.AdaptiveMaxPool2d((1,1)),
                MetricLayer(1536))

    def forward(self, x, gts=None):
        """Predict depth from an image batch.

        Args:
            x:   input images (N, 3, H, W).
            gts: ground-truth depth; required when ``self.training`` is True
                 (used by the variational and SILog losses).

        Returns:
            Predicted depth map; when training, a (depth, total_loss) tuple.
        """

        # x = self.enhanceHighFreqImage(x)
        x2, x3, x4, x5 = self.backbone(x)  # pyramid feats, e.g. [4,192,120,160], [4,384,60,80], [4,768,30,40], [4,1536,15,20]

        # Enhance each skip-connection feature: WTConv first, then AFE.
        x5 = self.wtc5(x5)
        x5 = self.afe5(x5)
        x4 = self.wtc4(x4)
        x4 = self.afe4(x4)
        x3 = self.wtc3(x3)
        x3 = self.afe3(x3)
        x2 = self.wtc2(x2)
        x2 = self.afe2(x2)

        outs = {}

        metric = self.mlayer(x5)

        x = self.up_4(x5, x4) # e.g. 4,512,30,40
        # x = self.freFusionUp_4(x5, x4) # 4,512,30,40
        # x = self.highEnhance4(x)

        # Variational layer: gradients -> 128-channel depth basis.
        d = self.vlayer(x) # e.g. 4,128,30,40 before refinement

        if self.training:
            var_loss = self.var_loss(x, d, gts)


        x, d  = self.ref_4(x, d)

        # Upsample each refined depth basis to full resolution for fusion.
        d_u4 = F.interpolate(d, scale_factor=16, mode='bilinear', align_corners=True) # full-res, 128 ch

        x = self.up_3(x, x3) # e.g. 4,256,60,80
        # x = self.freFusionUp_3(x, x3) # 4,256,60,80
        # x = self.highEnhance3(x)

        x, d = self.ref_3(x, F.interpolate(d, scale_factor=2, mode='bilinear', align_corners=True))

        d_u3 = F.interpolate(d, scale_factor=8, mode='bilinear', align_corners=True) # full-res, 128 ch

        x = self.up_2(x, x2) # e.g. 4,64,120,160
        # x = self.freFusionUp_2(x, x2) # 4,64,120,160
        # x = self.highEnhance2(x)

        x, d = self.ref_2(x, F.interpolate(d, scale_factor=2, mode='bilinear', align_corners=True))

        d_u2 = F.interpolate(d, scale_factor=4, mode='bilinear', align_corners=True) # full-res, 128 ch

        # Sum the three full-resolution depth bases.
        d = d_u2 + d_u3 + d_u4

        # Map to metric depth: learned global scale (sigmoid) and shift (exp).
        d = torch.sigmoid(metric[:, 0:1]) * (self.outc(d) + torch.exp(metric[:, 1:2]))

        outs['scale_1'] = d # (N, 1, H, W)

        if self.training:
            si_loss = self.si_loss(outs, gts)
            return outs['scale_1'], var_loss + si_loss
        else:
            return outs['scale_1']
#
# if __name__ == '__main__':
#     # x = torch.rand(4, 128, 16, 16)
#     # mask = torch.rand(4, 4 * 25, 16, 16)
#     # carafe(x, mask, kernel_size=5, group=1, up=2)
#     if torch.cuda.is_available():
#         device = torch.device('cuda')
#     else:
#         device = torch.device('cpu')
#
#     hr_feat = torch.rand(4, 384, 60, 80).to(device)
#     lr_feat = torch.rand(4, 768, 30, 40).to(device)
#     model = FreqFusion(hr_channels=384, lr_channels=768).to(device)
#     mask_lr, hr_feat, lr_feat = model(hr_feat=hr_feat, lr_feat=lr_feat)
#     print(hr_feat.shape)
#     print(lr_feat.shape)

# Test for FreqFusionUp
# if __name__ == '__main__':
#     if torch.cuda.is_available():
#         device = torch.device('cuda')
#     else:
#         device = torch.device('cpu')
#     model = FreqFusionUp(256,192,64).to(device)
#     x2 = torch.randn(4,192,120,160).to(device)
#     x = torch.randn(4,256,60,80).to(device)
#     out = model(x, x2)
#     print(out.shape)
#

# # main function for testing Up
# if __name__ == '__main__':
#     model = Up(1536+768, 512)
#     x1 = torch.randn(4,1536,15,20)
#     x2 = torch.randn(4,768,30,40)
#     out = model(x1, x2)
#     print(out.shape)  # 4,512,30,40

# if __name__ == '__main__':
#     # pretrained = 'E:\\projects\\ftdepth\\model_zoo\\swin_transformer\\swin_large_patch4_window7_224_22k.pth'
#     pretrained = 'E:\\projects\\ftdepth\\model_zoo\\swinv2_large_patch4_window12_192_22k.pth'
#     x= torch.randn(4, 3, 480, 640)
#     model = VADepthNet(pretrained=pretrained)
#     x2,x3,x4,x5 = model(x)
#     print(x2.shape)
#     print(x3.shape)
#     print(x4.shape)
#     print(x5.shape)


if __name__ == '__main__':
    # Smoke test: one training-mode forward pass on random data.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    images = torch.randn(4, 3, 512, 640).to(device)
    gt_depth = torch.rand(4, 1, 512, 640).to(device)
    net = VADepthNet().to(device)
    pred, total_loss = net(images, gt_depth)
    print(pred.shape)
    print(total_loss)

# if __name__ == "__main__":
#     # 创建一个随机输入张量 [N, C, H, W]
#     x = torch.randn(4, 256, 60, 80)  # 示例输入
#
#     # 初始化小波上采样层
#     wavelet_upsample = WaveletUpsample(wave='haar')
#
#     # 前向传播
#     x_up = wavelet_upsample(x)
#
#     print(f"输入尺寸: {x.shape}")
#     print(f"上采样后尺寸: {x_up.shape}")
