import sys
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from collections import OrderedDict
import numbers
from mamba import SS2D_local,SS2D_fusion
import functools
from pytorch_wavelets import DWTForward, DWTInverse  # (or import DWT, IDWT)
from einops import rearrange, repeat

def to_3d(x):
    """Flatten spatial dims: (b, c, h, w) -> (b, h*w, c)."""
    b, c, h, w = x.shape
    return x.reshape(b, c, h * w).permute(0, 2, 1)

def to_4d(x, h, w):
    """Unflatten token dim back to spatial: (b, h*w, c) -> (b, c, h, w)."""
    b, _, c = x.shape
    return x.permute(0, 2, 1).reshape(b, c, h, w)

class BiasFree_LayerNorm(nn.Module):
    """LayerNorm over the last dimension with a learnable scale only:
    no mean subtraction and no bias term."""

    def __init__(self, normalized_shape):
        super(BiasFree_LayerNorm, self).__init__()
        shape = ((normalized_shape,)
                 if isinstance(normalized_shape, numbers.Integral)
                 else normalized_shape)
        shape = torch.Size(shape)
        # Only 1-D normalized shapes are supported.
        assert len(shape) == 1

        self.weight = nn.Parameter(torch.ones(shape))
        self.normalized_shape = shape

    def forward(self, x):
        # Biased variance over the last dim; eps keeps the sqrt stable.
        var = x.var(-1, keepdim=True, unbiased=False)
        return x / (var + 1e-5).sqrt() * self.weight


class WithBias_LayerNorm(nn.Module):
    """Standard LayerNorm over the last dimension with learnable
    scale and bias."""

    def __init__(self, normalized_shape):
        super(WithBias_LayerNorm, self).__init__()
        shape = ((normalized_shape,)
                 if isinstance(normalized_shape, numbers.Integral)
                 else normalized_shape)
        shape = torch.Size(shape)
        # Only 1-D normalized shapes are supported.
        assert len(shape) == 1

        self.weight = nn.Parameter(torch.ones(shape))
        self.bias = nn.Parameter(torch.zeros(shape))
        self.normalized_shape = shape

    def forward(self, x):
        mean = x.mean(-1, keepdim=True)
        # Biased variance over the last dim; eps keeps the sqrt stable.
        var = x.var(-1, keepdim=True, unbiased=False)
        return (x - mean) / (var + 1e-5).sqrt() * self.weight + self.bias



class LayerNorm(nn.Module):
    """Channel-wise LayerNorm for (b, c, h, w) feature maps.

    The input is flattened to (b, h*w, c), normalized over the channel
    dimension by the chosen body, then restored to (b, c, h, w).
    """

    def __init__(self, dim, LayerNorm_type='WithBias'):
        super(LayerNorm, self).__init__()
        body_cls = BiasFree_LayerNorm if LayerNorm_type == 'BiasFree' else WithBias_LayerNorm
        self.body = body_cls(dim)

    def forward(self, x):
        h, w = x.shape[-2:]
        normed = self.body(to_3d(x))
        return to_4d(normed, h, w)

class img_encoder(nn.Module):
    """Two-stream convolutional encoder for visible (x) and infrared (y) images.

    Each single-channel input is encoded into a 4-level feature pyramid:
    64 channels at full resolution, then 128, 256, 512 channels at
    1/2, 1/4, 1/8 resolution (stride-2 convs).
    """

    def __init__(self):
        super(img_encoder, self).__init__()
        activation = nn.ReLU(inplace=True)
        # Visible-image stream.
        self.modelx1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1), activation)
        self.modelx2 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1), activation)
        self.modelx3 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1), activation)
        self.modelx4 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1), activation)
        # Infrared-image stream (same architecture, separate weights).
        self.modely1 = nn.Sequential(nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1), activation)
        self.modely2 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1), activation)
        self.modely3 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1), activation)
        self.modely4 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1), activation)

    def split_channels(self, x):
        """Split features into even-indexed and odd-indexed channel halves."""
        odd = x[:, 0::2, :, :]   # channels 0, 2, 4, ...
        even = x[:, 1::2, :, :]  # channels 1, 3, 5, ...
        return odd, even

    def shuffle(self, x, y):
        """Exchange half of the channels between the two streams.

        NOTE(review): currently not called from forward (calls were
        commented out); kept for experimentation.
        """
        x_odd, x_even = self.split_channels(x)
        y_odd, y_even = self.split_channels(y)
        x = torch.cat([x_odd, y_even], dim=1)
        y = torch.cat([y_odd, x_even], dim=1)
        return x, y

    def forward(self, x, y):
        """Return the two 4-level pyramids: [x1..x4], [y1..y4]."""
        x1 = self.modelx1(x)
        y1 = self.modely1(y)
        x2 = self.modelx2(x1)
        y2 = self.modely2(y1)
        x3 = self.modelx3(x2)
        y3 = self.modely3(y2)
        x4 = self.modelx4(x3)
        # Bug fix: y4 was previously computed with self.modelx4, so the
        # infrared stream shared the visible stream's deepest conv and
        # self.modely4 was never used.
        y4 = self.modely4(y3)
        return [x1, x2, x3, x4], [y1, y2, y3, y4]


class img_decoder(nn.Module):
    """U-Net style decoder: progressively upsamples the deepest fused
    feature while concatenating skip connections from shallower scales,
    ending in a Tanh-bounded single-channel fusion image."""

    def __init__(self):
        super(img_decoder, self).__init__()
        relu = nn.ReLU(inplace=True)
        # 512 -> 256, 2x upsample.
        self.model1 = nn.Sequential(
            nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1),
            relu,
        )
        # (256 + 256 skip) -> 256 upsampled -> 128.
        self.model2 = nn.Sequential(
            nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2, padding=1, output_padding=1),
            relu,
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            relu,
        )
        # (128 + 128 skip) -> 128 upsampled -> 64.
        self.model3 = nn.Sequential(
            nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2, padding=1, output_padding=1),
            relu,
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            relu,
        )
        # (64 + 64 skip) -> 64 (no upsampling) -> 32.
        self.model4 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=3, stride=1, padding=1),
            relu,
            nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),
            relu,
        )
        # Final projection to one channel, bounded to [-1, 1].
        self.model5 = nn.Sequential(
            nn.Conv2d(32, 1, kernel_size=3, stride=1, padding=1),
            nn.Tanh(),
        )

    def forward(self, feature):
        """`feature` is the 4-level pyramid [f0(64ch) .. f3(512ch)]."""
        up3 = self.model1(feature[3])
        up2 = self.model2(torch.cat((up3, feature[2]), dim=1))
        up1 = self.model3(torch.cat((up2, feature[1]), dim=1))
        up0 = self.model4(torch.cat((up1, feature[0]), dim=1))
        return self.model5(up0)


def activation(act_type='prelu', slope=0.2, n_prelu=1):
    """Build an activation layer by name.

    Supports 'prelu' (with `n_prelu` parameters initialized to `slope`)
    and 'lrelu' (LeakyReLU with `slope`); raises NotImplementedError
    for anything else.
    """
    kind = act_type.lower()
    if kind == 'prelu':
        return nn.PReLU(num_parameters=n_prelu, init=slope)
    if kind == 'lrelu':
        return nn.LeakyReLU(negative_slope=slope, inplace=True)
    raise NotImplementedError('[ERROR] Activation layer [%s] is not implemented!' % kind)

def pad(pad_type, padding):
    """Build an explicit padding layer, or return None when padding is 0.

    Bug fix: the original body silently fell off the end (returning None)
    for any nonzero padding, so non-'zero' pad types were never applied.
    'zero' padding is handled inside nn.Conv2d by callers (see ConvBlock),
    so it is intentionally not built here.
    """
    pad_type = pad_type.lower()
    if padding == 0:
        return None
    if pad_type == 'reflect':
        return nn.ReflectionPad2d(padding)
    if pad_type == 'replicate':
        return nn.ReplicationPad2d(padding)
    raise NotImplementedError('[ERROR] Padding layer [%s] is not implemented!' % pad_type)

def norm(n_feature, norm_type='bn'):
    """Build a normalization layer by name (only 'bn' is supported)."""
    norm_type = norm_type.lower()
    if norm_type == 'bn':
        return nn.BatchNorm2d(n_feature)
    # Bug fix: the original error message was copy-pasted from sequential()
    # ("does not support OrderedDict") and did not describe this failure.
    raise NotImplementedError('[ERROR] Normalization layer [%s] is not implemented!' % norm_type)

def get_valid_padding(kernel_size, dilation):
    """Padding that preserves spatial size for a (possibly dilated)
    kernel at stride 1."""
    effective_kernel = dilation * (kernel_size - 1) + 1
    return (effective_kernel - 1) // 2

def sequential(*args):
    """Chain modules into one nn.Sequential, unwrapping any nested
    nn.Sequential and skipping non-module entries (e.g. None).

    A single module argument is returned as-is; a single OrderedDict
    is rejected.
    """
    if len(args) == 1:
        if isinstance(args[0], OrderedDict):
            raise NotImplementedError('[ERROR] %s.sequential() does not support OrderedDict' % sys.modules[__name__])
        return args[0]
    flattened = []
    for item in args:
        if isinstance(item, nn.Sequential):
            flattened.extend(item)
        elif isinstance(item, nn.Module):
            flattened.append(item)
    return nn.Sequential(*flattened)



def ConvBlock(in_channels, out_channels, kernel_size, stride=1, dilation=1, bias=True, valid_padding=True, padding=0,
              act_type='prelu', norm_type='bn', pad_type='zero'):
    """Conv2d bundled with optional explicit padding, normalization and
    activation layers (assembled via sequential()).

    When `valid_padding` is True, `padding` is overridden so spatial size
    is preserved at stride 1 (accounting for dilation). A non-'zero'
    `pad_type` is applied by a dedicated padding layer in front of the
    conv; 'zero' padding is done by nn.Conv2d itself. Fixes: removed a
    dead `else: pass`, and the conv no longer pads again when an explicit
    padding layer is present (which would double the padding).
    """
    if valid_padding:
        padding = get_valid_padding(kernel_size, dilation)
    p = pad(pad_type, padding) if pad_type and pad_type != 'zero' else None
    # If p exists, it already pads the input; the conv must not pad too.
    conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                     padding=0 if p is not None else padding, dilation=dilation, bias=bias)
    act = activation(act_type) if act_type else None
    n = norm(out_channels, norm_type) if norm_type else None
    return sequential(p, conv, n, act)

class DenseLayer(nn.Module):
    """Dense-block layer: a 3x3 conv producing `growth` new channels,
    whose output is concatenated onto the input (DenseNet-style)."""

    def __init__(self, num_channels, growth):
        super(DenseLayer, self).__init__()
        self.conv = ConvBlock(num_channels, growth, kernel_size=3, act_type='lrelu', norm_type=None)

    def forward(self, x):
        grown = self.conv(x)
        return torch.cat((x, grown), 1)

class mamba_bl(nn.Module):
    """Dual-branch fusion block for one pyramid level.

    Combines a spatial branch (SS2D_fusion over the normalized vis/ir
    features) with a frequency branch (SS2D_local over a wavelet-subband
    packing of their conv-fused features), then merges both outputs with
    a 3x3 conv back to d_model channels.
    """
    def __init__(self,d_model, d_state,expand,dropout,device):
        super(mamba_bl,self).__init__()

        # Cross-feature SS2D for the spatial branch; single-input SS2D for
        # the wavelet/frequency branch.
        self.spation_mamba=SS2D_fusion(d_model=d_model, d_state=d_state,expand=expand,dropout=dropout)
        self.frequency_mamba=SS2D_local(d_model=d_model, d_state=d_state,expand=expand,dropout=dropout)
        self.norm1=LayerNorm(dim=d_model)
        self.norm2=LayerNorm(dim=d_model)
        self.norm3 = LayerNorm(dim=d_model)  # NOTE(review): not used in forward
        # 2-level Haar DWT and its inverse for the frequency branch.
        self.xfm = DWTForward(J=2, mode='zero', wave='haar').to(device)
        self.ifm = DWTInverse(mode='zero', wave='haar').to(device)
        self.ln_11 = nn.LayerNorm(d_model)
        # conv: fuses the concatenated (vis, ir) features into the
        # frequency-branch input; conv2: merges the two branch outputs.
        self.conv=nn.Conv2d(d_model*2,d_model,3,1,1)
        self.conv2=nn.Conv2d(d_model*2,d_model,3,1,1)
        # self.linear_out = nn.Linear(d_model * 2, d_model)
        # Learnable per-channel residual scale for the frequency branch.
        self.skip_scale1= nn.Parameter(torch.ones(d_model))
        self.drop_path1 = DropPath(0)
    def frequency_mamba_process(self,prepare):
        """Frequency branch: pack the 2-level DWT subbands of `prepare`
        into one input-sized canvas, run SS2D_local on it with a scaled
        residual skip, unpack the result back into subbands and inverse
        transform.

        Assumes spatial dims divide evenly at both DWT levels so the
        quadrant packing tiles exactly — TODO confirm for odd sizes.
        """
        Yl, Yh = self.xfm(prepare)
        # Canvas the same size as the input; subbands are written into it
        # by quadrant below.
        h00 = torch.zeros(prepare.shape).float().to(prepare.device)
        for i in range(len(Yh)):
            if i == len(Yh) - 1:
                # Coarsest level: the low-pass band and its three
                # high-pass bands fill the four sub-quadrants of the
                # top-left quadrant.
                h00[:, :, :Yl.size(2), :Yl.size(3)] = Yl
                h00[:, :, :Yl.size(2), Yl.size(3):Yl.size(3) * 2] = Yh[i][:, :, 0, :, :]
                h00[:, :, Yl.size(2):Yl.size(2) * 2, :Yl.size(3)] = Yh[i][:, :, 1, :, :]
                h00[:, :, Yl.size(2):Yl.size(2) * 2, Yl.size(3):Yl.size(3) * 2] = Yh[i][:, :, 2, :, :]
            else:
                # Finer level: its three high-pass bands fill the
                # remaining top-right / bottom-left / bottom-right
                # quadrants, cropped to fit the canvas.
                h00[:, :, :Yh[i].size(3), Yh[i].size(4):] = Yh[i][:, :, 0, :, :h00.shape[3] - Yh[i].size(4)]
                h00[:, :, Yh[i].size(3):, :Yh[i].size(4)] = Yh[i][:, :, 1, :h00.shape[2] - Yh[i].size(3), :]
                h00[:, :, Yh[i].size(3):, Yh[i].size(4):] = Yh[i][:, :, 2, :h00.shape[2] - Yh[i].size(3),
                                                            :h00.shape[3] - Yh[i].size(4)]
        # SS2D modules expect channels-last layout.
        h00 = rearrange(h00, "b c h w -> b h w c").contiguous()

        h11 = self.ln_11(h00)
        # # print(h11.shape,'h11shape')
        # Residual: scaled skip of the (pre-norm) canvas plus the mamba output.
        h11 = h00 * self.skip_scale1 + self.drop_path1(self.frequency_mamba(h11))

        h11 = rearrange(h11, "b h w c -> b c h w").contiguous()

        # Unpack the processed canvas back into DWT coefficients,
        # mutating Yh in place and replacing Yl.
        for i in range(len(Yh)):
            if i == len(Yh) - 1:
                # Yl is reassigned first; the slice has the same size as
                # the old Yl, so the following index expressions still
                # line up.
                Yl = h11[:, :, :Yl.size(2), :Yl.size(3)]
                Yh[i][:, :, 0, :, :] = h11[:, :, :Yl.size(2), Yl.size(3):Yl.size(3) * 2]
                Yh[i][:, :, 1, :, :] = h11[:, :, Yl.size(2):Yl.size(2) * 2, :Yl.size(3)]
                Yh[i][:, :, 2, :, :] = h11[:, :, Yl.size(2):Yl.size(2) * 2, Yl.size(3):Yl.size(3) * 2]
            else:
                Yh[i][:, :, 0, :, :h11.shape[3] - Yh[i].size(4)] = h11[:, :, :Yh[i].size(3), Yh[i].size(4):]
                Yh[i][:, :, 1, :h11.shape[2] - Yh[i].size(3), :] = h11[:, :, Yh[i].size(3):, :Yh[i].size(4)]
                Yh[i][:, :, 2, :h11.shape[2] - Yh[i].size(3), :h11.shape[3] - Yh[i].size(4)] = h11[:, :, Yh[i].size(3):,
                                                                                               Yh[i].size(4):]
        Yl = Yl.to(prepare.device)
        # Two-step inverse DWT: coarse level first, then the finest level.
        # NOTE(review): Yh[1]/Yh[0] indexing hard-codes J=2.
        temp = self.ifm((Yl, [Yh[1]]))
        recons2 = self.ifm((temp, [Yh[0]])).to(prepare.device)
        return recons2

    def forward(self,vis_feature,ir_feature):
        """Fuse visible and infrared feature maps; (b, c, h, w) in and out."""
        # Frequency branch runs on a conv-fused concatenation of both inputs.
        fre_input=torch.cat([vis_feature,ir_feature],dim=1)
        fre_input=self.conv(fre_input)
        frequency_result=self.frequency_mamba_process(fre_input)
        # Spatial branch: normalize each stream, then cross-fuse with SS2D.
        vis_feature=self.norm1(vis_feature)
        ir_feature=self.norm2(ir_feature)
        # These einsums just permute to channels-last for the SS2D module.
        vis_feature=torch.einsum('bchw->bhwc',vis_feature)
        ir_feature=torch.einsum('bchw->bhwc',ir_feature)
        spation_result=self.spation_mamba(x=vis_feature, xx=ir_feature)
        spation_result=torch.einsum('bhwc->bchw',spation_result)
        # Merge the two branches back to d_model channels.
        fusion=self.conv2(torch.cat([frequency_result, spation_result],dim=1))
        return fusion

class MambaFusion(nn.Module):
    """End-to-end fusion network: dual-stream encoder, one mamba fusion
    block per pyramid level (64/128/256/512 channels), and a decoder
    producing the fused single-channel image."""

    def __init__(self, device):
        super(MambaFusion, self).__init__()
        d_state = 16
        expand = 2
        attn_drop_rate = 0
        # One fusion block per encoder scale.
        self.mamba0 = mamba_bl(d_model=64, d_state=d_state, expand=expand, dropout=attn_drop_rate, device=device)
        self.mamba1 = mamba_bl(d_model=128, d_state=d_state, expand=expand, dropout=attn_drop_rate, device=device)
        self.mamba2 = mamba_bl(d_model=256, d_state=d_state, expand=expand, dropout=attn_drop_rate, device=device)
        self.mamba3 = mamba_bl(d_model=512, d_state=d_state, expand=expand, dropout=attn_drop_rate, device=device)
        self.encoder = img_encoder()
        self.decoder = img_decoder()

    def forward(self, vis, ir):
        """Fuse a visible and an infrared image into one output image."""
        vis_feats, ir_feats = self.encoder(vis, ir)
        blocks = (self.mamba0, self.mamba1, self.mamba2, self.mamba3)
        fused = [blk(v, r) for blk, v, r in zip(blocks, vis_feats, ir_feats)]
        return self.decoder(fused)



