import torch
import itertools
from torch import nn
from torch.nn import functional as F
from torch.autograd import Function

from .ska import SKA
from .eucb import EUCB

# Import from timm when available; otherwise fall back to local stand-ins
# so the module still works with incompatible/missing timm versions.
try:
    from timm.models.vision_transformer import trunc_normal_
    from timm.models.layers import SqueezeExcite
except ImportError:
    def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
        """Fill *tensor* in place from N(mean, std), resampling any entry
        that falls outside [a, b] until all values are in range.

        Returns the same tensor for chaining.
        """
        with torch.no_grad():
            tensor.normal_(mean=mean, std=std)
            out_of_range = (tensor < a) | (tensor > b)
            while out_of_range.any():
                tensor[out_of_range] = torch.normal(
                    mean, std, size=tensor[out_of_range].shape)
                out_of_range = (tensor < a) | (tensor > b)
            return tensor

    class SqueezeExcite(nn.Module):
        """Minimal squeeze-and-excitation block.

        Global-average-pool -> 1x1 conv bottleneck (in_chs * se_ratio
        channels) -> activation -> 1x1 conv expand -> sigmoid gate, which
        is multiplied back onto the input.
        """

        def __init__(self, in_chs, se_ratio=0.25, act_layer=nn.ReLU):
            super(SqueezeExcite, self).__init__()
            rd_chs = int(in_chs * se_ratio)
            self.avg_pool = nn.AdaptiveAvgPool2d(1)
            self.fc1 = nn.Conv2d(in_chs, rd_chs, 1)
            self.act1 = act_layer()
            self.fc2 = nn.Conv2d(rd_chs, in_chs, 1)
            self.act2 = nn.Sigmoid()

        def forward(self, x):
            gate = self.avg_pool(x)
            gate = self.act1(self.fc1(gate))
            gate = self.act2(self.fc2(gate))
            return x * gate

def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free 1x1 (pointwise) convolution with no padding."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        padding=0,
        bias=False,
    )


def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with 1-pixel padding
    (spatial size is preserved when stride=1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )


class Conv2d_BN(torch.nn.Sequential):
    """Bias-free Conv2d followed by BatchNorm2d, with a ``fuse`` helper
    that folds the BN statistics into a single equivalent Conv2d."""

    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1):
        super().__init__()
        self.add_module('c', torch.nn.Conv2d(
            a, b, ks, stride, pad, dilation, groups, bias=False))
        self.add_module('bn', torch.nn.BatchNorm2d(b))
        # BN affine params start at (bn_weight_init, 0); a zero weight lets
        # residual branches start out as the identity mapping.
        torch.nn.init.constant_(self.bn.weight, bn_weight_init)
        torch.nn.init.constant_(self.bn.bias, 0)

    @torch.no_grad()
    def fuse(self):
        """Return one Conv2d equivalent to conv+BN at inference time."""
        conv, bn = self._modules.values()
        # Per-output-channel scale applied by BN after the conv.
        scale = bn.weight / (bn.running_var + bn.eps) ** 0.5
        fused_w = conv.weight * scale[:, None, None, None]
        fused_b = bn.bias - bn.running_mean * scale
        fused = torch.nn.Conv2d(
            fused_w.size(1) * self.c.groups, fused_w.size(0),
            fused_w.shape[2:], stride=self.c.stride, padding=self.c.padding,
            dilation=self.c.dilation, groups=self.c.groups,
            device=conv.weight.device)
        fused.weight.data.copy_(fused_w)
        fused.bias.data.copy_(fused_b)
        return fused
class RepVGGDW(torch.nn.Module):
    """Depthwise RepVGG block: 3x3 DW conv + 1x1 DW conv + identity,
    re-parameterisable at inference time into one 3x3 depthwise conv."""

    def __init__(self, ed) -> None:
        super().__init__()
        self.conv = Conv2d_BN(ed, ed, 3, 1, 1, groups=ed)
        self.conv1 = Conv2d_BN(ed, ed, 1, 1, 0, groups=ed)
        self.dim = ed

    def forward(self, x):
        return self.conv(x) + self.conv1(x) + x

    @torch.no_grad()
    def fuse(self):
        """Collapse the three branches into a single fused 3x3 conv."""
        fused3 = self.conv.fuse()
        fused1 = self.conv1.fuse()

        # Lift the 1x1 kernel to 3x3 by zero-padding its border.
        w1 = torch.nn.functional.pad(fused1.weight, [1, 1, 1, 1])
        # Identity branch expressed as a depthwise 3x3 kernel with a 1 at
        # the centre of each channel.
        eye = torch.nn.functional.pad(
            torch.ones(w1.shape[0], w1.shape[1], 1, 1, device=w1.device),
            [1, 1, 1, 1])

        merged_w = fused3.weight + w1 + eye
        merged_b = fused3.bias + fused1.bias
        fused3.weight.data.copy_(merged_w)
        fused3.bias.data.copy_(merged_b)
        return fused3
    
class Attention(torch.nn.Module):
    """Multi-head self-attention over a 2-D feature map with learned
    relative-position biases (LeViT style).

    Args:
        dim: number of input/output channels.
        key_dim: per-head query/key dimension.
        num_heads: number of attention heads.
        attn_ratio: per-head value dimension = attn_ratio * key_dim.
        resolution: spatial side length the position-bias table is built
            for. Inputs of a different resolution are attended without
            position biases (see forward()).
    """

    def __init__(self, dim, key_dim, num_heads=8,
                 attn_ratio=4,
                 resolution=14):
        super().__init__()
        self.num_heads = num_heads
        self.scale = key_dim ** -0.5
        self.key_dim = key_dim
        self.nh_kd = nh_kd = key_dim * num_heads
        self.d = int(attn_ratio * key_dim)
        self.dh = int(attn_ratio * key_dim) * num_heads
        self.attn_ratio = attn_ratio
        h = self.dh + nh_kd * 2
        self.qkv = Conv2d_BN(dim, h, ks=1)
        self.proj = torch.nn.Sequential(torch.nn.ReLU(), Conv2d_BN(
            self.dh, dim, bn_weight_init=0))
        # Depthwise 3x3 conv for local mixing of the queries only.
        self.dw = Conv2d_BN(nh_kd, nh_kd, 3, 1, 1, groups=nh_kd)
        self.resolution = resolution
        # One learnable bias slot per unique absolute (dy, dx) offset, plus
        # an index table mapping every (query, key) pair to its slot.
        points = list(itertools.product(range(resolution), range(resolution)))
        N = len(points)
        attention_offsets = {}
        idxs = []
        for p1 in points:
            for p2 in points:
                offset = (abs(p1[0] - p2[0]), abs(p1[1] - p2[1]))
                if offset not in attention_offsets:
                    attention_offsets[offset] = len(attention_offsets)
                idxs.append(attention_offsets[offset])
        self.attention_biases = torch.nn.Parameter(
            torch.zeros(num_heads, len(attention_offsets)))
        self.register_buffer('attention_bias_idxs',
                             torch.LongTensor(idxs).view(N, N))

    @torch.no_grad()
    def train(self, mode=True):
        # Cache the expanded (heads, N, N) bias table for inference; drop
        # it in training so parameter updates are reflected.
        super().train(mode)
        if mode and hasattr(self, 'ab'):
            del self.ab
        else:
            self.ab = self.attention_biases[:, self.attention_bias_idxs]

    def forward(self, x):
        B, _, H, W = x.shape
        N = H * W
        qkv = self.qkv(x)
        q, k, v = qkv.view(B, -1, H, W).split(
            [self.nh_kd, self.nh_kd, self.dh], dim=1)

        # Local mixing of queries with a depthwise conv.
        q = self.dw(q)

        q = q.view(B, self.num_heads, self.key_dim, N).permute(0, 1, 3, 2)  # B, heads, N, key_dim
        k = k.view(B, self.num_heads, self.key_dim, N)                      # B, heads, key_dim, N
        v = v.view(B, self.num_heads, self.dh // self.num_heads, N).permute(0, 1, 3, 2)  # B, heads, N, d

        attn = (q @ k) * self.scale  # B, heads, N, N

        if N == self.resolution * self.resolution:
            # Add the trained relative-position biases.
            if self.training:
                attn = attn + self.attention_biases[:, self.attention_bias_idxs]
            else:
                attn = attn + self.ab
        # else: the input resolution differs from the one the bias table was
        # built for, so no bias is added. (The previous implementation
        # rebuilt the offset index table here with an O(N^2) Python loop,
        # only to index a freshly-created all-zero tensor — a pure no-op
        # that cost significant time on every forward pass.)

        attn = attn.softmax(dim=-1)
        x = (attn @ v).permute(0, 1, 3, 2).reshape(B, self.dh, H, W)
        x = self.proj(x)
        return x
class Residual(torch.nn.Module):
    """Skip-connection wrapper: y = x + m(x).

    When training with ``drop > 0``, the residual branch is dropped per
    sample with probability ``drop`` (stochastic depth) and the surviving
    samples are rescaled by 1/(1 - drop) to keep the expectation unchanged.
    """

    def __init__(self, m, drop=0.):
        super().__init__()
        self.m = m
        self.drop = drop

    def forward(self, x):
        if not (self.training and self.drop > 0):
            return x + self.m(x)
        # Per-sample keep mask (0 or 1), rescaled so E[mask] == 1.
        mask = torch.rand(x.size(0), 1, 1, 1, device=x.device)
        mask = mask.ge_(self.drop).div(1 - self.drop).detach()
        return x + self.m(x) * mask
class LKP(nn.Module):
    """Large-kernel perception head that predicts the per-pixel dynamic
    kernels consumed by SKA.

    Output shape: (B, dim // groups, sks**2, H, W).
    """

    def __init__(self, dim, lks, sks, groups):
        super().__init__()
        half = dim // 2
        self.cv1 = Conv2d_BN(dim, half)
        self.act = nn.ReLU()
        # Large depthwise lks x lks kernel gathers wide spatial context.
        self.cv2 = Conv2d_BN(half, half, ks=lks, pad=(lks - 1) // 2, groups=half)
        self.cv3 = Conv2d_BN(half, half)
        self.cv4 = nn.Conv2d(half, sks ** 2 * dim // groups, kernel_size=1)
        self.norm = nn.GroupNorm(num_groups=dim // groups,
                                 num_channels=sks ** 2 * dim // groups)

        self.sks = sks
        self.groups = groups
        self.dim = dim

    def forward(self, x):
        y = self.act(self.cv1(x))
        y = self.act(self.cv3(self.cv2(y)))
        w = self.norm(self.cv4(y))
        b, _, h, ww = w.size()
        # One sks*sks kernel per (channel group, spatial position).
        return w.view(b, self.dim // self.groups, self.sks ** 2, h, ww)
class LSConv(nn.Module):
    """Large-small convolution: SKA applies the dynamic kernels predicted
    by LKP; the result is batch-normalised and added residually."""

    def __init__(self, dim):
        super(LSConv, self).__init__()
        self.lkp = LKP(dim, lks=7, sks=3, groups=8)
        self.ska = SKA()
        self.bn = nn.BatchNorm2d(dim)

    def forward(self, x):
        kernels = self.lkp(x)
        return x + self.bn(self.ska(x, kernels))
class FFN(torch.nn.Module):
    """Pointwise feed-forward block: 1x1 expand -> ReLU -> 1x1 project.

    The projection BN weight is initialised to 0, so the block starts as
    a zero mapping (useful inside residual wrappers).
    """

    def __init__(self, ed, h):
        super().__init__()
        self.pw1 = Conv2d_BN(ed, h)
        self.act = torch.nn.ReLU()
        self.pw2 = Conv2d_BN(h, ed, bn_weight_init=0)

    def forward(self, x):
        return self.pw2(self.act(self.pw1(x)))

class Block(torch.nn.Module):
    """One LSNet stage block: token mixer (+ optional SE) followed by a
    residual FFN with 2x expansion.

    Even ``depth`` indices use a RepVGG depthwise mixer with
    squeeze-excite; odd indices use attention (stage 3) or LSConv
    (earlier stages) with no SE.
    """

    def __init__(self,
                 ed, kd, nh=8,
                 ar=4,
                 resolution=14,
                 stage=-1, depth=-1):
        super().__init__()

        conv_mixer = (depth % 2 == 0)
        if conv_mixer:
            self.mixer = RepVGGDW(ed)
            self.se = SqueezeExcite(ed, 0.25)
        else:
            self.se = torch.nn.Identity()
            self.mixer = (Residual(Attention(ed, kd, nh, ar, resolution=resolution))
                          if stage == 3 else LSConv(ed))

        self.ffn = Residual(FFN(ed, int(ed * 2)))

    def forward(self, x):
        return self.ffn(self.se(self.mixer(x)))
class BN_Linear(torch.nn.Sequential):
    """BatchNorm1d followed by Linear, fusable into a single Linear."""

    def __init__(self, a, b, bias=True, std=0.02):
        super().__init__()
        self.add_module('bn', torch.nn.BatchNorm1d(a, track_running_stats=True))
        self.add_module('l', torch.nn.Linear(a, b, bias=bias))
        trunc_normal_(self.l.weight, std=std)
        if bias:
            torch.nn.init.constant_(self.l.bias, 0)

    def forward(self, x):
        # BatchNorm1d cannot compute batch statistics from one sample;
        # for batch size 1 in training, temporarily use running stats.
        single_sample = x.size(0) == 1 and self.training
        if single_sample:
            self.bn.eval()
        x = self.bn(x)
        if single_sample:
            self.bn.train()
        return self.l(x)

    @torch.no_grad()
    def fuse(self):
        """Fold the BN statistics into the Linear, returning one Linear."""
        bn, l = self._modules.values()
        # Per-feature scale and shift that BN applies before the Linear.
        scale = bn.weight / (bn.running_var + bn.eps) ** 0.5
        shift = bn.bias - self.bn.running_mean * \
            self.bn.weight / (bn.running_var + bn.eps) ** 0.5
        w = l.weight * scale[None, :]
        if l.bias is None:
            b = shift @ self.l.weight.T
        else:
            b = (l.weight @ shift[:, None]).view(-1) + self.l.bias
        fused = torch.nn.Linear(w.size(1), w.size(0), device=l.weight.device)
        fused.weight.data.copy_(w)
        fused.bias.data.copy_(b)
        return fused
def channel_shuffle(x, groups):
    """Shuffle channels across groups (ShuffleNet-style).

    Reshapes (B, C, H, W) -> (B, groups, C // groups, H, W), swaps the
    group and channel axes, then flattens back, so information from each
    group is interleaved across the channel dimension. ``C`` must be
    divisible by ``groups``.

    Args:
        x: input tensor of shape (B, C, H, W).
        groups: number of channel groups to interleave.

    Returns:
        Tensor of the same shape with channels permuted.
    """
    # x.size() instead of the deprecated x.data.size(): .data bypasses
    # autograd bookkeeping and is discouraged; size() is identical here.
    batchsize, num_channels, height, width = x.size()
    channels_per_group = num_channels // groups
    # reshape into (B, groups, C/groups, H, W)
    x = x.view(batchsize, groups, channels_per_group, height, width)
    x = torch.transpose(x, 1, 2).contiguous()
    # flatten back to (B, C, H, W)
    x = x.view(batchsize, -1, height, width)
    return x

def act_layer(act, inplace=False, neg_slope=0.2, n_prelu=1):
    """Build an activation layer by (case-insensitive) name.

    Supported names: relu, relu6, leakyrelu, prelu, gelu, hswish.

    Raises:
        NotImplementedError: for an unrecognised activation name.
    """
    factories = {
        'relu': lambda: nn.ReLU(inplace),
        'relu6': lambda: nn.ReLU6(inplace),
        'leakyrelu': lambda: nn.LeakyReLU(neg_slope, inplace),
        'prelu': lambda: nn.PReLU(num_parameters=n_prelu, init=neg_slope),
        'gelu': lambda: nn.GELU(),
        'hswish': lambda: nn.Hardswish(inplace),
    }
    act = act.lower()
    if act not in factories:
        raise NotImplementedError('activation layer [%s] is not found' % act)
    return factories[act]()


class LSNetFPN_8_2(torch.nn.Module):
    """LSNet-style hierarchical backbone with a 3-level FPN decoder.

    The stem downsamples by 8; four stages of ``Block``s follow, with
    depthwise stride-2 transitions between stages. ``l1``/``l2``/``l3``
    are EUCB upsampling laterals feeding the top-down FPN path.

    forward() returns ``[x3_out, x1_out]``: the coarse projected feature
    map and the finest FPN-fused feature map.
    """

    def __init__(self, img_size=480,
                 patch_size=16,
                 in_chans=1,
                 embed_dim=(64, 128, 192, 256),
                 key_dim=(16, 16, 16, 16),
                 depth=(1, 2, 3, 4),
                 num_heads=(4, 4, 4, 4)):
        # Defaults are tuples (previously mutable lists — a Python
        # anti-pattern); indexing/zip use is unchanged.
        super().__init__()

        # Stem: three stride-2 convs (total /8 spatial downsampling).
        self.patch_embed = torch.nn.Sequential(
            Conv2d_BN(in_chans, embed_dim[0] // 4, 3, 2, 1), torch.nn.ReLU(),
            Conv2d_BN(embed_dim[0] // 4, embed_dim[0] // 2, 3, 2, 1), torch.nn.ReLU(),
            Conv2d_BN(embed_dim[0] // 2, embed_dim[0], 3, 2, 1))

        # Nominal per-stage resolutions for the attention bias tables.
        # NOTE(review): this assumes a /patch_size stem, but the stem above
        # downsamples by 8; when the actual feature resolution differs,
        # Attention runs without position biases — confirm intended.
        initial_res = img_size // patch_size
        self.resolutions = [initial_res]

        block_layers = [[] for _ in range(4)]
        # Per-stage attention ratio so the value dim matches the embed dim.
        attn_ratio = [ed / (kd * nh)
                      for ed, kd, nh in zip(embed_dim, key_dim, num_heads)]

        for i, (ed, kd, dpth, nh, ar) in enumerate(
                zip(embed_dim, key_dim, depth, num_heads, attn_ratio)):
            for d in range(dpth):
                block_layers[i].append(
                    Block(ed, kd, nh, ar, self.resolutions[i], stage=i, depth=d))
            # Stride-2 depthwise + pointwise projection into the next stage.
            if i != len(depth) - 1:
                block_layers[i + 1].extend([
                    Conv2d_BN(ed, ed, ks=3, stride=2, pad=1, groups=ed),
                    Conv2d_BN(ed, embed_dim[i + 1], ks=1, stride=1, pad=0)
                ])
                self.resolutions.append((self.resolutions[-1] - 1) // 2 + 1)

        self.blocks1 = nn.Sequential(*block_layers[0])
        self.blocks2 = nn.Sequential(*block_layers[1])
        self.blocks3 = nn.Sequential(*block_layers[2])
        self.blocks4 = nn.Sequential(*block_layers[3])

        # EUCB upsampling laterals feeding the FPN.
        self.l1 = nn.Sequential(
            EUCB(in_channels=embed_dim[0], out_channels=embed_dim[1]),
            torch.nn.ReLU(),
            EUCB(in_channels=embed_dim[1], out_channels=embed_dim[1]),
            torch.nn.ReLU(),
        )
        self.l2 = nn.Sequential(
            EUCB(in_channels=embed_dim[1], out_channels=embed_dim[2]),
            torch.nn.ReLU(),
            EUCB(in_channels=embed_dim[2], out_channels=embed_dim[2]),
            torch.nn.ReLU(),
        )
        self.l3 = nn.Sequential(
            EUCB(in_channels=embed_dim[3], out_channels=embed_dim[3]),
            torch.nn.ReLU(),
            EUCB(in_channels=embed_dim[3], out_channels=embed_dim[3]),
            torch.nn.ReLU(),
        )

        block_dims = [embed_dim[1], embed_dim[2], embed_dim[3]]

        # FPN top-down pathway.
        self.layer3_outconv = conv1x1(block_dims[2], block_dims[2])
        self.layer2_outconv = conv1x1(block_dims[1], block_dims[2])
        self.layer2_outconv2 = nn.Sequential(
            conv3x3(block_dims[2], block_dims[2]),
            nn.BatchNorm2d(block_dims[2]),
            nn.LeakyReLU(),
            conv3x3(block_dims[2], block_dims[1]),
        )
        self.layer1_outconv = conv1x1(block_dims[0], block_dims[1])
        self.layer1_outconv2 = nn.Sequential(
            conv3x3(block_dims[1], block_dims[1]),
            nn.BatchNorm2d(block_dims[1]),
            nn.LeakyReLU(),
            conv3x3(block_dims[1], block_dims[0]),
        )

    def no_weight_decay(self):
        """Parameter names to exclude from weight decay (attention biases)."""
        return {x for x in self.state_dict().keys() if 'attention_biases' in x}

    def forward(self, x):
        # (leftover debug print of the input shape removed)
        x = self.patch_embed(x)
        x = self.blocks1(x)
        x1 = self.l1(x)    # fine lateral
        x = self.blocks2(x)
        x2 = self.l2(x)    # mid lateral
        x = self.blocks3(x)
        x = self.blocks4(x)
        x3 = self.l3(x)    # coarse lateral

        # Top-down FPN fusion: bilinear upsample + conv refinement.
        x3_out = self.layer3_outconv(x3)

        x2_out = self.layer2_outconv(x2)
        x3_out_2x = F.interpolate(x3_out, size=x2_out.shape[2:],
                                  mode='bilinear', align_corners=True)
        x2_out = self.layer2_outconv2(x2_out + x3_out_2x)

        x1_out = self.layer1_outconv(x1)
        x2_out_2x = F.interpolate(x2_out, size=x1_out.shape[2:],
                                  mode='bilinear', align_corners=True)
        x1_out = self.layer1_outconv2(x1_out + x2_out_2x)

        return [x3_out, x1_out]

if __name__ == '__main__':
    # Smoke test: one grayscale 448x448 image through the model.
    # Fall back to CPU so the script does not crash on machines without
    # a CUDA device (previously 'cuda' was hard-coded).
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    x = torch.randn(1, 1, 448, 448).to(device)
    model = LSNetFPN_8_2(img_size=448).to(device)
    y = model(x)
    print(y[0].shape, y[1].shape)