# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Convolution modules."""

import math

import numpy as np
import torch
import torch.nn as nn

from ultralytics.nn.modules.conv import Conv2  # verify this import path is correct
from ultralytics.nn.modules.block import Attention

__all__ = (
    "CustomBackbone",
)

import torch
import torch.nn as nn
import timm



class VisibleLayer(nn.Module):
    """Placeholder for the visible-light processing branch (not yet implemented)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """No-op stub; returns None until the layer is implemented."""
        return None


class IRLayer(nn.Module):
    """Placeholder for the infrared processing branch (not yet implemented)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        """No-op stub; returns None until the layer is implemented."""
        return None


import torch
import torch.nn as nn

class CrossAttention(nn.Module):
    """Multi-head cross-attention over flattened spatial feature maps.

    Queries come from one feature map and keys/values from another, letting
    one modality attend to the other. Input/output layout is (B, C, H, W).

    Args:
        dim (int): Channel dimension C of the feature maps. Must be divisible
            by ``num_heads``.
        num_heads (int): Number of attention heads.
        attn_ratio (float): Accepted for interface compatibility; currently
            unused by this implementation.

    Raises:
        ValueError: If ``dim`` is not divisible by ``num_heads``.
    """

    def __init__(self, dim, num_heads, attn_ratio=0.5):
        super().__init__()
        # Fail fast: an indivisible dim would otherwise surface as a
        # confusing view/reshape error deep inside forward().
        if dim % num_heads != 0:
            raise ValueError(f"dim ({dim}) must be divisible by num_heads ({num_heads})")
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.scale = self.head_dim ** -0.5

        # Linear projections for Q, K, V and the output merge.
        self.q_proj = nn.Linear(dim, dim, bias=False)
        self.k_proj = nn.Linear(dim, dim, bias=False)
        self.v_proj = nn.Linear(dim, dim, bias=False)
        self.out_proj = nn.Linear(dim, dim, bias=False)

    def forward(self, query, key, value):
        """Apply cross-attention.

        Args:
            query (Tensor): (B, C, H, W); determines the output spatial size.
            key (Tensor): (B, C, H', W'); must match ``value`` spatially.
            value (Tensor): (B, C, H', W').

        Returns:
            Tensor: Attended features of shape (B, C, H, W), matching ``query``.
        """
        B, C, H, W = query.shape
        # Flatten spatial dims: (B, C, H, W) -> (B, HW, C).
        query = query.flatten(2).permute(0, 2, 1)
        key = key.flatten(2).permute(0, 2, 1)
        value = value.flatten(2).permute(0, 2, 1)

        # Project Q, K, V; shapes stay (B, HW, C).
        Q = self.q_proj(query)
        K = self.k_proj(key)
        V = self.v_proj(value)

        # Split into heads: (B, HW, C) -> (B, num_heads, HW, head_dim).
        Q = Q.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2)
        K = K.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2)
        V = V.view(B, -1, self.num_heads, self.head_dim).transpose(1, 2)

        # Scaled dot-product attention: (B, num_heads, HW_q, HW_kv).
        attn_weights = (Q @ K.transpose(-2, -1)) * self.scale
        attn_weights = attn_weights.softmax(dim=-1)

        # Weighted sum over values, then merge heads back to (B, HW, C).
        out = (attn_weights @ V).transpose(1, 2).reshape(B, -1, C)
        out = self.out_proj(out)

        # Restore the spatial layout of the query: (B, HW, C) -> (B, C, H, W).
        out = out.permute(0, 2, 1).view(B, C, H, W)
        return out



# Dual-stream backbone: fuses visible (RGB) and infrared inputs.
class CustomBackbone(nn.Module):
    """Dual-stream backbone fusing visible (RGB) and infrared features.

    Expects a stacked input of at least 4 channels: channels 0-2 are the
    visible image, the remaining channel(s) the infrared image. Each stream
    is downsampled 8x by three stride-2 convolutions, refined by
    self-attention, then the streams exchange information through
    cross-attention and are averaged into a single fused feature map.
    """

    def __init__(self):
        super().__init__()

        # Visible-light (RGB) stream: 3 -> 256 channels, 8x spatial downsample.
        self.sequential1 = nn.Sequential(
            Conv2(3, 64, 3, 2),
            Conv2(64, 128, 3, 2),
            Conv2(128, 256, 3, 2),
            Attention(256, 8, 0.5),
        )

        # Infrared stream: 1 -> 256 channels, 8x spatial downsample.
        self.sequential2 = nn.Sequential(
            Conv2(1, 64, 3, 2),
            Conv2(64, 128, 3, 2),
            Conv2(128, 256, 3, 2),
            Attention(256, 8, 0.5),
        )

        # Cross-modal fusion. NOTE(review): a single module is reused for
        # both directions (visible->IR and IR->visible), so both fusions
        # share projection weights — confirm this is intended.
        self.cross_attention = CrossAttention(dim=256, num_heads=8, attn_ratio=0.5)

    def forward(self, x):
        """Run both streams and fuse them.

        Args:
            x (Tensor): (B, 4+, H, W); channels [:3] visible, [3:] infrared.

        Returns:
            Tensor: Fused feature map of shape (B, 256, H/8, W/8).
        """
        # Split the stacked input into the two modalities by channel.
        visible_x = x[:, :3, :, :]
        lr_x = x[:, 3:, :, :]

        visible_x = self.sequential1(visible_x)
        lr_x = self.sequential2(lr_x)

        # Each stream queries the other modality.
        visible_x_attn = self.cross_attention(visible_x, lr_x, lr_x)  # visible <- IR
        lr_x_attn = self.cross_attention(lr_x, visible_x, visible_x)  # IR <- visible

        # Simple average fusion of the two attended streams.
        fused_features = (visible_x_attn + lr_x_attn) / 2
        return fused_features


if __name__ == '__main__':
    from torchinfo import summary

    # Smoke test: print a layer-by-layer summary of the backbone for a
    # single 4-channel 640x640 input (3 visible + 1 infrared channel).
    model = CustomBackbone()
    summary(model, input_size=(1, 4, 640, 640))