import torch
from torch import nn
from .conv import Conv, DWConv


class Channel(nn.Module):
    """Channel-attention gate: depthwise conv -> global average pool -> sigmoid."""

    def __init__(self, c1):
        """Build the gate for `c1` input channels; output is N x c1 x 1 x 1 weights."""
        super().__init__()
        self.dwconv = DWConv(c1, c1, k=3, s=1, act=False)
        self.apt = nn.AdaptiveAvgPool2d(1)
        self.act = nn.Sigmoid()

    def forward(self, x):
        """Return per-channel attention weights in (0, 1)."""
        y = self.dwconv(x)
        y = self.apt(y)
        return self.act(y)


class Spatial(nn.Module):
    """Spatial-attention gate: 1x1 conv down to one channel, BN, then sigmoid."""

    def __init__(self, c1):
        """Build the gate for `c1` input channels; output is an N x 1 x H x W mask."""
        super().__init__()
        self.cv1 = nn.Conv2d(c1, out_channels=1, kernel_size=(1, 1), stride=(1, 1))
        self.bn = nn.BatchNorm2d(1)
        self.act = nn.Sigmoid()

    def forward(self, x):
        """Return a spatial attention map with values in (0, 1)."""
        y = self.cv1(x)
        y = self.bn(y)
        return self.act(y)


class FCM(nn.Module):
    def __init__(self, c1: int, c2: int, split_ratio: float = 0.25, final_conv: bool = True):
        """
        Initialize the feature-complement module (FCM).

        Splits the input channel-wise into two branches, runs convs on each,
        and fuses them with cross channel/spatial attention.

        Args:
            c1 (int): Number of input channels (must be >= 2 so both branches
                receive at least one channel).
            c2 (int): Number of output channels.
            split_ratio (float): Ratio of channels routed to the first (3x3 conv)
                branch; should be limited to [0, 1].
            final_conv (bool): Whether to add the last 1x1 conv layer.
        """
        super().__init__()
        # Channel split for the two branches. Clamp ch1 into [1, c1 - 1] so
        # both branches are non-empty AND ch1 + ch2 == c1 exactly. The previous
        # independent max(1, ...) on each half could make ch1 + ch2 > c1
        # (e.g. c1=2, split_ratio=0.25 gave 1 + 2 = 3), which crashes
        # torch.split() in forward().
        self.ch1 = min(max(1, int(c1 * split_ratio)), c1 - 1)
        self.ch2 = c1 - self.ch1

        # First branch: two 3x3 convs, then a 1x1 conv expanding back to c1.
        self.cv1 = Conv(self.ch1, self.ch1, 3, 1, 1)
        self.cv1_2 = Conv(self.ch1, self.ch1, 3, 1, 1)
        self.cv1_3 = Conv(self.ch1, c1, 1, 1)

        # Second branch: a single 1x1 conv expanding to c1.
        self.cv2 = Conv(self.ch2, c1, 1, 1)

        # Attention gates used to cross-modulate the two branches.
        self.spatial = Spatial(c1)
        self.channel = Channel(c1)

        # Optional final 1x1 projection from c1 to c2.
        self.final_conv = Conv(c1, c2, 1, 1) if final_conv else nn.Identity()

    def forward(self, x):
        """Split x channel-wise, run both branches, and fuse with cross attention."""
        x1, x2 = torch.split(x, [self.ch1, self.ch2], dim=1)
        x3 = self.cv1_3(self.cv1_2(self.cv1(x1)))
        x4 = self.cv2(x2)

        # Cross attention: branch-1's channel gate scales branch-2 and
        # branch-2's spatial gate scales branch-1, then sum.
        x5 = self.channel(x3) * x4 + self.spatial(x4) * x3

        return self.final_conv(x5)


class MKP(nn.Module):
    """Multi-kernel block: DW 3/5/7 convs interleaved with 1x1 mixers plus a residual add."""

    def __init__(self, c1: int, c2: int):
        """Build the pipeline. NOTE: the residual add in forward() requires c2 == c1."""
        super().__init__()
        self.cv1 = DWConv(c1, c1, k=3, s=1, act=True)
        self.cv2 = Conv(c1, c1, k=1, s=1)
        self.cv3 = DWConv(c1, c1, k=5, s=1, act=True)
        self.cv4 = Conv(c1, c1, k=1, s=1)
        self.cv5 = DWConv(c1, c2, k=7, s=1, act=True)

    def forward(self, x):
        """Run the five-layer pipeline and add the input back as a residual."""
        y = x
        for layer in (self.cv1, self.cv2, self.cv3, self.cv4, self.cv5):
            y = layer(y)
        return y + x


class Down(nn.Module):
    """Downsampling block: stride-2 grouped 3x3 conv followed by a 1x1 channel projection."""

    def __init__(self, c1, c2):
        """
        Build the downsampler.

        Args:
            c1: Number of input channels.
            c2: Number of output channels.
        """
        super().__init__()
        # NOTE(review): g=c1//2 requires the group count to divide c1, which
        # only holds for even c1 — confirm callers never pass odd c1.
        self.cv1 = Conv(c1, c1, 3, 2, 1, g=c1 // 2, act=False)
        self.cv2 = Conv(c1, c2, 1, 1)

    def forward(self, x):
        """Halve spatial resolution with cv1, then project channels with cv2."""
        # The original body ran cv1/cv2 twice: it computed x2 = cv2(cv1(x)),
        # discarded it, then recomputed cv2(cv1(x)) in the return — doubling
        # the compute for no effect. Run the pipeline exactly once.
        return self.cv2(self.cv1(x))
