import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18

def avg_max_reduce_channel(x):
    """Reduce every tensor in `x` along the channel axis with both mean and max.

    Args:
        x (list[Tensor]): NCHW tensors.

    Returns:
        Tensor: all mean/max maps concatenated on dim 1; two output channels
        per input tensor (mean first, then max).
    """
    pooled = []
    for feat in x:
        pooled.append(torch.mean(feat, dim=1, keepdim=True))
        pooled.append(torch.max(feat, dim=1, keepdim=True)[0])
    return torch.cat(pooled, dim=1)

def avg_max_reduce_hw(x):
    """Globally pool every tensor in `x` over H and W with both avg and max.

    Args:
        x (list[Tensor]): NCHW tensors.

    Returns:
        Tensor: Nx(2*sum(C))x1x1 — all average-pooled maps first, then all
        max-pooled maps, concatenated along the channel axis.
    """
    avg_feats = [F.adaptive_avg_pool2d(feat, 1) for feat in x]
    max_feats = [F.adaptive_max_pool2d(feat, 1) for feat in x]
    return torch.cat(avg_feats + max_feats, dim=1)

class SE(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Args:
        channel (int): Number of input (and output) channels.
        reduction (int, optional): Bottleneck reduction ratio. Default: 16.
    """
    def __init__(self, channel, reduction=16):
        super().__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Rescale each channel of `x` by a learned gate in (0, 1)."""
        batch, chans = x.size(0), x.size(1)
        # Squeeze: global average pool, then excite through the bottleneck MLP.
        gate = self.fc(self.avg_pool(x).view(batch, chans))
        gate = gate.view(batch, chans, 1, 1)
        return x * gate.expand_as(x)

class UAFM(nn.Module):
    """
    The base of Unified Attention Fusion Module.
    Args:
        x_ch (int): The channel of x tensor, which is the low level feature.
        y_ch (int): The channel of y tensor, which is the high level feature.
        out_ch (int): The channel of output tensor.
        ksize (int, optional): The kernel size of the conv for x tensor. Default: 3.
        resize_mode (str, optional): The resize mode used to upsample the y tensor. Default: bilinear.
    """
    def __init__(self, x_ch, y_ch, out_ch, ksize=3, resize_mode='bilinear'):
        super().__init__()
        self.conv_x = nn.Conv2d(x_ch, y_ch, kernel_size=ksize, padding=ksize // 2, bias=False)
        self.bn_x = nn.BatchNorm2d(y_ch)
        self.conv_out = nn.Conv2d(y_ch, out_ch, kernel_size=ksize, padding=ksize // 2, bias=False)
        # BUG FIX: bn_out normalizes the output of conv_out, which has out_ch
        # channels. It was BatchNorm2d(y_ch), which crashed in fuse() whenever
        # out_ch != y_ch.
        self.bn_out = nn.BatchNorm2d(out_ch)
        self.resize_mode = resize_mode

    def fuse(self, x, y):
        """Element-wise sum of x and y followed by conv + BN + ReLU."""
        out = x + y
        out = self.conv_out(out)
        out = self.bn_out(out)
        out = F.relu(out)

        return out

    def forward(self, x, y):
        """
        Args:
            x (Tensor): The low level feature (large spatial size, few channels).
            y (Tensor): The high level feature (small spatial size, many channels).
        """
        # Project x to y_ch channels so the two features can be fused.
        x = F.relu(self.bn_x(self.conv_x(x)))
        # Upsample y to the spatial size of x.
        y = F.interpolate(y, x.shape[2:], mode=self.resize_mode, align_corners=True)
        out = self.fuse(x, y)
        return out
        
class UAFM_SpAtten(UAFM):
    """UAFM variant with spatial attention.

    Reuses the base class forward(); only fuse() is overridden. The attention
    map is computed from channel-wise mean/max statistics of both inputs
    (4 input channels: mean/max of x, mean/max of y).
    """
    def __init__(self, x_ch, y_ch, out_ch, ksize=3, resize_mode='bilinear'):
        super().__init__(x_ch, y_ch, out_ch, ksize, resize_mode)
        self.conv_xy_atten = nn.Sequential(
            nn.Conv2d(4, 2, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(2),
            nn.ReLU(),
            nn.Conv2d(2, 1, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(1),
        )

    def fuse(self, x, y):
        """Blend x and y with a single-channel spatial attention map."""
        stats = avg_max_reduce_channel([x, y])
        weight = torch.sigmoid(self.conv_xy_atten(stats))
        blended = x * weight + y * (1 - weight)
        return self.conv_out(blended)
    
class UAFM_ChAtten(UAFM):
    """
    The UAFM with channel attention, which uses mean and max values.
    Args:
        x_ch (int): The channel of x tensor, which is the low level feature.
        y_ch (int): The channel of y tensor, which is the high level feature.
        out_ch (int): The channel of output tensor.
        ksize (int, optional): The kernel size of the conv for x tensor. Default: 3.
        resize_mode (str, optional): The resize mode used to upsample the y tensor. Default: bilinear.
    """

    def __init__(self, x_ch, y_ch, out_ch, ksize=3, resize_mode='bilinear'):
        super().__init__(x_ch, y_ch, out_ch, ksize, resize_mode)

        # avg+max pooling of two y_ch-channel tensors -> 4 * y_ch input channels.
        mid_ch = y_ch // 2
        self.conv_xy_atten = nn.Sequential(
            nn.Conv2d(4 * y_ch, mid_ch, kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_ch),
            nn.LeakyReLU(),
            nn.Conv2d(mid_ch, y_ch, kernel_size=1, bias=False),
            nn.BatchNorm2d(y_ch),
        )

    def fuse(self, x, y):
        """Blend x and y with a per-channel attention vector.

        Args:
            x (Tensor): The low level feature.
            y (Tensor): The high level feature.
        """
        stats = avg_max_reduce_hw([x, y])
        weight = torch.sigmoid(self.conv_xy_atten(stats))
        blended = x * weight + y * (1 - weight)
        return self.conv_out(blended)
    
class SPPM(nn.Module):
    """
    Simple Pyramid Pooling Module.
    Args:
        in_channels (int): The number of input channels to the pyramid pooling module.
        inter_channels (int): The number of intermediate channels inside the module.
        out_channels (int): The number of output channels after the pyramid pooling module.
        bin_sizes (tuple, optional): The output sizes of the pooled feature maps. Default: (1, 2, 4).
        align_corners (bool): An argument of F.interpolate. It should be set to False
            when the output size of feature is even, e.g. 1024x512, otherwise it is True, e.g. 769x769.
    """

    def __init__(self,
                 in_channels,
                 inter_channels,
                 out_channels,
                 bin_sizes: tuple = (1, 2, 4),
                 align_corners=True):
        super().__init__()

        self.stages = nn.ModuleList(
            [self._make_stage(in_channels, inter_channels, size)
             for size in bin_sizes])
        self.conv_out = nn.Conv2d(
            inter_channels, out_channels, kernel_size=3, padding=1, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)
        self.align_corners = align_corners

    def _make_stage(self, in_channels, out_channels, size):
        """One pyramid branch: adaptive avg pool to `size`, then 1x1 conv + BN + ReLU."""
        return nn.Sequential(
            nn.AdaptiveAvgPool2d(output_size=size),
            nn.Conv2d(in_channels, out_channels, kernel_size=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
        )

    def forward(self, input):
        """Sum all upsampled pyramid branches, then project to out_channels."""
        target_size = input.shape[2:]

        total = None
        for stage in self.stages:
            upsampled = F.interpolate(
                stage(input),
                target_size,
                mode='bilinear',
                align_corners=self.align_corners)
            total = upsampled if total is None else total + upsampled

        return F.relu(self.bn(self.conv_out(total)))
    
class SegHead(nn.Module):
    """Segmentation decoder head built from SPPM + UAFM fusion blocks.

    Args:
        in_channelf2 (int): Channels of the first input feature (f2). The f2
            transform layers are registered but not used by forward().
        in_channelf3 (int): Channels of the second input feature (f3, stride 1/8).
        out_class (int): Number of output classes.
    """
    def __init__(self, in_channelf2, in_channelf3, out_class):
        super().__init__()

        # 1x1 transforms for the input features. The f2 branch is kept so the
        # module interface / state dict stays unchanged, even though forward()
        # currently only consumes f3.
        self.transformf2 = nn.Conv2d(in_channelf2, in_channelf2, kernel_size=1, stride=1, bias=False)  # 1/8
        self.bnf2 = nn.BatchNorm2d(in_channelf2)

        self.transformf3 = nn.Conv2d(in_channelf3, in_channelf3, kernel_size=1, stride=1, bias=False)
        self.bnf3 = nn.BatchNorm2d(in_channelf3)

        # Encoder-side downsampling: 1/8 -> 1/16 -> 1/32.
        self.segblock1 = nn.Conv2d(in_channelf3, 64, kernel_size=3, stride=1, padding=1, bias=False)  # 1/8
        self.bn1 = nn.BatchNorm2d(64)
        self.pool1 = nn.AvgPool2d(kernel_size=2, stride=2)  # 1/16

        self.segblock2 = nn.Conv2d(64, 96, kernel_size=3, stride=1, padding=1, bias=False)  # 1/16
        self.bn2 = nn.BatchNorm2d(96)
        self.pool2 = nn.AvgPool2d(kernel_size=2, stride=2)  # 1/32

        # Context module at the deepest stride.
        self.pyramidpool = SPPM(96, 128, 128)  # dim=128, 1/32

        # Attention-based fusion back up the pyramid.
        self.arm1 = UAFM_SpAtten(64, 128, 128)              # 1/16
        self.arm2 = UAFM_ChAtten(in_channelf3, 128, 128)    # 1/8

        self.conv1 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False)
        self.outconv = nn.Conv2d(128, out_class, kernel_size=1, bias=True)

    def forward(self, x):
        """x is a sequence: x[0] = C2 feature (unused), x[1] = C3 feature (1/8)."""
        _low_feat = x[0]   # C2, 1/2 — read but currently unused
        mid_feat = x[1]    # C3, 1/8

        f3 = self.bnf3(self.transformf3(mid_feat))                  # 1/8
        enc1 = self.pool1(F.relu(self.bn1(self.segblock1(f3))))     # c=64, 1/16
        enc2 = self.pool2(F.relu(self.bn2(self.segblock2(enc1))))   # c=96, 1/32

        ctx = self.pyramidpool(enc2)        # c=128, 1/32
        fused1 = self.arm1(enc1, ctx)       # c=128, 1/16
        fused2 = self.arm2(f3, fused1)      # c=128, 1/8
        up = F.interpolate(fused2, scale_factor=2, mode="bilinear", align_corners=True)  # c=128, 1/4

        logits = self.outconv(self.conv1(up))
        # Final 4x upsample back to the original resolution.
        return F.interpolate(logits, scale_factor=4, mode="bilinear", align_corners=True)

    
class SimpleSeg(nn.Module):
    """Baseline segmentation net: ResNet-18 encoder + conv/upsample decoder.

    Args:
        num_class (int): Number of output classes (channels of the result).
    """
    def __init__(self, num_class) -> None:
        super().__init__()
        backbone = resnet18()
        self.res18 = backbone
        # Re-expose the backbone stem and stages as direct attributes.
        self.conv1 = backbone.conv1
        self.bn1 = backbone.bn1
        self.relu = backbone.relu
        self.maxpool = backbone.maxpool

        self.layer1 = backbone.layer1
        self.layer2 = backbone.layer2
        self.layer3 = backbone.layer3
        self.layer4 = backbone.layer4

        # Decoder: five conv+BN steps halving channels 512->256->128->64->32->num_class.
        self.up1 = nn.Conv2d(512, 256, 3, 1, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(256)

        self.up2 = nn.Conv2d(256, 128, 3, 1, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(128)

        self.up3 = nn.Conv2d(128, 64, 3, 1, 1, bias=False)
        self.bn4 = nn.BatchNorm2d(64)

        self.up4 = nn.Conv2d(64, 32, 3, 1, 1, bias=False)
        self.bn5 = nn.BatchNorm2d(32)

        self.up5 = nn.Conv2d(32, num_class, 3, 1, 1, bias=False)
        self.bn6 = nn.BatchNorm2d(num_class)

    def forward(self, x):
        # Encoder stem: stride 1/2 after conv1, 1/4 after maxpool.
        x = self.maxpool(self.relu(self.bn1(self.conv1(x))))

        x = self.layer1(x)  # 64ch,  1/4
        x = self.layer2(x)  # 128ch, 1/8
        x = self.layer3(x)  # 256ch, 1/16
        x = self.layer4(x)  # 512ch, 1/32

        # Decoder: five (conv -> BN -> ReLU -> 2x bilinear upsample) steps
        # taking the feature map from 1/32 back to full resolution.
        decoder_steps = (
            (self.up1, self.bn2),
            (self.up2, self.bn3),
            (self.up3, self.bn4),
            (self.up4, self.bn5),
            (self.up5, self.bn6),
        )
        for conv, bn in decoder_steps:
            x = F.relu(bn(conv(x)))
            x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True)

        return x


if __name__ == "__main__":
    # Smoke test: run a random 512x1024 RGB image through the model.
    net = SimpleSeg(3)
    sample = torch.rand(1, 3, 512, 1024)
    print(net(sample).shape)