import torch
import torch.nn as nn
import torch.nn.functional as F
import functools
import numpy as np
from torch.nn.modules.batchnorm import _BatchNorm

class ConBlock(nn.Module):
    """A 3x3 same-padding convolution followed by LeakyReLU (slope 0.1)."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Attribute names kept for state-dict compatibility.
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3, padding=1)
        self.relu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, x):
        """Apply conv then activation; spatial size is preserved."""
        return self.relu(self.conv1(x))
    
class SKFF(nn.Module):
    """Selective Kernel Feature Fusion.

    Fuses ``height`` equally-shaped feature maps by computing a per-branch,
    per-channel attention weight (softmax across branches) from the
    global-average-pooled sum of the inputs, then returning the
    attention-weighted sum of the branches.
    """

    def __init__(self, in_channels, height=2, reduction=8, bias=False):
        super(SKFF, self).__init__()
        self.height = height
        # Bottleneck width of the squeeze step; never below 4 channels.
        d = max(int(in_channels / reduction), 4)

        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Squeeze: 1x1 conv channel reduction + PReLU.
        self.conv_du = nn.Sequential(
            nn.Conv2d(in_channels, d, 1, padding=0, bias=bias),
            nn.PReLU(),
        )
        # Excite: one 1x1 conv per branch, expanding back to in_channels.
        self.fcs = nn.ModuleList(
            [nn.Conv2d(d, in_channels, kernel_size=1, stride=1, bias=bias)
             for _ in range(height)]
        )
        self.softmax = nn.Softmax(dim=1)

    def forward(self, inp_feats):
        """Fuse a list/tuple of ``height`` tensors of shape (B, C, H, W)."""
        b = inp_feats[0].shape[0]
        c = inp_feats[0].shape[1]

        # Stack the branches into a (B, height, C, H, W) tensor.
        stacked = torch.cat(inp_feats, dim=1)
        stacked = stacked.view(b, self.height, c, stacked.shape[2], stacked.shape[3])

        # Squeeze: sum over branches, pool to 1x1, reduce channels.
        pooled = self.avg_pool(stacked.sum(dim=1))
        squeezed = self.conv_du(pooled)

        # Excite: per-branch attention logits, softmax over the branch axis.
        attn = torch.cat([fc(squeezed) for fc in self.fcs], dim=1)
        attn = self.softmax(attn.view(b, self.height, c, 1, 1))

        # Attention-weighted sum of the branches: (B, C, H, W).
        return (stacked * attn).sum(dim=1)
    
class UNet_SKFF_v2(nn.Module):
    """Three-level U-Net with SKFF-fused skip connections and a residual output.

    Encoder: three stride-2 conv downsampling stages, each followed by a
    ConBlock. Decoder: three transposed-conv upsampling stages; at each level
    the upsampled map is fused with the matching encoder feature via SKFF.
    The network output is added to the input (residual learning), so the
    output has the same shape as the input.
    """

    def __init__(self, nf):
        super().__init__()
        self.nf = nf
        base_ks = 3

        self.Down0_0 = nn.Sequential(
            nn.Conv2d(nf, nf, base_ks, stride=2, padding=base_ks // 2),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        self.conv0_0 = ConBlock(nf, nf)  # conv + act

        self.Down0_1 = nn.Sequential(
            nn.Conv2d(nf, nf, base_ks, stride=2, padding=base_ks // 2),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        self.conv0_1 = ConBlock(nf, nf)

        self.Down0_2 = nn.Sequential(
            nn.Conv2d(nf, nf, base_ks, stride=2, padding=base_ks // 2),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        self.conv0_2 = ConBlock(nf, nf)

        self.Up1 = nn.Sequential(
            nn.ConvTranspose2d(nf, nf, 4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        self.SKFF_1 = SKFF(in_channels=nf, height=2, reduction=8)

        self.Up2 = nn.Sequential(
            nn.ConvTranspose2d(nf, nf, 4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )
        self.SKFF_2 = SKFF(in_channels=nf, height=2, reduction=8)

        self.Up3 = nn.Sequential(
            nn.ConvTranspose2d(nf, nf, 4, stride=2, padding=1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True)
        )

    def forward(self, input):
        """Run encoder/decoder; returns a tensor with the same shape as `input`."""
        # Encoder: each stage halves the spatial resolution (floor division).
        x0_0 = self.conv0_0(self.Down0_0(input))
        x0_1 = self.conv0_1(self.Down0_1(x0_0))
        x0_2 = self.conv0_2(self.Down0_2(x0_1))

        # Decoder. Each transposed conv exactly doubles the spatial size, which
        # can overshoot by one pixel when the encoder stage saw an odd size, so
        # every upsampled map is cropped to its skip target before fusion.
        # (The original only cropped the first level, crashing on odd sizes.)
        up0_1 = self.Up1(x0_2)
        up0_1 = up0_1[:, :, :x0_1.shape[2], :x0_1.shape[3]]

        up0_2 = self.Up2(self.SKFF_1([up0_1, x0_1]))
        up0_2 = up0_2[:, :, :x0_0.shape[2], :x0_0.shape[3]]

        # BUG FIX: the original reused self.SKFF_1 here while self.SKFF_2 was
        # registered in __init__ but never used; SKFF_2 is clearly intended.
        up0_3 = self.Up3(self.SKFF_2([up0_2, x0_0]))
        up0_3 = up0_3[:, :, :input.shape[2], :input.shape[3]]

        # Residual connection: network predicts a correction to the input.
        return up0_3 + input