import torch
import torch.nn.functional as F
from timm.models.efficientnet import tf_efficientnet_b0_ns
from torch import nn

from timm.models import vit_base_patch16_224
from timm.models.efficientnet_blocks import ConvBnAct

__all__ = ['unet_color_space']

from timm.models.self_attn import Self_Attn


class DoubleConv(nn.Module):
    """Two stacked (3x3 conv => BatchNorm => ReLU) stages.

    Spatial size is preserved (padding=1). If ``mid_channels`` is falsy,
    the hidden width defaults to ``out_channels``.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        hidden = mid_channels if mid_channels else out_channels
        stages = [
            nn.Conv2d(in_channels, hidden, kernel_size=3, padding=1),
            nn.BatchNorm2d(hidden),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*stages)

    def forward(self, x):
        # Run both conv-bn-relu stages in one go.
        return self.double_conv(x)


class Down(nn.Module):
    """Halve spatial resolution with 2x2 max-pool, then apply a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        pool = nn.MaxPool2d(2)
        fuse = DoubleConv(in_channels, out_channels)
        self.maxpool_conv = nn.Sequential(pool, fuse)

    def forward(self, x):
        # Pool first, then change the channel width.
        return self.maxpool_conv(x)


class Up(nn.Module):
    """Upscale the low-res feature, pad it to the skip feature's spatial
    size, concatenate along channels, and fuse with a DoubleConv."""

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            # Parameter-free 2x upsampling; channel count is unchanged.
            # NOTE(review): the DoubleConv below still expects in_channels
            # after concatenation — confirm skip widths before relying on
            # bilinear=True.
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        else:
            # Learned 2x upsampling that also halves the channel count.
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Tensors are NCHW; pad x1 (left/right, top/bottom) so its H and W
        # match the skip tensor x2 before concatenating.
        dh = x2.size(2) - x1.size(2)
        dw = x2.size(3) - x1.size(3)
        x1 = F.pad(x1, [dw // 2, dw - dw // 2,
                        dh // 2, dh - dh // 2])
        merged = torch.cat([x2, x1], dim=1)
        return self.conv(merged)


class OutConv(nn.Module):
    """Final 1x1 projection to the requested number of output channels."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        # Pointwise channel mixing; spatial dims untouched.
        return self.conv(x)


class RefUnet(nn.Module):
    """Residual refinement U-Net: a 4-level 64-wide encoder/decoder that
    predicts a 1-channel residual which is added back onto the input.

    Args:
        in_ch: channels of the map to refine.
        inc_ch: channel width produced by the stem conv before the encoder.

    NOTE(review): the residual has exactly 1 channel (``conv_d0``), so
    ``x + residual`` broadcasts over channels when in_ch > 1 — presumably
    in_ch == 1 in practice; verify against callers.
    """

    def __init__(self, in_ch, inc_ch):
        super(RefUnet, self).__init__()

        # Stem: lift the input to inc_ch channels (same spatial size).
        self.conv0 = nn.Conv2d(in_ch, inc_ch, 3, padding=1)

        # Encoder level 1 (full resolution, 64 channels).
        self.conv1 = nn.Conv2d(inc_ch, 64, 3, padding=1)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)

        self.pool1 = nn.MaxPool2d(2, 2, ceil_mode=True)

        # Encoder level 2 (1/2 resolution).
        self.conv2 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn2 = nn.BatchNorm2d(64)
        self.relu2 = nn.ReLU(inplace=True)

        self.pool2 = nn.MaxPool2d(2, 2, ceil_mode=True)

        # Encoder level 3 (1/4 resolution).
        self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn3 = nn.BatchNorm2d(64)
        self.relu3 = nn.ReLU(inplace=True)

        self.pool3 = nn.MaxPool2d(2, 2, ceil_mode=True)

        # Encoder level 4 (1/8 resolution).
        self.conv4 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn4 = nn.BatchNorm2d(64)
        self.relu4 = nn.ReLU(inplace=True)

        self.pool4 = nn.MaxPool2d(2, 2, ceil_mode=True)

        # Bottleneck (1/16 resolution).

        self.conv5 = nn.Conv2d(64, 64, 3, padding=1)
        self.bn5 = nn.BatchNorm2d(64)
        self.relu5 = nn.ReLU(inplace=True)

        # Decoder: each stage fuses the upsampled feature with the matching
        # encoder skip (64 + 64 = 128 input channels).

        self.conv_d4 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn_d4 = nn.BatchNorm2d(64)
        self.relu_d4 = nn.ReLU(inplace=True)

        self.conv_d3 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn_d3 = nn.BatchNorm2d(64)
        self.relu_d3 = nn.ReLU(inplace=True)

        self.conv_d2 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn_d2 = nn.BatchNorm2d(64)
        self.relu_d2 = nn.ReLU(inplace=True)

        self.conv_d1 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn_d1 = nn.BatchNorm2d(64)
        self.relu_d1 = nn.ReLU(inplace=True)

        # Head: project the decoder output to the 1-channel residual.
        self.conv_d0 = nn.Conv2d(64, 1, 3, padding=1)

        # Shared 2x upsampler, reused at every decoder level
        # (align_corners is left at its default, False).
        self.upscore2 = nn.Upsample(scale_factor=2, mode='bilinear')

    def forward(self, x):
        hx = x
        hx = self.conv0(hx)

        # Encoder: keep hx1..hx4 as skip connections.
        hx1 = self.relu1(self.bn1(self.conv1(hx)))
        hx = self.pool1(hx1)

        hx2 = self.relu2(self.bn2(self.conv2(hx)))
        hx = self.pool2(hx2)

        hx3 = self.relu3(self.bn3(self.conv3(hx)))
        hx = self.pool3(hx3)

        hx4 = self.relu4(self.bn4(self.conv4(hx)))
        hx = self.pool4(hx4)

        # Bottleneck.
        hx5 = self.relu5(self.bn5(self.conv5(hx)))

        hx = self.upscore2(hx5)

        # Decoder: upsample, concat the skip, fuse with conv-bn-relu.
        d4 = self.relu_d4(self.bn_d4(self.conv_d4(torch.cat((hx, hx4), 1))))
        hx = self.upscore2(d4)

        d3 = self.relu_d3(self.bn_d3(self.conv_d3(torch.cat((hx, hx3), 1))))
        hx = self.upscore2(d3)

        d2 = self.relu_d2(self.bn_d2(self.conv_d2(torch.cat((hx, hx2), 1))))
        hx = self.upscore2(d2)

        d1 = self.relu_d1(self.bn_d1(self.conv_d1(torch.cat((hx, hx1), 1))))

        residual = self.conv_d0(d1)

        # Refinement output: input plus predicted residual.
        return x + residual


class UnetColorSpaceGenerator(nn.Module):
    """5-level U-Net that maps an image to a 3-channel output of the same
    spatial size (each Down halves H/W, the final stage upsamples back).

    Note: ``n_classes`` is accepted but never used by this module.
    """

    def __init__(self, in_channels, n_classes, bilinear=False):
        super(UnetColorSpaceGenerator, self).__init__()
        self.bilinear = bilinear

        # Encoder: channel widths 32 -> 512, resolution /2 per stage.
        self.down1 = Down(in_channels, 32)
        self.down2 = Down(32, 64)
        self.down3 = Down(64, 128)
        self.down4 = Down(128, 256)
        self.down5 = Down(256, 512)
        # Decoder: mirror the encoder widths back down to 32.
        self.up1 = Up(512, 256, bilinear)
        self.up2 = Up(256, 128, bilinear)
        self.up3 = Up(128, 64, bilinear)
        self.up4 = Up(64, 32, bilinear)
        # Head: 1x1 projection to 3 channels plus the final 2x upsample.
        self.up5 = nn.Sequential(
            nn.Conv2d(32, 3, 1, bias=False),
            nn.BatchNorm2d(3),
            nn.ReLU(inplace=True),
            nn.UpsamplingBilinear2d(scale_factor=2)
        )

    def forward(self, x):
        # Encoder pass, keeping every stage output as a skip connection.
        skips = []
        feat = x
        for stage in (self.down1, self.down2, self.down3, self.down4, self.down5):
            feat = stage(feat)
            skips.append(feat)
        s1, s2, s3, s4, s5 = skips

        # Decoder pass, consuming the skips deepest-first.
        out = self.up1(s5, s4)
        out = self.up2(out, s3)
        out = self.up3(out, s2)
        out = self.up4(out, s1)
        return self.up5(out)


class DeConvBnAct(nn.Module):
    """Transposed-conv => norm => activation block (mirror of ConvBnAct)."""

    def __init__(self, in_chs, out_chs, kernel_size,
                 stride=1, padding=0, dilation=1, output_padding=0, act_layer=nn.ReLU,
                 norm_layer=nn.BatchNorm2d, norm_kwargs=None):
        super(DeConvBnAct, self).__init__()
        if norm_kwargs is None:
            norm_kwargs = {}
        self.conv = nn.ConvTranspose2d(
            in_chs, out_chs, kernel_size,
            stride=stride, dilation=dilation, padding=padding,
            output_padding=output_padding)
        self.bn1 = norm_layer(out_chs, **norm_kwargs)
        self.act1 = act_layer(inplace=True)

    def forward(self, x):
        # Deconvolve, normalize, activate.
        return self.act1(self.bn1(self.conv(x)))


def _upsample_like(src, tgt):
    src = F.interpolate(src, size=tgt.shape[2:], mode='bilinear', align_corners=True)
    return src


class UnetColorSpaceGeneratorV2(nn.Module):
    """Lightweight 5-level encoder/decoder that maps an input image to an
    out_channels-wide "learned color space" image of the same spatial size.

    Encoder: five ConvBnAct stages (8 -> 128 channels), each followed by a
    stride-2 ConvBnAct downsample. Decoder: bilinearly upsample to the
    matching encoder resolution, concatenate the skip feature, and fuse
    with a ConvBnAct block. A final 2x upsample restores the input size.

    Args:
        in_channels: channels of the input image.
        out_channels: channels of the generated image (default 3).
    """

    def __init__(self, in_channels, out_channels=3):
        super(UnetColorSpaceGeneratorV2, self).__init__()

        # Encoder: blockN widens the channels, downN halves H and W.
        self.block1 = ConvBnAct(in_channels, 8, kernel_size=3, stride=1, pad_type='same')
        self.down1 = ConvBnAct(8, 8, kernel_size=3, stride=2)
        self.block2 = ConvBnAct(8, 16, kernel_size=3, stride=1, pad_type='same')
        self.down2 = ConvBnAct(16, 16, kernel_size=3, stride=2)
        self.block3 = ConvBnAct(16, 32, kernel_size=3, stride=1, pad_type='same')
        self.down3 = ConvBnAct(32, 32, kernel_size=3, stride=2)
        self.block4 = ConvBnAct(32, 64, kernel_size=3, stride=1, pad_type='same')
        self.down4 = ConvBnAct(64, 64, kernel_size=3, stride=2)
        self.block5 = ConvBnAct(64, 128, kernel_size=3, stride=1, pad_type='same')
        self.down5 = ConvBnAct(128, 128, kernel_size=3, stride=2)

        # Decoder fuse blocks; inputs are skip + upsampled channels
        # (block7: 64+128, block8: 32+64, block9: 16+32, block10: 8+16).
        self.block6 = ConvBnAct(128, 128, kernel_size=3, stride=1, pad_type='same')
        self.block7 = ConvBnAct(192, 64, kernel_size=3, stride=1, pad_type='same')
        self.block8 = ConvBnAct(96, 32, kernel_size=3, stride=1, pad_type='same')
        self.block9 = ConvBnAct(48, 16, kernel_size=3, stride=1, pad_type='same')
        self.block10 = ConvBnAct(24, 8, kernel_size=3, stride=1, pad_type='same')
        self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
        self.block11 = ConvBnAct(8, out_channels, kernel_size=3, stride=1, pad_type='same')

    def forward(self, x):
        # ---- encoder (shape comments relative to an HxW input) ----
        x1 = self.block1(x)
        d1 = self.down1(x1)   # (8,  H/2,  W/2)
        x2 = self.block2(d1)
        d2 = self.down2(x2)   # (16, H/4,  W/4)
        x3 = self.block3(d2)
        d3 = self.down3(x3)   # (32, H/8,  W/8)
        x4 = self.block4(d3)
        d4 = self.down4(x4)   # (64, H/16, W/16)
        x5 = self.block5(d4)
        d5 = self.down5(x5)   # (128, H/32, W/32)

        # ---- decoder: upsample, concat skip, fuse ----
        x6 = self.block6(d5)
        x7 = _upsample_like(x6, d4)
        x8 = torch.cat((d4, x7), dim=1)    # 64 + 128 channels
        x9 = self.block7(x8)
        x10 = _upsample_like(x9, d3)
        x11 = torch.cat((d3, x10), dim=1)  # 32 + 64 channels
        x12 = self.block8(x11)
        x13 = _upsample_like(x12, d2)
        x14 = torch.cat((d2, x13), dim=1)  # 16 + 32 channels
        x15 = self.block9(x14)
        x16 = _upsample_like(x15, d1)
        x17 = torch.cat((d1, x16), dim=1)  # 8 + 16 channels
        x18 = self.block10(x17)

        # Restore the input resolution, then project to out_channels.
        x20 = self.up(x18)
        x21 = self.block11(x20)
        return x21


class UnetColorSpace(nn.Module):
    """Learned color-space transform followed by a classification backbone.

    NOTE(review): ``feature_extractor`` and ``pretrained`` are currently
    ignored — the backbone is hard-coded to vit_base_patch16_224 with
    pretrained=True; the commented-out line below shows the presumably
    intended generic wiring. Verify before relying on these arguments.
    """

    def __init__(self, feature_extractor, in_channels, num_classes, pretrained=False):
        super(UnetColorSpace, self).__init__()
        # Learnable color-space generator: maps the input to a 3-channel image.
        self.compact = UnetColorSpaceGeneratorV2(in_channels, 3)
        # self.features = tf_efficientnet_b0_ns(pretrained=True, num_classes=num_classes)
        self.features = vit_base_patch16_224(pretrained=True, num_classes=num_classes)
        # self.features = feature_extractor(pretrained=pretrained, num_classes=num_classes)

    def forward(self, x):
        # Transform colors first, then classify the transformed image.
        x = self.compact(x)
        x = self.features(x)
        return x


def unet_color_space(feature_extractor, in_channels=3, num_classes=2, pretrained=False):
    """Factory: build a UnetColorSpace and expose the backbone's default_cfg
    on the wrapper (timm convention for model configs)."""
    net = UnetColorSpace(feature_extractor, in_channels=in_channels,
                         num_classes=num_classes, pretrained=pretrained)
    net.default_cfg = net.features.default_cfg
    return net

if __name__ == "__main__":
    # Smoke test: one forward pass on a random ImageNet-sized batch.
    net = UnetColorSpace(feature_extractor=0, in_channels=3, num_classes=1000, pretrained=True)
    dummy = torch.randn(4, 3, 224, 224)
    regression = net(dummy)
