'''
@author: zhangkai
@license: (C) Copyright 2017-2023
@contact: jeffcobile@gmail.com
@Software : PyCharm
@file: block.py
@time: 2020-06-17 15:03:57
@desc: 
'''
import torch
from collections import OrderedDict


class BasicConv(torch.nn.Module):
    """Conv2d -> optional BatchNorm2d -> optional ReLU, with optional bilinear resize.

    Args mirror ``torch.nn.Conv2d``; ``bn``/``relu`` toggle the batch-norm and
    activation stages. ``bias`` defaults to False because BN makes it redundant.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = torch.nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride,
                                    padding=padding, dilation=dilation, groups=groups, bias=bias)
        self.bn   = torch.nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = torch.nn.ReLU(inplace=True) if relu else None

    def forward(self, x, up_size=None):
        """Apply conv (then bn/relu if enabled); if ``up_size`` (H, W) is given,
        bilinearly resize the result to that spatial size."""
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        if up_size is not None:
            # F.upsample is deprecated; interpolate with align_corners=False
            # reproduces the old default bilinear behaviour.
            x = torch.nn.functional.interpolate(x, size=up_size, mode='bilinear', align_corners=False)
        return x


class conv_bn(torch.nn.Module):
    """Conv2d (bias-free) + BatchNorm2d + activation.

    ``activate`` selects the non-linearity: 'relu6' -> ReLU6(inplace),
    'leaky' -> LeakyReLU(0.1); anything else raises AttributeError.
    """

    def __init__(self, inp, oup, kernel, stride, padding, activate='relu6'):
        super(conv_bn, self).__init__()
        # Resolve the activation first so the Sequential is built only once.
        if activate == 'relu6':
            act = torch.nn.ReLU6(inplace=True)
        elif activate == 'leaky':
            act = torch.nn.LeakyReLU(0.1)
        else:
            raise AttributeError("activate type not supported")
        stages = OrderedDict()
        stages['conv'] = torch.nn.Conv2d(inp, oup, kernel, stride, padding, bias=False)
        stages['bn'] = torch.nn.BatchNorm2d(oup)
        stages['relu'] = act
        self.convbn = torch.nn.Sequential(stages)

    def forward(self, input):
        return self.convbn(input)


class sepconv_bn(torch.nn.Module):
    """Depthwise-separable convolution block.

    Depthwise conv + BN (with an optional ReLU6 when ``seprelu`` is truthy),
    followed by a 1x1 pointwise conv + BN + ReLU6.
    """

    def __init__(self, inp, oup, kernel, stride, padding, seprelu):
        super(sepconv_bn, self).__init__()
        # Assemble the stages as a list and only conditionally insert the
        # depthwise activation, instead of duplicating the whole pipeline.
        stages = [
            ('sepconv', torch.nn.Conv2d(inp, inp, kernel, stride, padding, groups=inp, bias=False)),
            ('sepbn',   torch.nn.BatchNorm2d(inp)),
        ]
        if seprelu:
            stages.append(('seprelu', torch.nn.ReLU6(inplace=True)))
        stages.extend([
            ('pointconv', torch.nn.Conv2d(inp, oup, 1, 1, 0, bias=False)),
            ('pointbn',   torch.nn.BatchNorm2d(oup)),
            ('pointrelu', torch.nn.ReLU6(inplace=True)),
        ])
        self.sepconv_bn = torch.nn.Sequential(OrderedDict(stages))

    def forward(self, input):
        return self.sepconv_bn(input)


class conv_bias(torch.nn.Module):
    """A bare Conv2d with bias enabled — no normalisation, no activation."""

    def __init__(self, inp, oup, kernel, stride, padding):
        super(conv_bias, self).__init__()
        self.conv = torch.nn.Conv2d(in_channels=inp, out_channels=oup,
                                    kernel_size=kernel, stride=stride,
                                    padding=padding, bias=True)

    def forward(self, input):
        return self.conv(input)


class ASFF(torch.nn.Module):
    """Adaptively Spatial Feature Fusion over a 3-level feature pyramid.

    The pyramid carries [512, 256, 128] channels for levels 0/1/2
    (level 0 is the coarsest, smallest-resolution map). All three inputs
    are resized to the target ``level``'s resolution and channel width,
    per-pixel fusion weights are predicted, softmax-normalised across the
    three levels, and the weighted sum is expanded back to the level's
    channel count.

    Args:
        level: target pyramid level to fuse onto (0, 1 or 2).
        activate: activation name forwarded to conv_bn ('relu6' or 'leaky').
        rfb: if True, halve the weight-branch channels to save memory.
        vis: if True, forward() also returns the fusion weights and the
            channel-summed fused map for visualisation.

    Raises:
        ValueError: if ``level`` is not 0, 1 or 2.
    """

    def __init__(self, level, activate, rfb=False, vis=False):
        super(ASFF, self).__init__()
        # Fail fast: an invalid level would otherwise surface as a missing
        # attribute deep inside forward().
        if level not in (0, 1, 2):
            raise ValueError('ASFF level must be 0, 1 or 2, got %r' % (level,))
        self.level = level
        self.dim = [512, 256, 128]
        self.inter_dim = self.dim[self.level]
        if level == 0:
            self.stride_level_1 = conv_bn(256, self.inter_dim, kernel=3, stride=2, padding=1, activate=activate)
            self.stride_level_2 = conv_bn(128, self.inter_dim, kernel=3, stride=2, padding=1, activate=activate)
            self.expand = conv_bn(self.inter_dim, 512, kernel=3, stride=1, padding=1, activate=activate)
        elif level == 1:
            self.compress_level_0 = conv_bn(512, self.inter_dim, kernel=1, stride=1, padding=0, activate=activate)
            self.stride_level_2 = conv_bn(128, self.inter_dim, kernel=3, stride=2, padding=1, activate=activate)
            self.expand = conv_bn(self.inter_dim, 256, kernel=3, stride=1, padding=1, activate=activate)
        elif level == 2:
            self.compress_level_0 = conv_bn(512, self.inter_dim, kernel=1, stride=1, padding=0, activate=activate)
            self.compress_level_1 = conv_bn(256, self.inter_dim, kernel=1, stride=1, padding=0, activate=activate)
            self.expand = conv_bn(self.inter_dim, 128, kernel=3, stride=1, padding=1, activate=activate)

        compress_c = 8 if rfb else 16  # when adding rfb, we use half number of channels to save memory

        self.weight_level_0 = conv_bn(self.inter_dim, compress_c, 1, 1, 0, activate=activate)
        self.weight_level_1 = conv_bn(self.inter_dim, compress_c, 1, 1, 0, activate=activate)
        self.weight_level_2 = conv_bn(self.inter_dim, compress_c, 1, 1, 0, activate=activate)

        self.weight_levels = conv_bias(compress_c * 3, 3, kernel=1, stride=1, padding=0)
        self.vis = vis

    def forward(self, x_level_0, x_level_1, x_level_2):
        """Fuse the three pyramid maps onto ``self.level``'s resolution.

        Returns the expanded fused map, plus (weights, fused.sum(dim=1))
        when ``self.vis`` is set. Assumes each level's spatial size is
        exactly 2x the next coarser level's — TODO confirm with callers.
        """
        # Resize every input to the target level's spatial size / width.
        if self.level == 0:
            level_0_resized = x_level_0
            level_1_resized = self.stride_level_1(x_level_1)
            level_2_downsampled_inter = torch.nn.functional.max_pool2d(x_level_2, 3, stride=2, padding=1)
            level_2_resized = self.stride_level_2(level_2_downsampled_inter)
        elif self.level == 1:
            level_0_compressed = self.compress_level_0(x_level_0)
            # scale_factor (not a size tuple built from tensors) is the
            # supported way to request an exact 2x nearest upsample.
            level_0_resized = torch.nn.functional.interpolate(level_0_compressed, scale_factor=2, mode='nearest')
            level_1_resized = x_level_1
            level_2_resized = self.stride_level_2(x_level_2)
        else:  # self.level == 2 (validated in __init__)
            level_0_compressed = self.compress_level_0(x_level_0)
            level_0_resized = torch.nn.functional.interpolate(level_0_compressed, scale_factor=4, mode='nearest')
            level_1_compressed = self.compress_level_1(x_level_1)
            level_1_resized = torch.nn.functional.interpolate(level_1_compressed, scale_factor=2, mode='nearest')
            level_2_resized = x_level_2

        # Predict per-pixel, per-level fusion weights and normalise them.
        level_0_weight_v = self.weight_level_0(level_0_resized)
        level_1_weight_v = self.weight_level_1(level_1_resized)
        level_2_weight_v = self.weight_level_2(level_2_resized)
        levels_weight_v = torch.cat((level_0_weight_v, level_1_weight_v, level_2_weight_v), 1)
        levels_weight = self.weight_levels(levels_weight_v)
        levels_weight = torch.nn.functional.softmax(levels_weight, dim=1)

        # Convex combination of the three resized maps (weights sum to 1).
        fused_out_reduced = level_0_resized * levels_weight[:, 0:1, :, :] + \
                            level_1_resized * levels_weight[:, 1:2, :, :] + \
                            level_2_resized * levels_weight[:, 2:, :, :]

        out = self.expand(fused_out_reduced)

        if self.vis:
            return out, levels_weight, fused_out_reduced.sum(dim=1)
        else:
            return out