import torch
import torch.nn as nn
import torch.utils.data
import torch.nn.functional as F
from ...models.model_utils.basic_block_2d import BasicBlock2D
# from .bifpn import BiFPN
import torch.nn as nn
import torch
from torchvision.ops.boxes import nms as nms_torch
import math
from torch import nn
import torch.nn.functional as F


class Conv2dStaticSamePadding(nn.Module):
    """
    created by Zylo117
    The real keras/tensorflow conv2d with same padding

    Wraps nn.Conv2d and pads the input at forward time so the output spatial
    size equals ceil(input_size / stride), matching TensorFlow's SAME padding.

    NOTE: the `dilation` argument is accepted but NOT forwarded to the
    underlying nn.Conv2d (which therefore always uses dilation=1); it is kept
    only for interface compatibility.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, bias=True, groups=1, dilation=1, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride=stride,
                              bias=bias, groups=groups)
        self.stride = self.conv.stride
        self.kernel_size = self.conv.kernel_size
        self.dilation = self.conv.dilation

        # Normalize stride / kernel_size to [h, w] pairs.
        if isinstance(self.stride, int):
            self.stride = [self.stride] * 2
        elif len(self.stride) == 1:
            self.stride = [self.stride[0]] * 2

        if isinstance(self.kernel_size, int):
            self.kernel_size = [self.kernel_size] * 2
        elif len(self.kernel_size) == 1:
            self.kernel_size = [self.kernel_size[0]] * 2

    def forward(self, x):
        h, w = x.shape[-2:]

        # TF SAME padding: total_pad = max((out - 1) * stride + k - in, 0).
        # FIX: the max(0, ...) clamp was missing; without it a kernel smaller
        # than the stride produces a negative pad, which F.pad interprets as
        # cropping input columns/rows instead of "no padding".
        extra_h = max((math.ceil(w / self.stride[1]) - 1) * self.stride[1] - w + self.kernel_size[1], 0)
        extra_v = max((math.ceil(h / self.stride[0]) - 1) * self.stride[0] - h + self.kernel_size[0], 0)

        # Split the pad asymmetrically (extra pixel goes right/bottom), as TF does.
        left = extra_h // 2
        right = extra_h - left
        top = extra_v // 2
        bottom = extra_v - top

        x = F.pad(x, [left, right, top, bottom])

        x = self.conv(x)
        return x


class MaxPool2dStaticSamePadding(nn.Module):
    """
    created by Zylo117
    The real keras/tensorflow MaxPool2d with same padding

    Pads the input at forward time so the pooled output has spatial size
    ceil(input_size / stride). NOTE(review): padding uses F.pad's default
    value 0, not -inf — on negative activations this can differ from TF's
    SAME pooling, which excludes padded cells; confirm intended.
    """

    def __init__(self, *args, **kwargs):
        super().__init__()
        self.pool = nn.MaxPool2d(*args, **kwargs)

        def _pair(v):
            # Normalize an int or 1-element sequence to an [h, w] pair.
            if isinstance(v, int):
                return [v] * 2
            if len(v) == 1:
                return [v[0]] * 2
            return v

        self.stride = _pair(self.pool.stride)
        self.kernel_size = _pair(self.pool.kernel_size)

    def forward(self, x):
        h, w = x.shape[-2:]
        sh, sw = self.stride
        kh, kw = self.kernel_size

        # Total padding needed so the output width/height is ceil(in / stride).
        pad_w = (math.ceil(w / sw) - 1) * sw - w + kw
        pad_h = (math.ceil(h / sh) - 1) * sh - h + kh

        # Extra pixel (when the total is odd) goes to the right/bottom.
        left = pad_w // 2
        top = pad_h // 2
        x = F.pad(x, [left, pad_w - left, top, pad_h - top])

        return self.pool(x)


def nms(dets, thresh):
    """Non-maximum suppression via torchvision.

    Args:
        dets: (N, 5) tensor of [x1, y1, x2, y2, score] rows.
        thresh: IoU threshold above which lower-scoring boxes are suppressed.

    Returns:
        Indices of the kept boxes, sorted by decreasing score.
    """
    boxes = dets[:, :4]
    scores = dets[:, 4]
    return nms_torch(boxes, scores, thresh)


class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish (x * sigmoid(x)).

    Saves only the input tensor and recomputes sigmoid in backward, instead
    of keeping the intermediate sigmoid activation alive.
    """

    @staticmethod
    def forward(ctx, i):
        # Save the raw input; everything else is recomputed in backward.
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result

    @staticmethod
    def backward(ctx, grad_output):
        # d/di [i * sigmoid(i)] = sigmoid(i) * (1 + i * (1 - sigmoid(i)))
        # FIX: ctx.saved_variables is a long-deprecated alias removed from
        # modern PyTorch; the supported accessor is ctx.saved_tensors.
        i = ctx.saved_tensors[0]
        sigmoid_i = torch.sigmoid(i)
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))


class MemoryEfficientSwish(nn.Module):
    """Swish activation backed by the custom autograd function.

    Cheaper on memory than the plain `Swish` module, but the custom Function
    is not traceable for ONNX export.
    """

    def forward(self, x):
        # Delegate to the autograd Function that recomputes sigmoid in backward.
        return SwishImplementation.apply(x)


class Swish(nn.Module):
    """Plain Swish activation, x * sigmoid(x), expressed with standard ops
    so it is ONNX-exportable (unlike MemoryEfficientSwish)."""

    def forward(self, x):
        return torch.sigmoid(x) * x


class SeparableConvBlock(nn.Module):
    """
    created by Zylo117

    Depthwise-separable convolution block: a 3x3 depthwise conv (no bias)
    followed by a 1x1 pointwise conv (with bias), optionally BatchNorm and
    Swish activation. Only the pointwise conv carries a bias — confirmed to
    match the reference TF implementation.
    """

    def __init__(self, in_channels, out_channels=None, norm=True, activation=False, onnx_export=False):
        super(SeparableConvBlock, self).__init__()
        out_channels = in_channels if out_channels is None else out_channels

        self.depthwise_conv = Conv2dStaticSamePadding(
            in_channels, in_channels, kernel_size=3, stride=1, groups=in_channels, bias=False)
        self.pointwise_conv = Conv2dStaticSamePadding(
            in_channels, out_channels, kernel_size=1, stride=1)

        self.norm = norm
        if norm:
            # pytorch momentum is 1 - tensorflow momentum, hence 0.01.
            self.bn = nn.BatchNorm2d(num_features=out_channels, momentum=0.01, eps=1e-3)

        self.activation = activation
        if activation:
            self.swish = Swish() if onnx_export else MemoryEfficientSwish()

    def forward(self, x):
        out = self.pointwise_conv(self.depthwise_conv(x))

        if self.norm:
            out = self.bn(out)

        if self.activation:
            out = self.swish(out)

        return out


class BiFPN(nn.Module):
    """
    modified by Zylo117

    One bidirectional FPN layer (EfficientDet-style): a top-down pass followed
    by a bottom-up pass over the pyramid levels P3..P7 (optionally P8), with
    either fast-normalized attention weights (`attention=True`) or plain sums.
    """

    def __init__(self, num_channels, conv_channels, up_scales=[2, 2, 2, 2], first_time=False, epsilon=1e-4, onnx_export=False, attention=True,
                 use_p8=False):
        """

        Args:
            num_channels: channel count shared by all BiFPN feature maps.
            conv_channels: backbone channel counts for [p3, p4, p5, p6]; only
                read when first_time is True (for the down-channel convs).
            up_scales: nearest-upsample factors for the top-down path,
                indices 0..2 used for p3/p4/p5.
                NOTE(review): up_scales[3] is never read and the p6 upsample is
                hard-coded to 2 — confirm intended. Mutable default list is
                shared across instances; harmless since it is never mutated.
            first_time: whether the input comes directly from the efficientnet,
                        if True, downchannel it first, and downsample P5 to generate P6 then P7
            epsilon: epsilon of fast weighted attention sum of BiFPN, not the BN's epsilon
            onnx_export: if True, use Swish instead of MemoryEfficientSwish
            attention: use the fast-attention weighted fusion path.
            use_p8: add a P8 level. NOTE(review): only the non-attention path
                (`_forward`) handles use_p8; `_forward_fast_attention` would
                fail to unpack p8 inputs — confirm callers never combine
                attention=True with use_p8=True.
        """
        super(BiFPN, self).__init__()
        self.epsilon = epsilon
        self.use_p8 = use_p8

        # Conv layers: one separable 3x3 fusion conv per fused pyramid node.
        self.conv6_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv5_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv4_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv3_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv4_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv5_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv6_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        self.conv7_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)
        if use_p8:
            self.conv7_up = SeparableConvBlock(num_channels, onnx_export=onnx_export)
            self.conv8_down = SeparableConvBlock(num_channels, onnx_export=onnx_export)

        # Feature scaling layers.
        # Top-down: nearest-neighbor upsampling; p6's factor is hard-coded to 2.
        self.p6_upsample = nn.Upsample(scale_factor=2, mode='nearest')
        self.p5_upsample = nn.Upsample(scale_factor=up_scales[2], mode='nearest')
        self.p4_upsample = nn.Upsample(scale_factor=up_scales[1], mode='nearest')
        self.p3_upsample = nn.Upsample(scale_factor=up_scales[0], mode='nearest')

        # Bottom-up: max-pool downsampling with static SAME padding.
        # NOTE(review): p4_downsample uses kernel 7 / stride 4 / pad 1 while
        # p4_upsample uses up_scales[1] (default 2) — asymmetric scale factors;
        # confirm the intended p3/p4 scale gap for this configuration.
        self.p4_downsample = MaxPool2dStaticSamePadding(7, 4, 1)
        self.p5_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p6_downsample = MaxPool2dStaticSamePadding(3, 2)
        self.p7_downsample = MaxPool2dStaticSamePadding(3, 2)
        if use_p8:
            self.p7_upsample = nn.Upsample(scale_factor=2, mode='nearest')
            self.p8_downsample = MaxPool2dStaticSamePadding(3, 2)

        self.swish = MemoryEfficientSwish() if not onnx_export else Swish()

        self.first_time = first_time
        if self.first_time:
            # Despite its name, p6_to_p7 consumes the backbone's p6 feature
            # (conv_channels[-1]) and pools it down to the p7 scale.
            self.p6_to_p7 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[-1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
                MaxPool2dStaticSamePadding(3, 2)
            )
            # 1x1 down-channel convs (+BN) bringing each backbone level to
            # num_channels.
            self.p6_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[3], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p5_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p4_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p3_down_channel = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[0], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )

            if use_p8:
                self.p7_to_p8 = nn.Sequential(
                    MaxPool2dStaticSamePadding(3, 2)
                )

            # Second, independent down-channel convs for the skip inputs of
            # the bottom-up pass (weights NOT shared with the ones above).
            self.p4_down_channel_2 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[1], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )
            self.p5_down_channel_2 = nn.Sequential(
                Conv2dStaticSamePadding(conv_channels[2], num_channels, 1),
                nn.BatchNorm2d(num_channels, momentum=0.01, eps=1e-3),
            )

        # Weight: learnable fusion weights for fast-normalized attention.
        # *_w1 fuse 2 inputs on the top-down pass; *_w2 fuse 2-3 inputs on the
        # bottom-up pass. ReLU keeps them non-negative before normalization.
        self.p6_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p6_w1_relu = nn.ReLU()
        self.p5_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p5_w1_relu = nn.ReLU()
        self.p4_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p4_w1_relu = nn.ReLU()
        self.p3_w1 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p3_w1_relu = nn.ReLU()

        self.p4_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p4_w2_relu = nn.ReLU()
        self.p5_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p5_w2_relu = nn.ReLU()
        self.p6_w2 = nn.Parameter(torch.ones(3, dtype=torch.float32), requires_grad=True)
        self.p6_w2_relu = nn.ReLU()
        self.p7_w2 = nn.Parameter(torch.ones(2, dtype=torch.float32), requires_grad=True)
        self.p7_w2_relu = nn.ReLU()

        self.attention = attention

    def forward(self, inputs):
        """
        illustration of a minimal bifpn unit
            P7_0 -------------------------> P7_2 -------->
               |-------------|                ↑
                             ↓                |
            P6_0 ---------> P6_1 ---------> P6_2 -------->
               |-------------|--------------↑ ↑
                             ↓                |
            P5_0 ---------> P5_1 ---------> P5_2 -------->
               |-------------|--------------↑ ↑
                             ↓                |
            P4_0 ---------> P4_1 ---------> P4_2 -------->
               |-------------|--------------↑ ↑
                             |--------------↓ |
            P3_0 -------------------------> P3_2 -------->
        """

        # downsample channels using same-padding conv2d to target phase's if not the same
        # judge: same phase as target,
        # if same, pass;
        # elif earlier phase, downsample to target phase's by pooling
        # elif later phase, upsample to target phase's by nearest interpolation

        if self.attention:
            outs = self._forward_fast_attention(inputs)
        else:
            outs = self._forward(inputs)

        return outs

    def _forward_fast_attention(self, inputs):
        """Fusion with fast-normalized attention weights.

        With first_time=True, `inputs` must be the 4 backbone maps
        (p3, p4, p5, p6); otherwise the 5 pyramid maps P3_0..P7_0.
        NOTE(review): this path has no use_p8 handling (see `_forward`).
        Returns (p3_out, p4_out, p5_out, p6_out, p7_out).
        """
        if self.first_time:
            p3, p4, p5, p6 = inputs

            # p7 is synthesized from p6; p3..p6 are down-channeled to
            # num_channels.
            p7_in = self.p6_to_p7(p6)
            p6_in = self.p6_down_channel(p6)


            p3_in = self.p3_down_channel(p3)
            p4_in = self.p4_down_channel(p4)
            p5_in = self.p5_down_channel(p5)

        else:
            # P3_0, P4_0, P5_0, P6_0 and P7_0
            p3_in, p4_in, p5_in, p6_in, p7_in = inputs

        # P7_0 to P7_2

        # Top-down pass: each weight pair is ReLU'd then normalized to sum to
        # ~1 (epsilon avoids division by zero).
        # Weights for P6_0 and P7_0 to P6_1
        p6_w1 = self.p6_w1_relu(self.p6_w1)
        weight = p6_w1 / (torch.sum(p6_w1, dim=0) + self.epsilon)
        # Connections for P6_0 and P7_0 to P6_1 respectively
        p6_up = self.conv6_up(self.swish(weight[0] * p6_in + weight[1] * self.p6_upsample(p7_in)))

        # Weights for P5_0 and P6_1 to P5_1
        p5_w1 = self.p5_w1_relu(self.p5_w1)
        weight = p5_w1 / (torch.sum(p5_w1, dim=0) + self.epsilon)
        # Connections for P5_0 and P6_1 to P5_1 respectively
        p5_up = self.conv5_up(self.swish(weight[0] * p5_in + weight[1] * self.p5_upsample(p6_up)))

        # Weights for P4_0 and P5_1 to P4_1
        p4_w1 = self.p4_w1_relu(self.p4_w1)
        weight = p4_w1 / (torch.sum(p4_w1, dim=0) + self.epsilon)
        # Connections for P4_0 and P5_1 to P4_1 respectively
        p4_up = self.conv4_up(self.swish(weight[0] * p4_in + weight[1] * self.p4_upsample(p5_up)))

        # Weights for P3_0 and P4_1 to P3_2
        p3_w1 = self.p3_w1_relu(self.p3_w1)
        weight = p3_w1 / (torch.sum(p3_w1, dim=0) + self.epsilon)
        # Connections for P3_0 and P4_1 to P3_2 respectively
        p3_out = self.conv3_up(self.swish(weight[0] * p3_in + weight[1] * self.p3_upsample(p4_up)))

        if self.first_time:
            # The bottom-up pass uses independently down-channeled skips.
            p4_in = self.p4_down_channel_2(p4)
            p5_in = self.p5_down_channel_2(p5)

        # Bottom-up pass: 3-way fusion of skip input, top-down node, and the
        # downsampled lower-level output.
        # Weights for P4_0, P4_1 and P3_2 to P4_2
        p4_w2 = self.p4_w2_relu(self.p4_w2)
        weight = p4_w2 / (torch.sum(p4_w2, dim=0) + self.epsilon)
        # Connections for P4_0, P4_1 and P3_2 to P4_2 respectively
        p4_out = self.conv4_down(
            self.swish(weight[0] * p4_in + weight[1] * p4_up + weight[2] * self.p4_downsample(p3_out)))

        # Weights for P5_0, P5_1 and P4_2 to P5_2
        p5_w2 = self.p5_w2_relu(self.p5_w2)
        weight = p5_w2 / (torch.sum(p5_w2, dim=0) + self.epsilon)
        # Connections for P5_0, P5_1 and P4_2 to P5_2 respectively
        p5_out = self.conv5_down(
            self.swish(weight[0] * p5_in + weight[1] * p5_up + weight[2] * self.p5_downsample(p4_out)))

        # Weights for P6_0, P6_1 and P5_2 to P6_2
        p6_w2 = self.p6_w2_relu(self.p6_w2)
        weight = p6_w2 / (torch.sum(p6_w2, dim=0) + self.epsilon)
        # Connections for P6_0, P6_1 and P5_2 to P6_2 respectively
        p6_out = self.conv6_down(
            self.swish(weight[0] * p6_in + weight[1] * p6_up + weight[2] * self.p6_downsample(p5_out)))

        # Weights for P7_0 and P6_2 to P7_2
        p7_w2 = self.p7_w2_relu(self.p7_w2)
        weight = p7_w2 / (torch.sum(p7_w2, dim=0) + self.epsilon)
        # Connections for P7_0 and P6_2 to P7_2
        p7_out = self.conv7_down(self.swish(weight[0] * p7_in + weight[1] * self.p7_downsample(p6_out)))

        return p3_out, p4_out, p5_out, p6_out, p7_out

    def _forward(self, inputs):
        """Unweighted fusion (plain sums, no attention weights).

        This is the only path that supports use_p8. Returns the fused levels
        P3..P7 (plus P8 when use_p8 is True).
        """
        if self.first_time:
            p3, p4, p5, p6 = inputs

            p7_in = self.p6_to_p7(p6)
            p6_in = self.p6_down_channel(p6)

            if self.use_p8:
                p8_in = self.p7_to_p8(p7_in)

            p3_in = self.p3_down_channel(p3)
            p4_in = self.p4_down_channel(p4)
            p5_in = self.p5_down_channel(p5)

        else:
            if self.use_p8:
                # P3_0, P4_0, P5_0, P6_0, P7_0 and P8_0
                p3_in, p4_in, p5_in, p6_in, p7_in, p8_in = inputs
            else:
                # P3_0, P4_0, P5_0, P6_0 and P7_0
                p3_in, p4_in, p5_in, p6_in, p7_in = inputs

        if self.use_p8:
            # P8_0 to P8_2

            # Connections for P7_0 and P8_0 to P7_1 respectively
            p7_up = self.conv7_up(self.swish(p7_in + self.p7_upsample(p8_in)))

            # Connections for P6_0 and P7_0 to P6_1 respectively
            p6_up = self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_up)))
        else:
            # P7_0 to P7_2

            # Connections for P6_0 and P7_0 to P6_1 respectively
            p6_up = self.conv6_up(self.swish(p6_in + self.p6_upsample(p7_in)))

        # Connections for P5_0 and P6_1 to P5_1 respectively
        p5_up = self.conv5_up(self.swish(p5_in + self.p5_upsample(p6_up)))

        # Connections for P4_0 and P5_1 to P4_1 respectively
        p4_up = self.conv4_up(self.swish(p4_in + self.p4_upsample(p5_up)))

        # Connections for P3_0 and P4_1 to P3_2 respectively
        p3_out = self.conv3_up(self.swish(p3_in + self.p3_upsample(p4_up)))

        if self.first_time:
            # Bottom-up skips use the second set of down-channel convs.
            p4_in = self.p4_down_channel_2(p4)
            p5_in = self.p5_down_channel_2(p5)

        # Connections for P4_0, P4_1 and P3_2 to P4_2 respectively
        p4_out = self.conv4_down(
            self.swish(p4_in + p4_up + self.p4_downsample(p3_out)))

        # Connections for P5_0, P5_1 and P4_2 to P5_2 respectively
        p5_out = self.conv5_down(
            self.swish(p5_in + p5_up + self.p5_downsample(p4_out)))

        # Connections for P6_0, P6_1 and P5_2 to P6_2 respectively
        p6_out = self.conv6_down(
            self.swish(p6_in + p6_up + self.p6_downsample(p5_out)))

        if self.use_p8:
            # Connections for P7_0, P7_1 and P6_2 to P7_2 respectively
            p7_out = self.conv7_down(
                self.swish(p7_in + p7_up + self.p7_downsample(p6_out)))

            # Connections for P8_0 and P7_2 to P8_2
            p8_out = self.conv8_down(self.swish(p8_in + self.p8_downsample(p7_out)))

            return p3_out, p4_out, p5_out, p6_out, p7_out, p8_out
        else:
            # Connections for P7_0 and P6_2 to P7_2
            p7_out = self.conv7_down(self.swish(p7_in + self.p7_downsample(p6_out)))

            return p3_out, p4_out, p5_out, p6_out, p7_out


def convbn(in_planes,
           out_planes,
           kernel_size,
           stride,
           pad,
           dilation=1,
           gn=False,
           groups=32):
    """2D conv (no bias) followed by BatchNorm2d, or GroupNorm when gn=True.

    Args:
        in_planes: input channels.
        out_planes: output channels.
        kernel_size, stride, pad: standard nn.Conv2d parameters. When
            dilation > 1, padding is set to `dilation` instead of `pad` so a
            3x3 dilated kernel keeps the output resolution.
        dilation: conv dilation factor.
        gn: use GroupNorm instead of BatchNorm2d.
        groups: number of GroupNorm groups.
    """
    # GroupNorm needs out_planes divisible by the group count.
    # FIX: check against `groups` — previously hard-coded to 32, which gave
    # the wrong answer whenever a caller passed a different group count.
    if gn and out_planes % groups != 0:
        print('Cannot apply GN as the channels is not %d-divisible.' % groups)
        gn = False
    return nn.Sequential(
        nn.Conv2d(in_planes,
                  out_planes,
                  kernel_size=kernel_size,
                  stride=stride,
                  padding=dilation if dilation > 1 else pad,
                  dilation=dilation,
                  bias=False),
        nn.BatchNorm2d(out_planes) if not gn else nn.GroupNorm(
            groups, out_planes))


def convbn_3d(in_planes,
              out_planes,
              kernel_size,
              stride,
              pad,
              gn=False,
              groups=32):
    """3D conv (no bias) followed by BatchNorm3d, or GroupNorm when gn=True.

    Args:
        in_planes: input channels.
        out_planes: output channels.
        kernel_size, stride, pad: standard nn.Conv3d parameters.
        gn: use GroupNorm instead of BatchNorm3d.
        groups: number of GroupNorm groups.
    """
    # Consistency with `convbn`: fall back to BatchNorm when GroupNorm's
    # divisibility requirement is not met (nn.GroupNorm would otherwise raise
    # at construction time).
    if gn and out_planes % groups != 0:
        print('Cannot apply GN as the channels is not %d-divisible.' % groups)
        gn = False
    return nn.Sequential(
        nn.Conv3d(in_planes,
                  out_planes,
                  kernel_size=kernel_size,
                  padding=pad,
                  stride=stride,
                  bias=False),
        nn.BatchNorm3d(out_planes) if not gn else nn.GroupNorm(
            groups, out_planes))


class BasicBlock(nn.Module):
    """Residual basic block: conv-bn-relu, conv-bn, then a skip connection.

    NOTE: intentionally no ReLU after the residual addition (matches the
    PSMNet-style feature extractor this block comes from).
    """

    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride,
                 downsample,
                 pad,
                 dilation,
                 gn=False):
        super(BasicBlock, self).__init__()

        # First conv may change resolution (stride) and is followed by ReLU.
        self.conv1 = nn.Sequential(
            convbn(inplanes, planes, 3, stride, pad, dilation, gn=gn),
            nn.ReLU(inplace=True))
        # Second conv keeps resolution; no activation before the addition.
        self.conv2 = convbn(planes, planes, 3, 1, pad, dilation, gn=gn)

        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        out = self.conv2(self.conv1(x))

        # Match the identity path to the main path when a projection is given.
        identity = x if self.downsample is None else self.downsample(x)
        out += identity

        return out


class disparityregression(nn.Module):
    """Soft-argmin style regression: the expectation of candidate depth
    values under a per-pixel probability volume."""

    def __init__(self):
        super(disparityregression, self).__init__()

    def forward(self, x, depth):
        # x: (B, D, H, W) probability volume; depth: (D,) candidate values.
        assert len(x.shape) == 4
        assert len(depth.shape) == 1
        # Broadcast candidates over batch and spatial dims, then reduce the
        # candidate axis -> (B, H, W).
        weights = depth[None, :, None, None]
        return (x * weights).sum(dim=1)


class hourglass(nn.Module):
    """3D hourglass encoder-decoder over a cost volume.

    Two stride-2 encoder stages (conv1..conv4) followed by two transposed-conv
    decoder stages (conv5, conv6) with skip connections. Attribute names
    conv1..conv6 are part of the interface (subclasses replace them).
    """

    def __init__(self, inplanes, gn=False, planes_mul=[2, 2]):
        super(hourglass, self).__init__()

        mid = inplanes * planes_mul[0]
        deep = inplanes * planes_mul[1]

        # Encoder: downsample + refine, twice.
        self.conv1 = nn.Sequential(
            convbn_3d(inplanes, mid, kernel_size=3, stride=2, pad=1, gn=gn),
            nn.ReLU(inplace=True))

        self.conv2 = convbn_3d(mid, mid, kernel_size=3, stride=1, pad=1, gn=gn)

        self.conv3 = nn.Sequential(
            convbn_3d(mid, deep, kernel_size=3, stride=2, pad=1, gn=gn),
            nn.ReLU(inplace=True))

        self.conv4 = nn.Sequential(
            convbn_3d(deep, deep, kernel_size=3, stride=1, pad=1, gn=gn),
            nn.ReLU(inplace=True))

        # Decoder: transposed convs back up; fused with skips in forward.
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(deep, mid, kernel_size=3, padding=1,
                               output_padding=1, stride=2, bias=False),
            nn.GroupNorm(32, mid) if gn else nn.BatchNorm3d(mid))  # +conv2

        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(mid, inplanes, kernel_size=3, padding=1,
                               output_padding=1, stride=2, bias=False),
            nn.GroupNorm(32, inplanes) if gn else nn.BatchNorm3d(inplanes))  # +x

    def forward(self, x, presqu=None, postsqu=None):
        """Run the hourglass; presqu/postsqu are optional skip tensors from a
        previous hourglass (stacked-hourglass style)."""
        out = self.conv1(x)    # 1/4 -> 1/8
        pre = self.conv2(out)  # 1/8 -> 1/8
        if postsqu is None:
            pre = F.relu(pre, inplace=True)
        else:
            pre = F.relu(pre + postsqu, inplace=True)

        out = self.conv4(self.conv3(pre))  # 1/8 -> 1/16 -> 1/16

        # Decoder skip: prefer the caller-provided tensor, else our own `pre`.
        skip = presqu if presqu is not None else pre
        post = F.relu(self.conv5(out) + skip, inplace=True)  # 1/16 -> 1/8

        out = self.conv6(post)  # 1/8 -> 1/4

        if presqu is None and postsqu is None:
            return out
        return out, pre, post

class hourglass_bev(hourglass):
    """Hourglass variant for BEV-style volumes: strides only the two spatial
    axes ((1, 2, 2)), keeping the first volume axis at full resolution."""

    def __init__(self, inplanes, gn=False):
        # The parent builds its own conv1..conv6; all of them are replaced
        # below with (1, 2, 2)-strided counterparts.
        super(hourglass_bev, self).__init__(inplanes, gn)

        ch = inplanes * 2

        self.conv1 = nn.Sequential(
            convbn_3d(inplanes, ch, kernel_size=3, stride=(1, 2, 2), pad=1, gn=gn),
            nn.ReLU(inplace=True))

        self.conv2 = convbn_3d(ch, ch, kernel_size=3, stride=1, pad=1, gn=gn)

        self.conv3 = nn.Sequential(
            convbn_3d(ch, ch, kernel_size=3, stride=(1, 2, 2), pad=1, gn=gn),
            nn.ReLU(inplace=True))

        self.conv4 = nn.Sequential(
            convbn_3d(ch, ch, kernel_size=3, stride=1, pad=1, gn=gn),
            nn.ReLU(inplace=True))

        # output_padding only on the strided axes so shapes round-trip.
        self.conv5 = nn.Sequential(
            nn.ConvTranspose3d(ch, ch, kernel_size=3, padding=1,
                               output_padding=(0, 1, 1), stride=(1, 2, 2),
                               bias=False),
            nn.GroupNorm(32, ch) if gn else nn.BatchNorm3d(ch))  # +conv2

        self.conv6 = nn.Sequential(
            nn.ConvTranspose3d(ch, inplanes, kernel_size=3, padding=1,
                               output_padding=(0, 1, 1), stride=(1, 2, 2),
                               bias=False),
            nn.GroupNorm(32, inplanes) if gn else nn.BatchNorm3d(inplanes))  # +x


class hourglass2d(nn.Module):
    """2D hourglass encoder-decoder over feature maps.

    Mirrors `hourglass` with 2D convs; channels double internally. Unlike
    `hourglass`, forward always returns (out, pre, post).
    """

    def __init__(self, inplanes, gn=False):
        super(hourglass2d, self).__init__()

        # Encoder: two stride-2 stages, each followed by a refining conv.
        self.conv1 = nn.Sequential(
            convbn(inplanes,
                   inplanes * 2,
                   kernel_size=3,
                   stride=2,
                   pad=1,
                   dilation=1,
                   gn=gn), nn.ReLU(inplace=True))

        self.conv2 = convbn(inplanes * 2,
                            inplanes * 2,
                            kernel_size=3,
                            stride=1,
                            pad=1,
                            dilation=1,
                            gn=gn)

        self.conv3 = nn.Sequential(
            convbn(inplanes * 2,
                   inplanes * 2,
                   kernel_size=3,
                   stride=2,
                   pad=1,
                   dilation=1,
                   gn=gn), nn.ReLU(inplace=True))

        self.conv4 = nn.Sequential(
            convbn(inplanes * 2,
                   inplanes * 2,
                   kernel_size=3,
                   stride=1,
                   pad=1,
                   dilation=1,
                   gn=gn), nn.ReLU(inplace=True))

        # Decoder: transposed convs back up; outputs fused with skips in
        # forward (+conv2, +x).
        self.conv5 = nn.Sequential(
            nn.ConvTranspose2d(inplanes * 2,
                               inplanes * 2,
                               kernel_size=3,
                               padding=1,
                               output_padding=1,
                               stride=2,
                               bias=False),
            nn.BatchNorm2d(inplanes *
                           2) if not gn else nn.GroupNorm(32, inplanes *
                                                          2))  # +conv2

        self.conv6 = nn.Sequential(
            nn.ConvTranspose2d(inplanes * 2,
                               inplanes,
                               kernel_size=3,
                               padding=1,
                               output_padding=1,
                               stride=2,
                               bias=False),
            nn.BatchNorm2d(inplanes)
            if not gn else nn.GroupNorm(32, inplanes))  # +x

    def forward(self, x, presqu=None, postsqu=None):
        """Run the hourglass.

        Args:
            x: input feature map at 1/4 resolution.
            presqu, postsqu: optional skip tensors from a previous hourglass.
                FIX: now default to None for consistency with `hourglass`
                (backward-compatible — existing positional callers are
                unaffected).

        Returns:
            (out, pre, post) — always a 3-tuple, unlike `hourglass`.
        """
        out = self.conv1(x)  # in:1/4 out:1/8
        pre = self.conv2(out)  # in:1/8 out:1/8
        if postsqu is not None:
            pre = F.relu(pre + postsqu, inplace=True)
        else:
            pre = F.relu(pre, inplace=True)

        out = self.conv3(pre)  # in:1/8 out:1/16
        out = self.conv4(out)  # in:1/16 out:1/16

        if presqu is not None:
            post = F.relu(self.conv5(out) + presqu,
                          inplace=True)  # in:1/16 out:1/8
        else:
            post = F.relu(self.conv5(out) + pre, inplace=True)

        out = self.conv6(post)  # in:1/8 out:1/4

        return out, pre, post

class upconv_module(nn.Module):
    """FPN-style top-down refinement: repeatedly transform, 2x-upsample, and
    fuse with a lateral (redir) branch from the next-finer input level.

    With share_upconv=True, two parallel 1x1 heads are applied to the shared
    trunk and both outputs are returned.
    """

    def __init__(self, in_channels, up_channels, share_upconv=False, final_channels=None, kernel1=True):
        super(upconv_module, self).__init__()
        self.num_stage = len(in_channels) - 1
        self.conv = nn.ModuleList()
        self.redir = nn.ModuleList()
        for idx in range(self.num_stage):
            # First stage optionally uses a 1x1 kernel (kernel1), later
            # stages always 3x3 with padding 1.
            if idx == 0 and kernel1:
                k, p = 1, 0
            else:
                k, p = 3, 1
            src = in_channels[0] if idx == 0 else up_channels[idx - 1]
            self.conv.append(convbn(src, up_channels[idx], k, 1, p, 1))
            self.redir.append(
                convbn(in_channels[idx + 1], up_channels[idx], 3, 1, 1, 1))
        self.up = nn.Upsample(scale_factor=2, mode='bilinear')

        self.share_upconv = share_upconv
        if self.share_upconv:
            # NOTE: `idx` deliberately leaks from the loop above (last stage),
            # mirroring the original indexing.
            self.lastconv = nn.Conv2d(up_channels[idx], final_channels[-1],
                kernel_size=1,
                padding=0,
                stride=1,
                bias=False)
            self.rpnconv = nn.Conv2d(up_channels[idx], final_channels[-1],
                kernel_size=1,
                padding=0,
                stride=1,
                bias=False)

    def forward(self, feats):
        # feats[0] is the coarsest level; each stage fuses with the next one.
        x = feats[0]
        for idx in range(self.num_stage):
            x = self.conv[idx](x)
            lateral = self.redir[idx](feats[idx + 1])
            x = F.relu(self.up(x) + lateral)

        if self.share_upconv:
            return self.lastconv(x), self.rpnconv(x)
        return x

class upconv_module_cat(nn.Module):
    """Fuse a three-level feature pyramid by projecting each level with a 1x1
    conv, upsampling everything to the finest resolution, concatenating, and
    squeezing with a biased 1x1 conv."""

    def __init__(self, in_channels, up_channels, final_channels):
        super(upconv_module_cat, self).__init__()
        self.num_stage = len(in_channels)
        self.conv = nn.ModuleList()
        # 1x1 squeeze over the concatenated branches (bias, no norm).
        self.squeezeconv = nn.Conv2d(sum(up_channels), final_channels,
            kernel_size=1, padding=0, stride=1, bias=True)
        for idx in range(self.num_stage):
            branch = nn.Sequential(
                convbn(in_channels[idx], up_channels[idx], 1, 1, 0, 1),
                nn.ReLU(inplace=True))
            self.conv.append(branch)

    def forward(self, feats):
        # NOTE(review): assumes exactly three levels with fixed 4x/2x/1x
        # scale gaps relative to the finest map — confirm against callers.
        branches = [
            F.interpolate(self.conv[0](feats[0]), scale_factor=4, mode='bilinear', align_corners=True),
            F.interpolate(self.conv[1](feats[1]), scale_factor=2, mode='bilinear', align_corners=True),
            self.conv[2](feats[2]),
        ]
        return self.squeezeconv(torch.cat(branches, dim=1))

class upconv_module_catk3(nn.Module):
    """Concat-style decoder with 3x3 lateral convs.

    Levels 0/1/2 are projected with 3x3 convbn+ReLU, upsampled 4x/2x/1x
    to the resolution of feats[2], concatenated, and squeezed down to
    final_channels[1].  With share_upconv two independent 1x1 heads
    (stereo, semantic) are emitted from the squeezed map.
    """

    def __init__(self, in_channels, up_channels, final_channels, share_upconv=False):
        super(upconv_module_catk3, self).__init__()
        self.num_stage = len(in_channels)
        self.conv = nn.ModuleList()
        for idx in range(self.num_stage):
            self.conv.append(nn.Sequential(
                convbn(in_channels[idx], up_channels[idx], 3, 1, 1, 1),
                nn.ReLU(inplace=True)))
        # 3x3 convbn(+GN) followed by a 1x1 projection to the final width.
        self.squeezeconv = nn.Sequential(
            convbn(sum(up_channels), final_channels[0], 3, 1, 1, gn=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(final_channels[0], final_channels[1],
                      kernel_size=1, padding=0, stride=1, bias=False))

        self.share_upconv = share_upconv
        if self.share_upconv:
            # Two heads on top of the shared trunk.
            self.lastconv = nn.Conv2d(final_channels[1], final_channels[1],
                                      kernel_size=1, padding=0, stride=1, bias=False)
            self.rpnconv = nn.Conv2d(final_channels[1], final_channels[1],
                                     kernel_size=1, padding=0, stride=1, bias=False)

    def forward(self, feats):
        resized = [
            F.interpolate(self.conv[0](feats[0]), scale_factor=4,
                          mode='bilinear', align_corners=True),
            F.interpolate(self.conv[1](feats[1]), scale_factor=2,
                          mode='bilinear', align_corners=True),
            self.conv[2](feats[2]),
        ]
        squeezed = self.squeezeconv(torch.cat(resized, dim=1))
        if self.share_upconv:
            return self.lastconv(squeezed), self.rpnconv(squeezed)
        return squeezed


class upconv_module_catk4(nn.Module):
    """Concat-style decoder, variant of upconv_module_catk3 with 2x/2x/1x
    upsampling (feats[0] and feats[1] are expected at half the resolution
    of feats[2]).  Otherwise identical: 3x3 laterals, concat, squeeze,
    optional twin 1x1 heads when share_upconv is set.
    """

    def __init__(self, in_channels, up_channels, final_channels, share_upconv=False):
        super(upconv_module_catk4, self).__init__()
        self.num_stage = len(in_channels)
        self.conv = nn.ModuleList()
        for idx in range(self.num_stage):
            self.conv.append(nn.Sequential(
                convbn(in_channels[idx], up_channels[idx], 3, 1, 1, 1),
                nn.ReLU(inplace=True)))
        # 3x3 convbn(+GN) then 1x1 projection to final_channels[1].
        self.squeezeconv = nn.Sequential(
            convbn(sum(up_channels), final_channels[0], 3, 1, 1, gn=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(final_channels[0], final_channels[1],
                      kernel_size=1, padding=0, stride=1, bias=False))

        self.share_upconv = share_upconv
        if self.share_upconv:
            self.lastconv = nn.Conv2d(final_channels[1], final_channels[1],
                                      kernel_size=1, padding=0, stride=1, bias=False)
            self.rpnconv = nn.Conv2d(final_channels[1], final_channels[1],
                                     kernel_size=1, padding=0, stride=1, bias=False)

    def forward(self, feats):
        resized = [
            F.interpolate(self.conv[0](feats[0]), scale_factor=2,
                          mode='bilinear', align_corners=True),
            F.interpolate(self.conv[1](feats[1]), scale_factor=2,
                          mode='bilinear', align_corners=True),
            self.conv[2](feats[2]),
        ]
        squeezed = self.squeezeconv(torch.cat(resized, dim=1))
        if self.share_upconv:
            return self.lastconv(squeezed), self.rpnconv(squeezed)
        return squeezed


class upconv_module(nn.Module):
    """Top-down decoder: repeatedly 2x-upsample the coarsest feature and
    fuse it with a 3x3-projected skip from the next finer level.

    With share_upconv two 1x1 heads project the final map so a single
    decoder can feed both the stereo and the semantic branch.
    """

    def __init__(self, in_channels, up_channels, share_upconv=False, final_channels=None, kernel1=True):
        super(upconv_module, self).__init__()
        self.num_stage = len(in_channels) - 1
        self.conv = nn.ModuleList()
        self.redir = nn.ModuleList()
        for stage_idx in range(self.num_stage):
            # Stage 0 may use a cheap 1x1 projection (kernel1=True); all
            # other stages use a 3x3 conv with padding 1.
            use_1x1 = stage_idx == 0 and kernel1
            kernel, pad = (1, 0) if use_1x1 else (3, 1)
            src_ch = in_channels[0] if stage_idx == 0 else up_channels[stage_idx - 1]
            self.conv.append(convbn(src_ch, up_channels[stage_idx], kernel, 1, pad, 1))
            self.redir.append(convbn(in_channels[stage_idx + 1], up_channels[stage_idx], 3, 1, 1, 1))
        self.up = nn.Upsample(scale_factor=2, mode='bilinear')

        self.share_upconv = share_upconv
        if self.share_upconv:
            # NOTE: reuses the last loop value of stage_idx, i.e. the
            # channel width of the final decoder stage.
            self.lastconv = nn.Conv2d(up_channels[stage_idx], final_channels[-1],
                                      kernel_size=1, padding=0, stride=1, bias=False)
            self.rpnconv = nn.Conv2d(up_channels[stage_idx], final_channels[-1],
                                     kernel_size=1, padding=0, stride=1, bias=False)

    def forward(self, feats):
        out = feats[0]
        for idx in range(self.num_stage):
            skip = self.redir[idx](feats[idx + 1])
            out = F.relu(self.up(self.conv[idx](out)) + skip)
        if self.share_upconv:
            return self.lastconv(out), self.rpnconv(out)
        return out


class upconv_module_fpn_2(nn.Module):
    """Variant of upconv_module with a configurable per-stage upsampling
    factor (up_scales[i]) instead of a fixed 2x.
    """

    def __init__(self, in_channels, up_channels, up_scales = [2], share_upconv=False, final_channels=None, kernel1=True):
        super(upconv_module_fpn_2, self).__init__()
        self.num_stage = len(in_channels) - 1
        self.conv = nn.ModuleList()
        self.redir = nn.ModuleList()
        self.ups = nn.ModuleList()
        for stage_idx in range(self.num_stage):
            # Stage 0 may use a cheap 1x1 projection (kernel1=True).
            use_1x1 = stage_idx == 0 and kernel1
            kernel, pad = (1, 0) if use_1x1 else (3, 1)
            src_ch = in_channels[0] if stage_idx == 0 else up_channels[stage_idx - 1]
            self.conv.append(convbn(src_ch, up_channels[stage_idx], kernel, 1, pad, 1))
            self.redir.append(convbn(in_channels[stage_idx + 1], up_channels[stage_idx], 3, 1, 1, 1))
            self.ups.append(nn.Upsample(scale_factor=up_scales[stage_idx], mode='bilinear'))

        self.share_upconv = share_upconv
        if self.share_upconv:
            # NOTE: reuses the last loop value of stage_idx (final stage width).
            self.lastconv = nn.Conv2d(up_channels[stage_idx], final_channels[-1],
                                      kernel_size=1, padding=0, stride=1, bias=False)
            self.rpnconv = nn.Conv2d(up_channels[stage_idx], final_channels[-1],
                                     kernel_size=1, padding=0, stride=1, bias=False)

    def forward(self, feats):
        out = feats[0]
        for idx in range(self.num_stage):
            skip = self.redir[idx](feats[idx + 1])
            out = F.relu(self.ups[idx](self.conv[idx](out)) + skip)
        if self.share_upconv:
            return self.lastconv(out), self.rpnconv(out)
        return out


class upconv_module_fpn(nn.Module):
    """Cascaded concat-FPN decoder.

    feats[0] (coarsest) is projected and upsampled, then each finer level
    is projected and concatenated with the running feature; intermediate
    stages go through fuseconv + upsample, the last stage through
    squeezeconv.  With share_upconv two 1x1 heads produce the stereo and
    semantic outputs.

    NOTE: fuseconv1 / fuseconv2 are built but not used by forward(); they
    are kept so existing checkpoints still load.
    """

    def __init__(self, in_channels, up_channels, final_channels, final_sem_channel, up_scales=[2, 2, 4], share_upconv=False):
        super(upconv_module_fpn, self).__init__()
        self.num_stage = len(in_channels)
        self.up_scales = up_scales
        self.conv = nn.ModuleList()
        self.fuseconv = nn.ModuleList()
        # Per-level 3x3 lateral projections.
        for idx in range(self.num_stage):
            self.conv.append(nn.Sequential(
                convbn(in_channels[idx], up_channels[idx], 3, 1, 1, 1),
                nn.ReLU(inplace=True)))
        # Fusion convs for the intermediate (non-final) stages.
        for idx in range(self.num_stage - 1):
            if idx == 0:
                fuse_in = sum(up_channels[:2])
            else:
                fuse_in = sum([64, up_channels[idx + 1]])
            self.fuseconv.append(
                nn.Sequential(
                    convbn(fuse_in, 64, 3, 1, 1, gn=True),
                    nn.ReLU(inplace=True)
                )
            )
        self.fuseconv1 = nn.Sequential(
            convbn(sum(up_channels[:2]), 64, 3, 1, 1, gn=True),
            nn.ReLU(inplace=True))
        self.fuseconv2 = nn.Sequential(
            convbn(sum([64, up_channels[2]]), 64, 3, 1, 1, gn=True),
            nn.ReLU(inplace=True))
        # Final fusion + 1x1 projection to final_channels[1].
        self.squeezeconv = nn.Sequential(
            convbn(sum([64, up_channels[-1]]), final_channels[0], 3, 1, 1, gn=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(final_channels[0], final_channels[1],
                      kernel_size=1,
                      padding=0,
                      stride=1,
                      bias=False))
        self.share_upconv = share_upconv
        if self.share_upconv:
            self.lastconv = nn.Conv2d(final_channels[1], final_channels[1],
                                      kernel_size=1,
                                      padding=0,
                                      stride=1,
                                      bias=False)
            self.rpnconv = nn.Conv2d(final_channels[1], final_sem_channel,
                                     kernel_size=1,
                                     padding=0,
                                     stride=1,
                                     bias=False)

    def forward(self, feats):
        # Coarsest level: lateral projection then upsample toward level 1.
        out = F.interpolate(self.conv[0](feats[0]), scale_factor=self.up_scales[0],
                            mode='bilinear', align_corners=True)
        last = len(feats) - 1
        for idx in range(1, len(feats)):
            lateral = self.conv[idx](feats[idx])
            fused_in = torch.cat([lateral, out], dim=1)
            if idx == last:
                out = self.squeezeconv(fused_in)
            else:
                out = self.fuseconv[idx - 1](fused_in)
                out = F.interpolate(out, scale_factor=self.up_scales[idx],
                                    mode='bilinear', align_corners=True)

        if self.share_upconv:
            # Stereo head and semantic head.
            return self.lastconv(out), self.rpnconv(out)
        return out


class upconv_module_bifpn(nn.Module):
    """Single top-down BiFPN-style pass over a four-level pyramid.

    forward() only runs the top-down path P7 -> P5 -> P4 -> P3 and
    returns the P3-resolution feature (or two 1x1-projected heads when
    share_upconv is set).  The bottom-up layers (p4_out_*, p5_out_*,
    p6_out_*, p3_downsample, p4_downsample) are built but never used by
    forward(); they are kept so existing code/checkpoints keep working.

    Fusion uses fast normalized weights: (w1*a + w2*b) / (w1 + w2 + eps).

    BUGFIX: the fusion weights were previously created as plain
    ``torch.tensor(..., requires_grad=True)`` attributes.  Plain tensors
    are not registered by nn.Module, so they never appeared in
    parameters()/state_dict() (invisible to the optimizer, not saved) and
    stayed on the CPU after ``.cuda()``/``.to(device)``, crashing GPU
    runs.  They are now ``nn.Parameter``s.
    """

    def __init__(self, fpn_sizes, final_channels, W_bifpn=64, share_upconv=False):
        super(upconv_module_bifpn, self).__init__()

        # fpn_sizes is ordered coarse->fine; unpack as fine->coarse.
        P3_channels, P4_channels, P5_channels, P7_channels = fpn_sizes[::-1]
        self.W_bifpn = W_bifpn

        # P5 top-down stage: 3x3 input projection, depthwise 3x3 fusion conv.
        self.p5_td_conv  = nn.Conv2d(P5_channels, self.W_bifpn, kernel_size=3, stride=1, bias=True, padding=1)
        self.p5_td_conv_2  = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p5_td_act   = nn.ReLU()
        self.p5_td_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p5_td_w1    = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p5_td_w2    = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))

        # P4 top-down stage.
        self.p4_td_conv  = nn.Conv2d(P4_channels, self.W_bifpn, kernel_size=3, stride=1, bias=True, padding=1)
        self.p4_td_conv_2  = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p4_td_act   = nn.ReLU()
        self.p4_td_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p4_td_w1    = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p4_td_w2    = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p5_upsample = nn.Upsample(scale_factor=2, mode='nearest')

        # P3 output stage (finest level, 4x upsample from P4).
        self.p3_out_conv = nn.Conv2d(P3_channels, self.W_bifpn, kernel_size=3, stride=1, bias=True, padding=1)
        self.p3_out_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p3_out_act   = nn.ReLU()
        self.p3_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p3_out_w1   = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p3_out_w2   = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p4_upsample = nn.Upsample(scale_factor=4, mode='nearest')

        # --- Bottom-up layers below are unused by forward(); kept for
        # --- backward compatibility with existing checkpoints.
        self.p4_out_conv = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p4_out_act   = nn.ReLU()
        self.p4_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p4_out_w1   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p4_out_w2   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p4_out_w3   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p3_downsample = nn.MaxPool2d(kernel_size=4)

        self.p5_out_conv = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p5_out_act   = nn.ReLU()
        self.p5_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p5_out_w1   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p5_out_w2   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p5_out_w3   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p4_downsample = nn.MaxPool2d(kernel_size=2)

        self.p6_out_conv = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p6_out_act   = nn.ReLU()
        self.p6_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p6_out_w1   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p6_out_w2   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))
        self.p6_out_w3   = nn.Parameter(torch.tensor(1 / 3, dtype=torch.float))

        # P7 entry projection (coarsest level).
        self.p7_out_conv = nn.Conv2d(P7_channels, self.W_bifpn, kernel_size=3, stride=1, bias=True, padding=1)
        self.p7_out_conv_2 = nn.Conv2d(self.W_bifpn, self.W_bifpn, kernel_size=3, stride=1, groups=self.W_bifpn, bias=True, padding=1)
        self.p7_out_act  = nn.ReLU()
        self.p7_out_conv_bn = nn.BatchNorm2d(self.W_bifpn)
        self.p7_out_w1   = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p7_out_w2   = nn.Parameter(torch.tensor(1 / 2, dtype=torch.float))
        self.p7_upsample = nn.Upsample(scale_factor=2, mode='nearest')

        self.share_upconv = share_upconv
        if self.share_upconv:
            # Twin 1x1 heads: stereo and semantic outputs.
            self.lastconv = nn.Conv2d(self.W_bifpn, final_channels[0],
                                      kernel_size=1,
                                      padding=0,
                                      stride=1,
                                      bias=False)
            self.rpnconv = nn.Conv2d(self.W_bifpn, final_channels[0],
                                     kernel_size=1,
                                     padding=0,
                                     stride=1,
                                     bias=False)

    def forward(self, inputs):
        """Top-down pass; inputs ordered coarse->fine: (P7, P5, P4, P3).

        Spatial sizes must satisfy P5 = 2*P7, P4 = 2*P5, P3 = 4*P4
        (fixed by the nearest upsample factors below).
        """
        epsilon = 0.0001  # keeps the weighted-fusion denominator nonzero
        P7, P5, P4, P3 = inputs

        P7_td  = self.p7_out_conv(P7)

        # Fuse P5 with 2x-upsampled P7 using normalized weights.
        P5_td_inp = self.p5_td_conv(P5)
        P5_td = self.p5_td_conv_2((self.p5_td_w1 * P5_td_inp + self.p5_td_w2 * self.p7_upsample(P7_td)) /
                                 (self.p5_td_w1 + self.p5_td_w2 + epsilon))
        P5_td = self.p5_td_act(P5_td)
        P5_td = self.p5_td_conv_bn(P5_td)

        # Fuse P4 with 2x-upsampled P5.
        P4_td_inp = self.p4_td_conv(P4)
        P4_td = self.p4_td_conv_2((self.p4_td_w1 * P4_td_inp + self.p4_td_w2 * self.p5_upsample(P5_td)) /
                                 (self.p4_td_w1 + self.p4_td_w2 + epsilon))
        P4_td = self.p4_td_act(P4_td)
        P4_td = self.p4_td_conv_bn(P4_td)

        # Fuse P3 with 4x-upsampled P4.
        P3_td  = self.p3_out_conv(P3)
        P3_out = self.p3_out_conv_2((self.p3_out_w1 * P3_td + self.p3_out_w2 * self.p4_upsample(P4_td)) /
                                 (self.p3_out_w1 + self.p3_out_w2 + epsilon))
        P3_out = self.p3_out_act(P3_out)
        P3_out = self.p3_out_conv_bn(P3_out)

        if self.share_upconv:
            return self.lastconv(P3_out), self.rpnconv(P3_out)
        else:
            return P3_out


class feature_extraction_neck(nn.Module):
    """Neck that turns a multi-scale backbone pyramid into a stereo
    matching feature and, optionally, a semantic (RPN) feature.

    The stereo path concatenates pyramid levels from ``cfg.start_level``
    up (optionally augmented with SPP context branches) and refines them
    with an upconv decoder; the semantic path either reuses the
    concatenated map or runs its own decoder (``with_upconv_voxel``).
    With ``share_upconv`` a single decoder produces both outputs.

    forward() returns ``(stereo_feature, sem_feature)``; with
    ``extra_sem`` set and ``right`` False, the semantic slot is a tuple
    ``(sem_feature, extra_sem_feature)``.
    """

    def __init__(self, cfg):
        # cfg is a config namespace; optional attributes are read via
        # getattr with defaults.
        super(feature_extraction_neck, self).__init__()

        self.cfg = cfg
        self.in_dims = cfg.in_dims  # per-level channel counts of the input pyramid
        self.with_upconv = cfg.with_upconv
        self.share_upconv = getattr(cfg, 'share_upconv', False)
        self.upconv_type = getattr(cfg, 'upconv_type', 'fpn')
        self.start_level = cfg.start_level  # first pyramid level used by the concat path
        self.cat_img_feature = cfg.cat_img_feature
        self.drop_psv = getattr(cfg, 'drop_psv', False)  # skip the stereo (plane-sweep) branch
        self.with_upconv_voxel = getattr(cfg, 'with_upconv_voxel', False)
        self.mono = getattr(cfg, 'mono', False)
        self.with_sem_neck = getattr(cfg, 'with_sem_neck', False)
        self.with_spp = getattr(cfg, 'with_spp', True)
        self.extra_sem = getattr(cfg, 'extra_sem', False)

        # Sharing one decoder requires the stereo path to exist and both
        # branches to want a decoder in the first place.
        assert not self.share_upconv or (not self.drop_psv and self.with_upconv_voxel and self.with_upconv)
        assert not getattr(cfg, 'swap_feature', False)

        self.sem_dim = cfg.sem_dim
        self.stereo_dim = cfg.stereo_dim
        self.spp_dim = getattr(cfg, 'spp_dim', 32)
        if self.mono and not self.drop_psv:
            assert self.stereo_dim[-1] > 32

        concat_dim = sum(self.in_dims[self.start_level:])
        if self.with_spp:
            # Spatial-pyramid-pooling branches over the coarsest level:
            # average pool at several window sizes, 1x1 convbn, ReLU.
            self.spp_branches = nn.ModuleList([
                nn.Sequential(
                    nn.AvgPool2d(s, stride=s),
                    convbn(self.in_dims[-1],
                        self.spp_dim,
                        1, 1, 0,
                        gn=cfg.GN,
                        groups=min(32, self.spp_dim)),
                    nn.ReLU(inplace=True))
                for s in [(64, 64), (32, 32), (16, 16), (8, 8)]])
            concat_dim += self.spp_dim * len(self.spp_branches)

        if self.with_upconv and not self.drop_psv:
            assert self.start_level == 2
            # Stereo decoder; variant chosen by upconv_type.
            if self.upconv_type == 'fpn':
                self.up_dims = getattr(cfg, 'up_dims', [64, 32])
                self.kernel1 = getattr(cfg, 'kernel1', True)
                self.upconv_module = upconv_module([concat_dim, self.in_dims[1], self.in_dims[0]], self.up_dims, share_upconv=self.share_upconv, final_channels=(self.stereo_dim[-2], self.stereo_dim[-1]), kernel1=self.kernel1)
            elif self.upconv_type == 'cat':
                self.upconv_module = upconv_module_cat([concat_dim, self.in_dims[1], self.in_dims[0]], [128, 32, 32], final_channels=self.stereo_dim[-1])
            elif self.upconv_type == 'catk3':
                self.upconv_module = upconv_module_catk3([concat_dim, self.in_dims[1], self.in_dims[0]], [128, 32, 32], final_channels=(self.stereo_dim[-2], self.stereo_dim[-1]), share_upconv=self.share_upconv)
            elif self.upconv_type == 'catk4':
                self.upconv_module = upconv_module_catk4([concat_dim, self.in_dims[1], self.in_dims[0]], [128, 32, 32], final_channels=(self.stereo_dim[-2], self.stereo_dim[-1]), share_upconv=self.share_upconv)
            else:
                raise ValueError('Invalid upconv type.')
            stereo_dim = 32
        else:
            stereo_dim = concat_dim
            assert self.start_level >= 1

        if not self.drop_psv and not self.share_upconv:
            # 'cat'/'catk*' decoders already output the final stereo width,
            # so lastconv degenerates to identity.
            if (self.with_upconv and self.upconv_type != 'fpn') or self.share_upconv:
                self.lastconv = nn.Identity()
            else:
                self.lastconv = nn.Sequential(
                    convbn(stereo_dim, self.stereo_dim[0], 3, 1, 1, gn=cfg.GN),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(self.stereo_dim[0], self.stereo_dim[1],
                            kernel_size=1,
                            padding=0,
                            stride=1,
                            bias=False))
        if not self.share_upconv:
            if self.cat_img_feature or self.with_sem_neck:
                if self.with_upconv_voxel:
                    assert self.start_level == 2
                    # Separate decoder for the semantic/voxel branch.
                    if self.upconv_type == 'fpn':
                        self.up_dims = getattr(cfg, 'up_dims', [64, 32])
                        self.kernel1 = getattr(cfg, 'kernel1', True)
                        self.upconv_module_voxel = upconv_module([concat_dim, self.in_dims[1], self.in_dims[0]], self.up_dims, kernel1=self.kernel1)
                        self.rpnconv = nn.Sequential(
                            convbn(self.up_dims[-1], self.sem_dim[0], 3, 1, 1, 1, gn=cfg.GN),
                            nn.ReLU(inplace=True),
                            convbn(self.sem_dim[0], self.sem_dim[1], 3, 1, 1, gn=cfg.GN),
                            nn.ReLU(inplace=True)
                        )
                        if self.extra_sem:
                            # Second semantic head used for the left image only.
                            self.extra_rpnconv = nn.Sequential(
                                convbn(self.up_dims[-1], self.sem_dim[0], 3, 1, 1, 1, gn=cfg.GN),
                                nn.ReLU(inplace=True),
                                convbn(self.sem_dim[0], self.sem_dim[1], 3, 1, 1, gn=cfg.GN),
                                nn.ReLU(inplace=True)
                            )
                    elif self.upconv_type == 'cat':
                        self.upconv_module_voxel = upconv_module_cat([concat_dim, self.in_dims[1], self.in_dims[0]], [128, 32, 32], final_channels=self.sem_dim[-1])
                    elif self.upconv_type == 'catk3':
                        self.upconv_module_voxel = upconv_module_catk3([concat_dim, self.in_dims[1], self.in_dims[0]], [128, 32, 32], final_channels=(self.sem_dim[-2], self.sem_dim[-1]))
                    elif self.upconv_type == 'catk4':
                        self.upconv_module_voxel = upconv_module_catk4([concat_dim, self.in_dims[1], self.in_dims[0]], [128, 32, 32], final_channels=(self.sem_dim[-2], self.sem_dim[-1]))
                    else:
                        raise ValueError('Invalid upconv type.')
                else:
                    # No voxel decoder: refine the concatenated map directly.
                    self.rpnconv = nn.Sequential(
                        convbn(concat_dim, self.sem_dim[0], 3, 1, 1, 1, gn=cfg.GN),
                        nn.ReLU(inplace=True),
                        convbn(self.sem_dim[0], self.sem_dim[1], 3, 1, 1, gn=cfg.GN),
                        nn.ReLU(inplace=True)
                    )

    def forward(self, feats, right=False):
        """feats: backbone pyramid (fine -> coarse), one entry per in_dims.

        right: True when processing the right stereo image; suppresses the
        extra_sem output.
        """
        # import pdb
        # pdb.set_trace()
        # Reference resolution that SPP branches are resized back to.
        feat_shape = tuple(feats[self.start_level].shape[2:])
        assert len(feats) == len(self.in_dims)
        concat_features = feats[self.start_level:]
        if self.with_spp:
            spp_branches = []
            for branch_module in self.spp_branches:
                x = branch_module(feats[-1]) # [1,32,1,4] [1,32,2,9] [1,32,5,19] [1,32,,]
                x = F.interpolate(
                    x, feat_shape,
                    mode='bilinear',
                    align_corners=True) # [1,32,80,312]
                spp_branches.append(x)
            concat_features.extend(spp_branches)

        concat_feature = torch.cat(concat_features, 1)
        stereo_feature = concat_feature

        if not self.drop_psv:
            if self.with_upconv:
                if self.share_upconv:
                    # One decoder emits both outputs; nothing else to do.
                    stereo_feature, sem_feature = self.upconv_module([stereo_feature, feats[1], feats[0]])
                    return stereo_feature, sem_feature
                else:
                    stereo_feature = self.upconv_module([stereo_feature, feats[1], feats[0]])

            stereo_feature = self.lastconv(stereo_feature)

        if self.with_upconv_voxel and (self.cat_img_feature or self.with_sem_neck):
            sem_feature = self.upconv_module_voxel([concat_feature, feats[1], feats[0]])
        else:
            sem_feature = concat_feature

        # 'cat'/'catk*' voxel decoders already produce the final semantic
        # width, so rpnconv only applies for the 'fpn' decoder or when no
        # voxel decoder exists.
        if not self.with_upconv_voxel or self.upconv_type == 'fpn':
            if self.cat_img_feature or self.with_sem_neck:
                if self.extra_sem and not right:
                    extra_sem_feature = self.extra_rpnconv(sem_feature)
                sem_feature = self.rpnconv(sem_feature)
                if self.extra_sem and not right:
                    return stereo_feature, (sem_feature, extra_sem_feature)
            else:
                sem_feature = None

        return stereo_feature, sem_feature


class feature_extraction_neck_v2(nn.Module):
    """Second-generation neck: feeds the whole pyramid (reversed) into an
    FPN/BiFPN-style decoder instead of concatenating selected levels.

    forward() returns ``(stereo_feature, sem_feature)``.  When
    ``drop_psv`` is set there is no stereo branch and the stereo slot is
    ``None``.

    BUGFIX (forward): the non-shared ``with_upconv`` path previously had
    its decoder call commented out, and the ``drop_psv`` path never bound
    ``stereo_feature`` at all — both raised ``NameError`` at runtime.
    """

    def __init__(self, cfg):
        # cfg is a config namespace; optional attributes are read via
        # getattr with defaults.
        super(feature_extraction_neck_v2, self).__init__()

        self.cfg = cfg
        self.in_dims = cfg.in_dims  # per-level channel counts of the input pyramid
        self.with_upconv = cfg.with_upconv
        self.share_upconv = getattr(cfg, 'share_upconv', False)
        self.upconv_type = getattr(cfg, 'upconv_type', 'fpn')
        self.cat_img_feature = cfg.cat_img_feature
        self.drop_psv = getattr(cfg, 'drop_psv', False)
        self.with_upconv_voxel = getattr(cfg, 'with_upconv_voxel', False)
        self.mono = getattr(cfg, 'mono', False)
        self.with_sem_neck = getattr(cfg, 'with_sem_neck', False)
        self.with_spp = getattr(cfg, 'with_spp', True)
        self.extra_sem = getattr(cfg, 'extra_sem', False)
        # Sharing one decoder requires the stereo path plus both decoders.
        assert not self.share_upconv or (not self.drop_psv and self.with_upconv_voxel and self.with_upconv)
        assert not getattr(cfg, 'swap_feature', False)

        self.sem_dim = cfg.sem_dim
        self.stereo_dim = cfg.stereo_dim
        self.up_scales = cfg.upscales
        self.spp_dim = getattr(cfg, 'spp_dim', 32)
        if self.mono and not self.drop_psv:
            assert self.stereo_dim[-1] > 32

        # Unlike v1, only the coarsest level contributes to concat_dim.
        concat_dim = self.in_dims[-1]
        if self.with_spp:
            # NOTE(review): spp_branches are built here but never used in
            # forward() below — confirm whether SPP is still wanted in v2.
            self.spp_branches = nn.ModuleList([
                nn.Sequential(
                    nn.AvgPool2d(s, stride=s),
                    convbn(self.in_dims[-1],
                           self.spp_dim,
                           1, 1, 0,
                           gn=cfg.GN,
                           groups=min(32, self.spp_dim)),
                    nn.ReLU(inplace=True))
                for s in [(64, 64), (32, 32), (16, 16), (8, 8)]])
            concat_dim += self.spp_dim * len(self.spp_branches)

        if self.with_upconv and not self.drop_psv:
            # Stereo decoder; variant chosen by upconv_type.
            if self.upconv_type == 'cat_fpn':
                self.upconv_module = upconv_module_fpn(self.in_dims[::-1], self.cfg.up_dims[::-1],
                                                       final_channels=(self.stereo_dim[-2], self.stereo_dim[-1]),
                                                       final_sem_channel=self.sem_dim[-1],
                                                       up_scales=self.up_scales,
                                                       share_upconv=self.share_upconv)
            elif self.upconv_type == 'bifpn':
                self.upconv_module = upconv_module_bifpn(self.in_dims[::-1],
                                                         final_channels=[self.stereo_dim[0]],
                                                         share_upconv=self.share_upconv)
            elif self.upconv_type == 'cat_fpn_2':
                self.up_dims = getattr(cfg, 'up_dims', None)
                self.kernel1 = getattr(cfg, 'kernel1', True)
                assert self.up_dims is not None
                # NOTE(review): uses sem_dim for the stereo decoder's final
                # channels — confirm this is intentional.
                self.upconv_module = upconv_module_fpn_2(self.in_dims[::-1], self.up_dims[::-1], up_scales=getattr(cfg, 'upscales', [2]),
                                                         final_channels=(self.sem_dim[-2], self.sem_dim[-1]),
                                                         kernel1=self.kernel1, share_upconv=self.share_upconv)
            elif self.upconv_type == 'cat_bifpn':
                self.upscales = getattr(cfg, 'upscales', [2])
                compound_coef = getattr(cfg, 'compound_coef', 0)
                self.upconv_module = upconv_module_bifpn_2(self.in_dims, compound_coef=compound_coef, final_channels=(self.sem_dim[-2], self.sem_dim[-1]), up_scales=self.up_scales[::-1], share_upconv=self.share_upconv)
            else:
                raise ValueError('Invalid upconv type.')
            stereo_dim = 32
        else:
            stereo_dim = concat_dim

        if not self.drop_psv and not self.share_upconv:
            # All v2 decoders already emit the final stereo width, so
            # lastconv is identity whenever a decoder exists.
            if self.with_upconv or self.share_upconv:
                self.lastconv = nn.Identity()
            else:
                self.lastconv = nn.Sequential(
                    convbn(stereo_dim, self.stereo_dim[0], 3, 1, 1, gn=cfg.GN),
                    nn.ReLU(inplace=True),
                    nn.Conv2d(self.stereo_dim[0], self.stereo_dim[1],
                              kernel_size=1,
                              padding=0,
                              stride=1,
                              bias=False))
        if not self.share_upconv:
            if self.cat_img_feature or self.with_sem_neck:
                if self.with_upconv_voxel:
                    # Separate decoder for the semantic/voxel branch.
                    if self.upconv_type == 'cat_fpn':
                        self.upconv_module_voxel = upconv_module_fpn(self.in_dims[::-1], [64, 64, 64, 64],
                                                                     final_channels=(self.sem_dim[-2], self.sem_dim[-1]))
                    elif self.upconv_type == 'bifpn':
                        # NOTE(review): this overwrites self.upconv_module
                        # (not upconv_module_voxel) — preserved as-is; confirm.
                        self.upconv_module = upconv_module_bifpn(self.in_dims[::-1],
                                                                 final_channels=[self.sem_dim[0]])
                    elif self.upconv_type == 'cat_fpn_2':
                        self.up_dims = getattr(cfg, 'up_dims', None)
                        self.kernel1 = getattr(cfg, 'kernel1', True)
                        assert self.up_dims is not None
                        self.upconv_module_voxel = upconv_module_fpn_2(self.in_dims[::-1], self.up_dims[::-1], up_scales=getattr(cfg, 'upscales', [2]),
                                                                       final_channels=(self.sem_dim[-2], self.sem_dim[-1]),
                                                                       kernel1=self.kernel1, share_upconv=self.share_upconv)
                    else:
                        raise ValueError('Invalid upconv type.')
                else:
                    # No voxel decoder: refine the coarsest map directly.
                    self.rpnconv = nn.Sequential(
                        convbn(concat_dim, self.sem_dim[0], 3, 1, 1, 1, gn=cfg.GN),
                        nn.ReLU(inplace=True),
                        convbn(self.sem_dim[0], self.sem_dim[1], 3, 1, 1, gn=cfg.GN),
                        nn.ReLU(inplace=True)
                    )

    def forward(self, feats, right=False):
        """feats: backbone pyramid (fine -> coarse); decoders consume it
        reversed (coarse -> fine).  Returns (stereo_feature, sem_feature);
        stereo_feature is None when drop_psv is set."""
        assert len(feats) == len(self.in_dims)

        stereo_feature = None  # stays None when the PSV branch is dropped
        if not self.drop_psv:
            if self.with_upconv:
                if self.share_upconv:
                    # One decoder emits both outputs.
                    stereo_feature, sem_feature = self.upconv_module(feats[::-1])
                    return stereo_feature, sem_feature
                # BUGFIX: this assignment was commented out, leaving
                # stereo_feature unbound before lastconv below.
                stereo_feature = self.upconv_module(feats[::-1])
            else:
                stereo_feature = feats[-1]
            stereo_feature = self.lastconv(stereo_feature)

        if self.with_upconv_voxel and (self.cat_img_feature or self.with_sem_neck):
            sem_feature = self.upconv_module_voxel(feats[::-1])
        else:
            sem_feature = feats[-1]

        return stereo_feature, sem_feature

class CrossCueFusion(nn.Module):
    """Fuse monocular and multi-view (stereo) depth-cue volumes.

    Builds two symmetric strided conv stacks (``mono_expand`` and
    ``multi_expand``) that project ``cv_hypo_num``-channel cost-volume
    hypotheses to ``mid_dim`` channels while halving the spatial resolution
    at each stage. Wide inputs (width > 650) get one extra stride-2 stage.

    Args:
        cv_hypo_num: number of input channels (cost-volume hypotheses).
        mid_dim: number of output channels of every stage.
        input_size: (height, width) of the incoming maps; only the width is
            inspected to decide how many downsampling stages to use.
    """

    def __init__(self, cv_hypo_num=32, mid_dim=32, input_size=(256, 512)):
        super().__init__()
        self.cv_hypo_num = cv_hypo_num
        self.mid_dim = mid_dim
        self.residual_connection = True
        # Wider inputs need an extra stride-2 stage to reach a comparable size.
        self.is_reduce = input_size[1] > 650

        num_stages = 3 if self.is_reduce else 2
        self.mono_expand = self._make_expand(num_stages)
        # NOTE(review): the original code omitted multi_expand in the
        # is_reduce branch, which would raise AttributeError wherever the
        # fusion actually uses both cues; restored for symmetry.
        self.multi_expand = self._make_expand(num_stages)

    def _make_expand(self, num_stages):
        """Build ``num_stages`` stride-2 Conv-BN-ReLU blocks (cv_hypo_num -> mid_dim).

        BUGFIX: stages after the first take ``mid_dim`` input channels (the
        previous stage's output), not ``cv_hypo_num`` — the original crashed
        whenever ``cv_hypo_num != mid_dim``.
        """
        layers = []
        in_ch = self.cv_hypo_num
        for _ in range(num_stages):
            layers += [
                nn.Conv2d(in_ch, self.mid_dim, kernel_size=3, padding=1, stride=2),
                nn.BatchNorm2d(self.mid_dim),
                nn.ReLU(inplace=True),
            ]
            in_ch = self.mid_dim
        return nn.Sequential(*layers)

class upconv_module_bifpn_2(nn.Module):
    """Up-convolution head built from stacked BiFPN cells.

    Feeds the (reversed) feature pyramid through ``fpn_cell_repeats`` BiFPN
    layers and returns the finest-scale output map. When ``share_upconv`` is
    set, the shared feature is additionally projected by two 1x1 convs into
    a stereo branch (``lastconv``) and an RPN/semantic branch (``rpnconv``).
    """

    def __init__(self, in_channels, final_channels, compound_coef=0, up_scales=[2, 2, 4], share_upconv=False):
        super(upconv_module_bifpn_2, self).__init__()
        self.in_channels = in_channels
        self.final_channels = final_channels
        self.up_scales = up_scales
        self.share_upconv = share_upconv
        num_filters, num_repeats = self.get_bifpn_parms(compound_coef)
        self.fpn_num_filters = num_filters
        self.fpn_cell_repeats = num_repeats

        cells = []
        for idx in range(num_repeats):
            cells.append(BiFPN(num_filters,
                               in_channels,
                               up_scales,
                               idx == 0,  # only the first cell adapts raw inputs
                               attention=compound_coef < 6,
                               use_p8=compound_coef > 7))
        self.bifpn = nn.Sequential(*cells)

        if self.share_upconv:
            # NOTE(review): both heads project to final_channels[1]; confirm
            # lastconv was not meant to use final_channels[0].
            self.lastconv = nn.Conv2d(num_filters, final_channels[1],
                                      kernel_size=1, padding=0, stride=1,
                                      bias=False)
            self.rpnconv = nn.Conv2d(num_filters, final_channels[1],
                                     kernel_size=1, padding=0, stride=1,
                                     bias=False)

    def get_bifpn_parms(self, compound_coef):
        """Return (num_filters, cell_repeats) for an EfficientDet-style compound coefficient."""
        filters_per_coef = [64, 88, 112, 160, 224, 288, 384, 384, 384]
        repeats_per_coef = [2, 4, 5, 6, 7, 7, 8, 8, 8]
        return filters_per_coef[compound_coef], repeats_per_coef[compound_coef]

    def forward(self, feats):
        """Run BiFPN on the reversed pyramid; return the finest map (split in two when shared)."""
        pyramid_out = self.bifpn(feats[::-1])
        finest = pyramid_out[0]
        if not self.share_upconv:
            return finest
        return self.lastconv(finest), self.rpnconv(finest)