import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.ops import DeformConv2d


class Swish(nn.Module):
    """Swish activation: ``x * sigmoid(x)``.

    Args:
        inplace (bool): if True, multiply into ``x`` in place and return the
            same tensor object (mutates the caller's tensor).
    """

    def __init__(self, inplace=True):
        super(Swish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        if self.inplace:
            # NOTE(review): in-place mutation of an autograd input can break
            # backward for some upstream ops; kept for parity with the
            # original — prefer inplace=False when gradients are needed.
            x.mul_(torch.sigmoid(x))
            return x
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        return x * torch.sigmoid(x)


def get_activation(name="silu", inplace=True):
    """Resolve an activation spec into an ``nn.Module`` instance.

    Args:
        name (str | nn.Module | None): activation name, a ready-made module
            (returned unchanged), or None for ``nn.Identity``.
        inplace (bool): forwarded to activations that support it.

    Raises:
        AttributeError: if ``name`` is an unknown string or unsupported type.
    """
    if name is None:
        return nn.Identity()

    if isinstance(name, nn.Module):
        # Caller already built the module; pass it through untouched.
        return name

    if not isinstance(name, str):
        raise AttributeError("Unsupported act type: {}".format(name))

    factories = {
        "silu": lambda: nn.SiLU(inplace=inplace),
        "relu": lambda: nn.ReLU(inplace=inplace),
        "lrelu": lambda: nn.LeakyReLU(0.1, inplace=inplace),
        "swish": lambda: Swish(inplace=inplace),
        "hardsigmoid": lambda: nn.Hardsigmoid(inplace=inplace),
        "hardswish": lambda: nn.Hardswish(inplace=inplace),
    }
    if name not in factories:
        raise AttributeError("Unsupported act type: {}".format(name))
    return factories[name]()


class ConvBNLayer(nn.Module):
    """Bias-free Conv2d followed by BatchNorm2d and an optional activation.

    Args:
        ch_in (int): input channels.
        ch_out (int): output channels.
        filter_size (int): convolution kernel size.
        stride (int): convolution stride.
        groups (int): convolution groups.
        padding (int): convolution padding.
        act (str | nn.Module | None): activation spec understood by
            ``get_activation``; None yields identity.
    """

    def __init__(self,
                 ch_in,
                 ch_out,
                 filter_size=3,
                 stride=1,
                 groups=1,
                 padding=0,
                 act=None):
        super(ConvBNLayer, self).__init__()
        # Conv bias is redundant here: BN's affine shift plays that role.
        self.conv = nn.Conv2d(ch_in, ch_out,
                              kernel_size=filter_size,
                              stride=stride,
                              padding=padding,
                              groups=groups,
                              bias=False)
        self.bn = nn.BatchNorm2d(ch_out)
        self.act = get_activation(act, inplace=True)

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))

class SEModule(nn.Module):
    """Squeeze-and-Excitation channel attention.

    Global-average-pools the input, squeezes channels by ``reduction``,
    expands back, and rescales the input with the resulting per-channel gate.

    Args:
        channel (int): number of input/output channels.
        reduction (int): bottleneck reduction ratio for the squeeze stage.
    """

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv1 = ConvBNLayer(
            ch_in=channel,
            ch_out=channel // reduction,
            filter_size=1,
            stride=1,
            padding=0,
            act="relu"
        )
        self.conv2 = ConvBNLayer(
            ch_in=channel // reduction,
            ch_out=channel,
            filter_size=1,
            stride=1,
            padding=0,
            act="hardsigmoid"
        )

    def forward(self, x):
        identity = x
        x = self.avg_pool(x)
        x = self.conv1(x)
        x = self.conv2(x)
        # Fix: torch.multiply has no ``x=``/``y=`` keywords (Paddle leftover
        # that raised TypeError); scale the input by the (N, C, 1, 1) gate
        # via broadcasting instead.
        return identity * x

class DepthwiseSeparable(nn.Module):
    """Depthwise conv -> optional SE -> pointwise conv (MobileNet-style).

    Args:
        ch_in (int): input channels.
        ch_out (int): output channels of the pointwise projection.
        stride (int): stride of the depthwise stage.
        dw_size (int): depthwise kernel size.
        use_se (bool): insert an SEModule between the two stages.
    """

    def __init__(self, ch_in, ch_out, stride, dw_size=3, use_se=False):
        super(DepthwiseSeparable, self).__init__()
        self.use_se = use_se
        # Depthwise stage: one filter per input channel (groups == ch_in).
        self.dw_conv = ConvBNLayer(
            ch_in=ch_in,
            ch_out=ch_in,
            filter_size=dw_size,
            stride=stride,
            groups=ch_in,
            padding=(dw_size - 1) // 2,
            act="hardswish")
        if use_se:
            self.se = SEModule(ch_in)
        # Pointwise stage: 1x1 projection to ch_out.
        self.pw_conv = ConvBNLayer(
            ch_in=ch_in,
            ch_out=ch_out,
            filter_size=1,
            stride=1,
            padding=0,
            act="hardswish")

    def forward(self, x):
        out = self.dw_conv(x)
        if self.use_se:
            out = self.se(out)
        return self.pw_conv(out)

class Channel_T(nn.Module):
    """Project each input feature map to a common channel count via 1x1 convs.

    Args:
        ch_in (list[int]): channel count of each incoming feature map.
        ch_out (int): common output channel count.
        act (str): activation name forwarded to ConvBNLayer.
    """

    def __init__(self, ch_in=[116, 232, 464], ch_out=96, act="lrelu"):
        super(Channel_T, self).__init__()
        self.convs = nn.ModuleList(
            ConvBNLayer(ch_in=c, ch_out=ch_out, filter_size=1, act=act)
            for c in ch_in
        )

    def forward(self, x):
        # One projection per input level, in order.
        return [self.convs[i](feat) for i, feat in enumerate(x)]

class DPModule(nn.Module):
    """
    Depth-wise and point-wise module.
     Args:
        in_channel (int): The input channels of this Module.
        out_channel (int): The output channels of this Module.
        kernel_size (int): The conv2d kernel size of this Module.
        stride (int): The conv2d's stride of this Module.
        act (str): The activation function of this Module,
                   Now support `leaky_relu` and `hard_swish`.
    """

    def __init__(self,
                 in_channel=96,
                 out_channel=96,
                 kernel_size=3,
                 stride=1,
                 act='lrelu',
                 use_act_in_out=True):
        super(DPModule, self).__init__()
        self.use_act_in_out = use_act_in_out
        # Depthwise stage (grouped conv + BN). NOTE(review): groups is set to
        # out_channel, so in_channel must be divisible by out_channel — true
        # for the default in_channel == out_channel usage.
        self.dwconv = nn.Conv2d(
            in_channel, out_channel,
            kernel_size=kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            groups=out_channel,
            bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)
        # Pointwise stage (1x1 conv + BN).
        self.pwconv = nn.Conv2d(
            out_channel, out_channel,
            kernel_size=1,
            padding=0,
            groups=1,
            bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

        # Normalize Paddle-style aliases to the names get_activation knows.
        aliases = {"hard_swish": "hardswish", "leakyrelu": "lrelu"}
        self.act = get_activation(aliases.get(act, act))

        # He init for convs; unit-gamma / zero-beta for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(
                    m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def forward(self, x):
        out = self.bn1(self.dwconv(x))
        if self.act:
            out = self.act(out)
        out = self.bn2(self.pwconv(out))
        if self.use_act_in_out and self.act:
            out = self.act(out)
        return out

class DeformableConvV2(nn.Module):
    """Modulated deformable convolution (DCNv2).

    A regular conv branch predicts per-position offsets plus a sigmoid
    modulation mask, which are fed to torchvision's ``DeformConv2d``.

    Ported from PaddleDetection. ``weight_attr``, ``bias_attr``, ``lr_scale``,
    ``regularizer`` and ``dcn_bias_lr_scale`` are Paddle parameter-attribute
    knobs kept only for signature compatibility — per-parameter learning
    rates/regularizers have no direct torch equivalent here, and both conv
    branches run without bias (as in the original port).
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 groups=1,
                 weight_attr=None,
                 bias_attr=None,
                 lr_scale=1,
                 regularizer=None,
                 skip_quant=False,
                 dcn_bias_lr_scale=2.):
        # Fix: base class was Paddle's nn.Layer; torch modules derive from
        # nn.Module.
        super(DeformableConvV2, self).__init__()
        # Offset branch emits 3*K^2 channels: 2*K^2 offsets + K^2 mask logits.
        self.offset_channel = 2 * kernel_size**2
        self.mask_channel = kernel_size**2

        self.conv_offset = nn.Conv2d(
            in_channels,
            3 * kernel_size**2,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2,
            bias=False)
        # DCNv2 convention (and the Paddle original's Constant(0.) intent):
        # zero offsets/mask logits at start so the layer initially behaves
        # like a plain convolution.
        nn.init.constant_(self.conv_offset.weight, 0.)

        if skip_quant:
            # Marker consumed by quantization tooling to skip this conv.
            self.conv_offset.skip_quant = True

        self.conv_dcn = DeformConv2d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=(kernel_size - 1) // 2 * dilation,
            dilation=dilation,
            groups=groups,
            bias=False)

    def _init_weights(self):
        pass

    def forward(self, x):
        offset_mask = self.conv_offset(x)
        # Fix: torch.split takes (sizes, dim=...), not Paddle's
        # num_or_sections=/axis= keywords.
        offset, mask = torch.split(
            offset_mask, [self.offset_channel, self.mask_channel], dim=1)
        mask = torch.sigmoid(mask)
        return self.conv_dcn(x, offset, mask=mask)

class ConvNormLayer(nn.Module):
    """Convolution (plain or DCNv2) followed by an optional normalization.

    Args:
        ch_in (int): input channels.
        ch_out (int): output channels.
        filter_size (int): kernel size; padding keeps spatial size at stride 1.
        stride (int): convolution stride.
        groups (int): convolution groups (plain-conv path).
        norm_type (str | None): 'bn'/'sync_bn' -> BatchNorm2d, 'gn' ->
            GroupNorm, None -> no normalization.
        norm_groups (int): group count for GroupNorm.
        use_dcn (bool): use DeformableConvV2 instead of nn.Conv2d.
        bias_on (bool): conv bias flag for the plain-conv path.
        skip_quant (bool): mark the conv to be skipped by quantization tooling.
        norm_decay / lr_scale / freeze_norm / dcn_lr_scale: Paddle-port knobs,
            currently unused in the torch path (kept for interface parity).
    """

    def __init__(self, ch_in=96,
                       ch_out=96,
                       filter_size=3,
                       stride=1,
                       groups=1,
                       norm_type='bn',
                       norm_decay=0.,
                       norm_groups=32,
                       use_dcn=False,
                       bias_on=False,
                       lr_scale=1.,
                       freeze_norm=False,
                       skip_quant=False,
                       dcn_lr_scale=2.,
                       # dcn_regularizer=L2Decay(0.)
                       ):
        # Fix: super() named the wrong class (ConvBNLayer) — that breaks
        # initialization of this class.
        super(ConvNormLayer, self).__init__()
        assert norm_type in ['bn', 'sync_bn', 'gn', None]

        if not use_dcn:
            self.conv = nn.Conv2d(
                in_channels=ch_in,
                out_channels=ch_out,
                kernel_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                groups=groups,
                bias=bias_on
            )
            if skip_quant:
                self.conv.skip_quant = True
        else:
            # in FCOS-DCN head, specifically need learning_rate and regularizer
            self.conv = DeformableConvV2(
                in_channels=ch_in,
                out_channels=ch_out,
                kernel_size=filter_size,
                stride=stride,
                padding=(filter_size - 1) // 2,
                groups=groups,
                weight_attr=None,
                bias_attr=True,
                lr_scale=dcn_lr_scale,
                # regularizer=dcn_regularizer,
                # dcn_bias_regularizer=dcn_regularizer,
                dcn_bias_lr_scale=dcn_lr_scale,
                skip_quant=skip_quant
            )

        if norm_type in ['bn', 'sync_bn']:
            # sync_bn falls back to plain BatchNorm2d in this port.
            self.norm = nn.BatchNorm2d(ch_out)
        elif norm_type == 'gn':
            self.norm = nn.GroupNorm(
                num_groups=norm_groups,
                num_channels=ch_out
            )
        else:
            self.norm = None

    def forward(self, x):
        out = self.conv(x)
        if self.norm is not None:
            out = self.norm(out)
        return out

class DGQP(nn.Module):
    """Distribution-Guided Quality Predictor of GFocal head
    Args:
        reg_topk (int): top-k statistics of distribution to guide LQE
        reg_channels (int): hidden layer unit to generate LQE
        add_mean (bool): Whether to calculate the mean of top-k statistics
    """

    def __init__(self, reg_topk=4, reg_channels=64, add_mean=True):
        super(DGQP, self).__init__()
        self.reg_topk = reg_topk
        self.reg_channels = reg_channels
        self.add_mean = add_mean
        # One extra statistic per side when the top-k mean is appended.
        self.total_dim = reg_topk
        if add_mean:
            self.total_dim += 1
        # 4 box sides, total_dim statistics each.
        self.reg_conv1 = nn.Conv2d(
            in_channels=4 * self.total_dim,
            out_channels=self.reg_channels,
            kernel_size=1)
        self.reg_conv2 = nn.Conv2d(
            in_channels=self.reg_channels,
            out_channels=1,
            kernel_size=1)
        # Small gaussian init, zero bias for both 1x1 convs.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(mean=0., std=0.01)
                m.bias.data.fill_(0.)

    def forward(self, x):
        """Predict a per-position quality score in (0, 1) from the box
        distribution logits.
        Args:
            x (Tensor): regression logits, shape (N, 4*(reg_max+1), H, W).
        Returns:
            Tensor: quality scores, shape (N, 1, H, W).
        """
        N, _, H, W = x.shape[:]
        prob = F.softmax(x.reshape([N, 4, -1, H, W]), dim=2)
        prob_topk, _ = prob.topk(self.reg_topk, dim=2)
        if self.add_mean:
            # Consistency: use torch's dim= keyword (was Paddle-style axis=).
            stat = torch.cat(
                [prob_topk, prob_topk.mean(dim=2, keepdim=True)], dim=2)
        else:
            stat = prob_topk
        y = F.relu(self.reg_conv1(stat.reshape([N, -1, H, W])))
        # F.sigmoid is deprecated; torch.sigmoid is the supported spelling.
        y = torch.sigmoid(self.reg_conv2(y))
        return y

class Integral(nn.Module):
    """A fixed layer for calculating integral result from distribution.
    This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,
    P(y_i) denotes the softmax vector that represents the discrete distribution
    y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}
    Args:
        reg_max (int): The maximal value of the discrete set. Default: 16. You
            may want to reset it according to your new dataset or related
            settings.
    """

    def __init__(self, reg_max=16):
        # Fix: base class was Paddle's nn.Layer; torch modules derive from
        # nn.Module.
        super(Integral, self).__init__()
        self.reg_max = reg_max
        # Non-trainable projection vector [0, 1, ..., reg_max]; registered as
        # a buffer so it moves with .to(device) and is saved in state_dict.
        self.register_buffer('project',
                             torch.linspace(0, self.reg_max, self.reg_max + 1))

    def forward(self, x):
        """Forward feature from the regression head to get integral result of
        bounding box location.
        Args:
            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
                n is self.reg_max.
        Returns:
            x (Tensor): Integral result of box locations, i.e., distance
                offsets from the box center in four directions, shape (N, 4)
                in training mode, flat (N*4,) otherwise.
        """
        # Fix: F.softmax takes dim=, not Paddle's axis= (raised TypeError).
        x = F.softmax(x.reshape([-1, self.reg_max + 1]), dim=1)
        # Expectation over the discrete set: each row dot [0..reg_max].
        x = F.linear(x, self.project)
        if self.training:
            x = x.reshape([-1, 4])
        return x