import math
import torch
import torch.nn as nn
# import torch.fx
from torch import Tensor
import torch.nn.functional as F
from collections import OrderedDict
from batchrenorm import BatchRenorm2d
#-------------------------------------------------#
#   MISH激活函数
#-------------------------------------------------#
class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))``.

    Smooth, non-monotonic activation from "Mish: A Self Regularized
    Non-Monotonic Neural Activation Function" (Misra, 2019).
    """

    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        # softplus(x) = ln(1 + e^x); the tanh gate keeps the output smooth.
        gate = torch.tanh(F.softplus(x))
        return x * gate

#---------------------------------------------------#
#   卷积块 -> 卷积 + 标准化 + 激活函数        ：CBM;把BatchNorm2d换成了BatchRenorm2d;
#   Conv2d + BatchNormalization + Mish
#---------------------------------------------------#
class BasicConv(nn.Module):
    """Conv -> DropBlock -> BatchNorm -> Mish ("CBM") block.

    Padding of ``kernel_size // 2`` keeps the spatial size unchanged when
    ``stride`` is 1; the conv has no bias because BatchNorm follows.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        super(BasicConv, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size,
                              stride, kernel_size // 2, bias=False)
        self.dropblock = DropBlock()
        self.bn = nn.BatchNorm2d(out_channels)
        self.activation = Mish()

    def forward(self, x):
        # Apply conv, DropBlock, batch norm and Mish, in that order.
        out = self.conv(x)
        out = self.dropblock(out)
        out = self.bn(out)
        return self.activation(out)

#---------------------------------------------------#
#                   CBL结构
#-----------------out_channels----------------------#
def conv2d(filter_in, filter_out, kernel_size, stride=1):
    """Build a Conv + BatchNorm + LeakyReLU ("CBL") block.

    Padding of ``(kernel_size - 1) // 2`` keeps the spatial size unchanged
    for stride 1; a kernel size of 0 (falsy) yields no padding.
    """
    pad = (kernel_size - 1) // 2 if kernel_size else 0
    layers = OrderedDict()
    layers["conv"] = nn.Conv2d(filter_in, filter_out, kernel_size=kernel_size,
                               stride=stride, padding=pad, bias=False)
    layers["bn"] = nn.BatchNorm2d(filter_out)
    layers["relu"] = nn.LeakyReLU(0.1)
    return nn.Sequential(layers)
#---------------------------------------------------#
#   SPP结构，利用不同大小的池化核进行池化
#   池化后堆叠
#---------------------------------------------------#
class SPPLayer(torch.nn.Module):
    """Classic spatial pyramid pooling (He et al., 2015).

    Pools the input at ``num_levels`` pyramid levels — level ``i`` tiles the
    map into roughly ``i x i`` windows — flattens each result, and
    concatenates them into a fixed-length vector per sample.
    """

    def __init__(self, num_levels, pool_type='max_pool'):
        super(SPPLayer, self).__init__()
        self.num_levels = num_levels
        # 'max_pool' selects max pooling; anything else falls back to average.
        self.pool_type = pool_type

    def forward(self, x):
        batch, _, h, w = x.size()
        flattened = []
        for level in range(1, self.num_levels + 1):
            # Kernel/stride sized so `level` windows cover each dimension.
            kernel_size = (math.ceil(h / level), math.ceil(w / level))
            stride = (math.ceil(h / level), math.ceil(w / level))
            # Padding compensates for the ceil rounding above.
            pooling = (math.floor((kernel_size[0] * level - h + 1) / 2),
                       math.floor((kernel_size[1] * level - w + 1) / 2))
            pool_fn = F.max_pool2d if self.pool_type == 'max_pool' else F.avg_pool2d
            pooled = pool_fn(x, kernel_size=kernel_size, stride=stride, padding=pooling)
            flattened.append(pooled.view(batch, -1))
        # Concatenate all levels into one vector per sample.
        return torch.cat(flattened, dim=1)
class SpatialPyramidPooling(nn.Module):
    """YOLOv4-style SPP block: parallel max-pools concatenated with the input.

    Each pool uses stride 1 and half padding, so the spatial size is kept and
    only the channel dimension grows (to ``(len(pool_sizes) + 1) * C``).
    """

    def __init__(self, pool_sizes=[5, 9, 13]):
        super(SpatialPyramidPooling, self).__init__()
        self.maxpools = nn.ModuleList(
            [nn.MaxPool2d(size, 1, size // 2) for size in pool_sizes]
        )

    def forward(self, x):
        # Largest kernel first (reverse of pool_sizes), identity branch last.
        branches = [pool(x) for pool in reversed(self.maxpools)]
        branches.append(x)
        return torch.cat(branches, dim=1)

class DropBlock(nn.Module):
    """DropBlock regularization (Ghiasi et al., 2018).

    Drops contiguous ``block_size x block_size`` regions of the feature map
    instead of independent activations, which is more effective on
    spatially-correlated conv features than plain dropout.

    Args:
        drop_prob: probability controlling how many block seeds are drawn.
        block_size: side length of each dropped square region.
    """

    def __init__(self, drop_prob=0.2, block_size=7):
        super(DropBlock, self).__init__()

        self.drop_prob = drop_prob
        self.block_size = block_size

    def forward(self, x):
        # Bug fix: the original applied DropBlock during evaluation too.
        # Like Dropout, it must be the identity outside training.
        if not self.training or self.drop_prob == 0:
            return x
        # Seed probability per position; each seed expands to a
        # block_size^2 region, so divide to keep the expected drop ratio.
        gamma = self.drop_prob / (self.block_size ** 2)
        # Draw the seed mask directly on x's device and dtype (the original
        # sampled on CPU and moved it afterwards). One mask per sample,
        # shared across channels.
        mask = (torch.rand(x.shape[0], *x.shape[2:], device=x.device) < gamma).to(x.dtype)

        # Dilate seeds into full blocks and invert (1 = keep, 0 = drop).
        block_mask = self._compute_block_mask(mask)
        # Zero the dropped regions (broadcast over the channel dimension).
        out = x * block_mask[:, None, :, :]
        # Re-normalize so the expected activation magnitude is preserved.
        # clamp guards the degenerate all-dropped case (sum == 0), which
        # previously produced inf/NaN.
        out = out * block_mask.numel() / block_mask.sum().clamp(min=1.0)
        return out

    def _compute_block_mask(self, mask):
        # Max-pooling dilates each seed into a block_size x block_size square
        # of ones; stride 1 keeps the spatial size (modulo padding).
        block_mask = F.max_pool2d(input=mask[:, None, :, :],
                                  kernel_size=(self.block_size,
                                               self.block_size),
                                  stride=(1, 1),
                                  padding=self.block_size // 2)

        if self.block_size % 2 == 0:
            # Even block sizes pad one extra row/column; trim back to the
            # input's spatial size.
            block_mask = block_mask[:, :, :-1, :-1]
        # Invert: dropped area becomes 0, kept area becomes 1.
        block_mask = 1 - block_mask.squeeze(1)

        return block_mask

#---------------------------------------------------#
#   空洞卷积  ：DBM  ：把BatchNorm2d换成了BatchRenorm2d;
#---------------------------------------------------#
class CDilated(nn.Module):
    """Dilated conv -> DropBlock -> BatchNorm -> Mish ("DBM") block."""

    def __init__(self, nIn, nOut, kSize, stride=1, d=1):
        """
        :param nIn: number of input channels
        :param nOut: number of output channels
        :param kSize: kernel size
        :param stride: optional stride rate for down-sampling
        :param d: optional dilation rate
        """
        super().__init__()
        # Padding scaled by the dilation keeps the effective receptive
        # field centered and the output size stable for stride 1.
        padding = int((kSize - 1) / 2) * d
        self.conv = nn.Conv2d(nIn, nOut, (kSize, kSize), stride=stride,
                              padding=(padding, padding), bias=False, dilation=d)
        self.dropblock = DropBlock()
        self.bn = nn.BatchNorm2d(nOut)
        self.activation = Mish()

    def forward(self, input):
        """
        :param input: input feature map
        :return: transformed feature map
        """
        out = self.conv(input)
        out = self.dropblock(out)
        out = self.bn(out)
        return self.activation(out)

# #---------------------------------------------------#
# #   CSPdarknet的结构块的组成部分    ：res unit
# #   内部堆叠的残差块
# #---------------------------------------------------#
# class Resblock(nn.Module):
#     def __init__(self, channels, hidden_channels=None):
#         super(Resblock, self).__init__()
#
#         if hidden_channels is None:
#             hidden_channels = channels
#
#         self.block = nn.Sequential(
#             BasicConv(channels, hidden_channels, 1),
#             BasicConv(hidden_channels, channels, 3)
#         )
#
#     def forward(self, x):
#         return x + self.block(x)
#
# #--------------------------------------------------------------------#
# #   CSPdarknet的结构块        ：CSPX     无空洞卷积
# #   首先利用ZeroPadding2D和一个步长为2x2的卷积块进行高和宽的压缩
# #   然后建立一个大的残差边shortconv、这个大残差边绕过了很多的残差结构
# #   主干部分会对num_blocks进行循环，循环内部是残差结构。
# #   对于整个CSPdarknet的结构块，就是一个大残差块+内部多个小残差块
# #--------------------------------------------------------------------#
# class Resblock_body(nn.Module):
#     def __init__(self, in_channels, out_channels, num_blocks, first):
#         super(Resblock_body, self).__init__()
#         #----------------------------------------------------------------#
#         #   利用一个步长为2x2的卷积块进行高和宽的压缩
#         #----------------------------------------------------------------#
#         self.downsample_conv = BasicConv(in_channels, out_channels, 3, stride=2)
#
#         if first:
#             #--------------------------------------------------------------------------#
#             #   然后建立一个大的残差边self.split_conv0、这个大残差边绕过了很多的残差结构
#             #--------------------------------------------------------------------------#
#             self.split_conv0 = BasicConv(out_channels, out_channels, 1)
#
#             #----------------------------------------------------------------#
#             #   主干部分会对num_blocks进行循环，循环内部是残差结构。
#             #----------------------------------------------------------------#
#             self.split_conv1 = BasicConv(out_channels, out_channels, 1)
#             self.blocks_conv = nn.Sequential(
#                 Resblock(channels=out_channels, hidden_channels=out_channels//2),
#                 BasicConv(out_channels, out_channels, 1)
#             )
#
#             self.concat_conv = BasicConv(out_channels*2, out_channels, 1)
#         else:
#             #--------------------------------------------------------------------------#
#             #   然后建立一个大的残差边self.split_conv0、这个大残差边绕过了很多的残差结构
#             #--------------------------------------------------------------------------#
#             self.split_conv0 = BasicConv(out_channels, out_channels//2, 1)
#
#             #----------------------------------------------------------------#
#             #   主干部分会对num_blocks进行循环，循环内部是残差结构。
#             #----------------------------------------------------------------#
#             self.split_conv1 = BasicConv(out_channels, out_channels//2, 1)
#             self.blocks_conv = nn.Sequential(
#                 *[Resblock(out_channels//2) for _ in range(num_blocks)],
#                 BasicConv(out_channels//2, out_channels//2, 1)
#             )
#
#             self.concat_conv = BasicConv(out_channels, out_channels, 1)
#
#     def forward(self, x):
#         x = self.downsample_conv(x)
#
#         x0 = self.split_conv0(x)
#
#         x1 = self.split_conv1(x)
#         x1 = self.blocks_conv(x1)
#
#         #------------------------------------#
#         #   将大残差边再堆叠回来
#         #------------------------------------#
#         x = torch.cat([x1, x0], dim=1)
#         #------------------------------------#
#         #   最后对通道数进行整合
#         #------------------------------------#
#         x = self.concat_conv(x)
#
#         return x
# #---------------------------------------------------#
# #   CSPdarknet53 的主体部分      无空洞卷积
# #   输入为一张416x416x3的图片
# #   输出为三个有效特征层
# #---------------------------------------------------#
# class CSPDarkNet(nn.Module):
#     def __init__(self, layers):
#         super(CSPDarkNet, self).__init__()
#         self.inplanes = 8  #需要改动的初始频道数
#         # self.inplanes = 32
#         # 416,416,3 -> 416,416,32
#         self.conv1 = BasicConv(3, self.inplanes, kernel_size=3, stride=1)
#         self.feature_channels = [16,24,64,112,320]             #需要改动的频道数
#         # self.feature_channels = [64, 128, 256, 512, 1024]
#         self.stages = nn.ModuleList([
#             # 416,416,32 -> 208,208,64
#             Resblock_body(self.inplanes, self.feature_channels[0], layers[0], first=True),
#             # 208,208,64 -> 104,104,128
#             Resblock_body(self.feature_channels[0], self.feature_channels[1], layers[1], first=False),
#             # 104,104,128 -> 52,52,256
#             Resblock_body(self.feature_channels[1], self.feature_channels[2], layers[2], first=False),
#             # 52,52,256 -> 26,26,512
#             Resblock_body(self.feature_channels[2], self.feature_channels[3], layers[3], first=False),
#             # 26,26,512 -> 13,13,1024
#             Resblock_body(self.feature_channels[3], self.feature_channels[4], layers[4], first=False)
#         ])
#
#         self.num_features = 3
#         for m in self.modules():
#             if isinstance(m, nn.Conv2d):
#                 n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
#                 m.weight.data.normal_(0, math.sqrt(2. / n))
#             elif isinstance(m, nn.BatchNorm2d):
#                 m.weight.data.fill_(1)
#                 m.bias.data.zero_()
#
#
#     def forward(self, x):
#         x = self.conv1(x)
#
#         x = self.stages[0](x)
#         x = self.stages[1](x)
#         out3 = self.stages[2](x)
#         out4 = self.stages[3](out3)
#         out5 = self.stages[4](out4)
#
#         return out3, out4, out5


def stochastic_depth(input: Tensor, shortcut: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
    """
    Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
    <https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
    branches of residual architectures, then adds the shortcut.

    Args:
        input (Tensor[N, ...]): The residual-branch tensor of arbitrary
                    dimensions with the first one being its batch i.e. a
                    batch with ``N`` rows. This is the tensor that may be
                    randomly zeroed.
        shortcut (Tensor[N, ...]): The identity/shortcut tensor added to the
                    (possibly zeroed) ``input`` unchanged.
        p (float): probability of the input to be zeroed.
        mode (str): ``"batch"`` or ``"row"``.
                    ``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
                    randomly selected rows from the batch.
        training: apply stochastic depth if is ``True``. Default: ``True``
    Returns:
        Tensor[N, ...]: ``shortcut`` plus the randomly zeroed ``input``.
    Raises:
        ValueError: if ``p`` is outside ``[0, 1]`` or ``mode`` is invalid.
    """
    if p < 0.0 or p > 1.0:
        raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
    if mode not in ["batch", "row"]:
        raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
    if not training or p == 0.0:
        # No dropping: plain residual addition.
        return torch.add(input, shortcut)

    survival_rate = 1.0 - p
    if mode == "row":
        # One Bernoulli draw per sample, broadcast over remaining dims.
        size = [input.shape[0]] + [1] * (input.ndim - 1)
    else:
        # Single draw zeroing (or keeping) the whole batch.
        size = [1] * input.ndim
    noise = torch.empty(size, dtype=input.dtype, device=input.device)
    noise = noise.bernoulli_(survival_rate)
    # Bug fix: the original divided unconditionally, producing NaN when
    # p == 1.0 (survival_rate == 0). Guard as torchvision does.
    if survival_rate > 0.0:
        noise.div_(survival_rate)
    return torch.add(input * noise, shortcut)
# torch.fx.wrap('stochastic_depth')

class StochasticDepth(nn.Module):
    """
    Module wrapper around :func:`stochastic_depth`; see that function for
    the semantics of the ``"batch"`` and ``"row"`` drop modes.
    """

    def __init__(self, p: float, mode: str) -> None:
        super().__init__()
        self.p = p
        self.mode = mode

    def forward(self, input: Tensor, shortcut: Tensor) -> Tensor:
        # Defer to the functional form so self.training is honoured.
        return stochastic_depth(input, shortcut, self.p, self.mode, self.training)

    def __repr__(self) -> str:
        return '{}(p={}, mode={})'.format(self.__class__.__name__, self.p, self.mode)


#---------------------------------------------------#
#   CSPdarknet的结构块的组成部分    ：res unit
#   内部堆叠的残差块
#---------------------------------------------------#
class Resblock(nn.Module):
    """Residual unit (1x1 reduce -> 3x3 expand) with stochastic depth.

    Args:
        channels: input/output channel count.
        hidden_channels: bottleneck width; defaults to ``channels``.
    """

    def __init__(self, channels, hidden_channels=None):
        super(Resblock, self).__init__()

        if hidden_channels is None:
            hidden_channels = channels

        self.block = nn.Sequential(
            BasicConv(channels, hidden_channels, 1),
            BasicConv(hidden_channels, channels, 3)
        )
        # Bug fix: the original constructed a fresh StochasticDepth inside
        # forward() on every call. A freshly-built module is always in
        # training mode, so residual branches were dropped even during
        # evaluation, and the submodule was never registered with the model.
        # Building it once here makes it follow train()/eval() correctly.
        self.stochastic_depth = StochasticDepth(p=0.5, mode='batch')

    def forward(self, x):
        # Residual add with stochastic depth on the conv branch.
        return self.stochastic_depth(x, self.block(x))





#--------------------------------------------------------------------#
#   CSPdarknet的结构块        ：CSPX+空洞卷积
#   首先利用ZeroPadding2D和一个步长为2x2的卷积块进行高和宽的压缩
#   然后建立一个大的残差边shortconv、这个大残差边绕过了很多的残差结构
#   主干部分会对num_blocks进行循环，循环内部是残差结构。
#   对于整个CSPdarknet的结构块，就是一个大残差块+内部多个小残差块
#--------------------------------------------------------------------#
class Resblock_body(nn.Module):
    """CSPdarknet stage ("CSPX"): downsample, split into a large shortcut
    branch and a residual-stack branch, then concatenate and fuse.

    Args:
        in_channels: channels entering the stage.
        out_channels: channels produced by the stage.
        num_blocks: number of stacked Resblocks on the main branch
            (only used when ``first`` is False).
        first: the first stage keeps full width on both branches and uses a
            single half-width-bottleneck Resblock.
        DorC: reserved flag for a dilated-conv downsample variant that is
            currently disabled (kept for caller compatibility).
    """

    def __init__(self, in_channels, out_channels, num_blocks, first, DorC):
        super(Resblock_body, self).__init__()
        # Stride-2 3x3 conv halves the spatial resolution.
        # (The dilated-conv downsample variant selected by `DorC` has been
        # dropped for now.)
        self.downsample_conv = BasicConv(in_channels, out_channels, 3, stride=2)

        if first:
            # First stage: both branches keep the full channel width.
            # split_conv0 is the large shortcut edge bypassing the residuals.
            self.split_conv0 = BasicConv(out_channels, out_channels, 1)
            # split_conv1 feeds the main branch with its residual stack.
            self.split_conv1 = BasicConv(out_channels, out_channels, 1)
            self.blocks_conv = nn.Sequential(
                Resblock(channels=out_channels, hidden_channels=out_channels // 2),
                BasicConv(out_channels, out_channels, 1)
            )
            self.concat_conv = BasicConv(out_channels * 2, out_channels, 1)
        else:
            # Later stages: each branch carries half the channels so the
            # concatenation restores out_channels.
            self.split_conv0 = BasicConv(out_channels, out_channels // 2, 1)
            self.split_conv1 = BasicConv(out_channels, out_channels // 2, 1)
            self.blocks_conv = nn.Sequential(
                *[Resblock(out_channels // 2) for _ in range(num_blocks)],
                BasicConv(out_channels // 2, out_channels // 2, 1)
            )
            self.concat_conv = BasicConv(out_channels, out_channels, 1)

    def forward(self, x):
        x = self.downsample_conv(x)

        # Large shortcut edge.
        shortcut = self.split_conv0(x)
        # Main branch through the stacked residual units.
        main = self.split_conv1(x)
        main = self.blocks_conv(main)

        # Rejoin the two branches and fuse channels with a 1x1 conv.
        merged = torch.cat([main, shortcut], dim=1)
        return self.concat_conv(merged)
#---------------------------------------------------#
#   CSPdarknet53 的主体部分       空洞卷积
#   输入为一张416x416x3的图片
#   输出为三个有效特征层
#---------------------------------------------------#
class CSPDarkNet(nn.Module):
    """CSPdarknet backbone producing five feature maps (out3..out7).

    The stem keeps full resolution; each of the five CSP stages halves the
    spatial size. Two extra heads (3x3 conv + stride-2 max-pool) produce
    the P6/P7 pyramid levels on top of the deepest stage.

    Args:
        layers: per-stage residual-block counts, e.g. ``[1, 2, 8, 8, 4]``.
    """

    def __init__(self, layers):
        super(CSPDarkNet, self).__init__()
        self.inplanes = 12  # stem width (tuning knob)
        # Stem: 3 -> inplanes channels at full resolution.
        self.conv1 = BasicConv(3, self.inplanes, kernel_size=3, stride=1)
        self.feature_channels = [16, 24, 40, 80, 112]  # per-stage widths (tuning knob)
        # Five CSP stages; each Resblock_body downsamples by 2.
        self.stages = nn.ModuleList([
            Resblock_body(self.inplanes, self.feature_channels[0], layers[0], first=True, DorC=True),
            Resblock_body(self.feature_channels[0], self.feature_channels[1], layers[1], first=False, DorC=False),
            Resblock_body(self.feature_channels[1], self.feature_channels[2], layers[2], first=False, DorC=False),
            Resblock_body(self.feature_channels[2], self.feature_channels[3], layers[3], first=False, DorC=False),
            Resblock_body(self.feature_channels[3], self.feature_channels[4], layers[4], first=False, DorC=False),
        ])

        # Extra pyramid levels: 3x3 conv adjusts channels, max-pool halves size.
        self.conv_out6_channel = nn.Conv2d(self.feature_channels[4], 192, kernel_size=3, padding=1, stride=1)
        self.conv_out6 = nn.MaxPool2d(kernel_size=3, padding=1, stride=2)
        self.conv_out7_channel = nn.Conv2d(192, 320, kernel_size=3, padding=1, stride=1)
        self.conv_out7 = nn.MaxPool2d(kernel_size=3, padding=1, stride=2)

        # He-style init for convs; BatchNorm starts as identity.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                fan_out = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
                module.weight.data.normal_(0, math.sqrt(2. / fan_out))
            elif isinstance(module, nn.BatchNorm2d):
                module.weight.data.fill_(1)
                module.bias.data.zero_()

    def forward(self, x):
        x = self.conv1(x)

        # First two stages are consumed internally; the last three stages
        # plus the two pooled heads form the returned pyramid.
        x = self.stages[0](x)
        x = self.stages[1](x)
        out3 = self.stages[2](x)
        out4 = self.stages[3](out3)
        out5 = self.stages[4](out4)

        out6 = self.conv_out6(self.conv_out6_channel(out5))
        out7 = self.conv_out7(self.conv_out7_channel(out6))
        return out3, out4, out5, out6, out7



def darknet53(pretrained, **kwargs):
    """Build the CSPDarknet backbone.

    Args:
        pretrained: falsy to skip weight loading, or a path string to a
            state-dict checkpoint to load into the model.
        **kwargs: accepted for interface compatibility; currently unused.

    Returns:
        A ``CSPDarkNet`` instance with stage depths [1, 2, 8, 8, 4].

    Raises:
        ValueError: if ``pretrained`` is truthy but not a path string.
    """
    model = CSPDarkNet([1, 2, 8, 8, 4])
    if pretrained:
        if isinstance(pretrained, str):
            model.load_state_dict(torch.load(pretrained))
        else:
            # ValueError is more precise than bare Exception and remains
            # backward-compatible for callers catching Exception.
            raise ValueError("darknet request a pretrained path. got [{}]".format(pretrained))
    return model
