import math
import torch
import torch.nn as nn
# import torch.fx
from torch import Tensor
import torch.nn.functional as F
from collections import OrderedDict
from batchrenorm import BatchRenorm2d

class Mish(nn.Module):
    """Mish activation: ``x * tanh(softplus(x))`` (arXiv:1908.08681)."""

    def __init__(self):
        super(Mish, self).__init__()

    def forward(self, x):
        # softplus(x) = ln(1 + e^x); tanh of it gates the identity branch.
        gate = torch.tanh(F.softplus(x))
        return x.mul(gate)

class BN_Conv_Mish(nn.Module):
    """Conv2d -> BatchNorm2d -> Mish, the standard CSP-DarkNet conv block.

    All constructor arguments are forwarded verbatim to ``nn.Conv2d``;
    ``bias`` defaults to ``False`` because BatchNorm supplies the shift.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding, dilation=1, groups=1, bias=False):
        super(BN_Conv_Mish, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation=dilation,
                              groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_channels)      # switched to plain BatchNorm
        # self.bn = BatchRenorm2d(out_channels)

    def forward(self, x):
        out = self.bn(self.conv(x))
        # Mish applied functionally — avoids constructing a throwaway
        # Mish() module object on every forward call.
        return out * torch.tanh(F.softplus(out))

# "Stochastic depth" — label for the function below (was a stray class-level
# string inside BN_Conv_Mish; converted to a real comment).
def stochastic_depth(input: Tensor, shortcut: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
    """
    Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
    <https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
    branches of residual architectures. This variant also adds ``shortcut``
    (the identity path) to the possibly-dropped residual branch.

    Args:
        input (Tensor[N, ...]): The residual-branch tensor of arbitrary dimensions
                    with the first one being its batch i.e. a batch with ``N`` rows.
        shortcut (Tensor[N, ...]): identity/shortcut tensor, always added to the
                    (possibly zeroed) residual branch; must broadcast with ``input``.
        p (float): probability of the input to be zeroed.
        mode (str): ``"batch"`` or ``"row"``.
                    ``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
                    randomly selected rows from the batch.
        training: apply stochastic depth if is ``True``. Default: ``True``
    Returns:
        Tensor[N, ...]: ``input * noise + shortcut`` where ``noise`` is the
        (rescaled) Bernoulli keep-mask; just ``input + shortcut`` in eval mode
        or when ``p == 0``.
    Raises:
        ValueError: if ``p`` is outside [0, 1] or ``mode`` is unrecognized.
    """
    if p < 0.0 or p > 1.0:
        raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
    if mode not in ["batch", "row"]:
        raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
    if not training or p == 0.0:
        return torch.add(input, shortcut)

    survival_rate = 1.0 - p
    if mode == "row":
        # One keep/drop decision per batch row.
        size = [input.shape[0]] + [1] * (input.ndim - 1)
    else:
        # Single keep/drop decision for the whole batch.
        size = [1] * input.ndim
    noise = torch.empty(size, dtype=input.dtype, device=input.device)
    noise = noise.bernoulli_(survival_rate)
    if survival_rate > 0.0:
        # Rescale survivors so expectations match eval mode. Skipping the
        # division when p == 1.0 avoids 0/0 -> NaN (branch always dropped).
        noise.div_(survival_rate)
    return torch.add(input * noise, shortcut)
# torch.fx.wrap('stochastic_depth')
class StochasticDepth(nn.Module):
    """Module wrapper around :func:`stochastic_depth`.

    Drop probability and mode are fixed at construction; the training flag
    is taken from the module's own ``self.training`` state.
    """

    def __init__(self, p: float, mode: str) -> None:
        super().__init__()
        self.p = p
        self.mode = mode

    def forward(self, input: Tensor, shortcut: Tensor) -> Tensor:
        return stochastic_depth(input, shortcut, self.p, self.mode, self.training)

    def __repr__(self) -> str:
        return '{}(p={}, mode={})'.format(self.__class__.__name__, self.p, self.mode)


class ResidualBlock(nn.Module):
    """
    Basic residual block for CSP-Darknet: 1x1 bottleneck -> 3x3 conv -> BN,
    identity skip connection, Mish on the sum.

    Args:
        chnls: number of input/output channels.
        inner_chnnls: bottleneck width; defaults to ``chnls`` when ``None``.
    """
    def __init__(self, chnls, inner_chnnls=None):
        super(ResidualBlock, self).__init__()
        if inner_chnnls is None:
            inner_chnnls = chnls
        self.conv1 = BN_Conv_Mish(chnls, inner_chnnls, 1, 1, 0)     # always use same padding
        self.conv2 = nn.Conv2d(inner_chnnls, chnls, 3, 1, 1, bias=False)
        self.bn = nn.BatchNorm2d(chnls)
        # self.bn = BatchRenorm2d(chnls)                   # alternative BN variant (disabled)

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        # stoDepth = StochasticDepth(p=0.2, mode='batch')    # stochastic-depth variant (disabled)
        # out = stoDepth(x, self.bn(out))
        out = self.bn(out) + x
        # Mish applied functionally — avoids constructing a throwaway
        # Mish() module object on every forward call.
        return out * torch.tanh(F.softplus(out))

#---------------------------------------------------#
#   SPP block: max-pool the input with kernels of
#   several sizes, then stack the pooled feature maps
#---------------------------------------------------#
class SpatialPyramidPooling(nn.Module):
    """
    SPP block: max-pools the input with several kernel sizes (stride 1,
    "same" padding via ``pool_size // 2``) and concatenates the pooled maps
    with the input along the channel axis, so a C-channel input yields
    ``(len(pool_sizes) + 1) * C`` output channels at unchanged resolution.
    """
    def __init__(self, pool_sizes=(5, 9, 13)):
        super(SpatialPyramidPooling, self).__init__()
        # Tuple default avoids the shared mutable-default-argument pitfall.
        self.maxpools = nn.ModuleList([nn.MaxPool2d(pool_size, 1, pool_size // 2)
                                       for pool_size in pool_sizes])

    def forward(self, x):
        # Pools are applied largest-kernel first ([::-1]); the raw input is
        # appended last before channel-wise concatenation.
        features = [maxpool(x) for maxpool in self.maxpools[::-1]]
        return torch.cat(features + [x], dim=1)

class CSPFirst(nn.Module):
    """
    First CSP stage: strided 3x3 downsample, split into two full-width 1x1
    transition paths, run one residual block on the second path, then
    concatenate both paths and fuse with a 1x1 conv.
    """
    def __init__(self, in_chnnls, out_chnls):
        super(CSPFirst, self).__init__()
        self.dsample = BN_Conv_Mish(in_chnnls, out_chnls, 3, 2, 1)     # same padding
        self.trans_0 = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)
        self.trans_1 = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)
        self.block = ResidualBlock(out_chnls, out_chnls // 2)
        self.trans_cat = BN_Conv_Mish(2 * out_chnls, out_chnls, 1, 1, 0)

    def forward(self, x):
        down = self.dsample(x)
        branch_a = self.trans_0(down)
        branch_b = self.block(self.trans_1(down))
        merged = torch.cat((branch_a, branch_b), 1)
        return self.trans_cat(merged)


class CSPStem(nn.Module):
    """
    CSP stage with downsampling: halve the spatial size with a strided 3x3
    conv, split into two half-width 1x1 paths, stack ``num_block`` residual
    blocks on the second path, then concatenate and fuse with a 1x1 conv.
    """

    def __init__(self, in_chnls, out_chnls, num_block):
        super(CSPStem, self).__init__()
        half = out_chnls // 2
        self.dsample = BN_Conv_Mish(in_chnls, out_chnls, 3, 2, 1)
        self.trans_0 = BN_Conv_Mish(out_chnls, half, 1, 1, 0)
        self.trans_1 = BN_Conv_Mish(out_chnls, half, 1, 1, 0)
        self.blocks = nn.Sequential(*(ResidualBlock(half) for _ in range(num_block)))
        self.trans_cat = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)

    def forward(self, x):
        down = self.dsample(x)
        shortcut_path = self.trans_0(down)
        residual_path = self.blocks(self.trans_1(down))
        fused = torch.cat((shortcut_path, residual_path), 1)
        return self.trans_cat(fused)


class CSP_DarkNet(nn.Module):
    """
    CSP-DarkNet backbone (slimmed channel configuration).

    Args:
        num_blocks: residual-block count per CSP stage (length-4 sequence).
        num_classes: kept for API compatibility; currently unused — the
            forward pass returns softmaxed backbone features, not logits.
    """
    def __init__(self, num_blocks, num_classes=1000):
        super(CSP_DarkNet, self).__init__()
        # Slimmed-down channel plan (original CSP-DarkNet-53 used
        # [64, 128, 256, 512, 1024]).
        chnls = [4, 8, 10, 20, 28]
        self.conv0 = BN_Conv_Mish(3, 4, 3, 1, 1)  # same padding
        self.neck = CSPFirst(4, chnls[0])
        self.body = nn.Sequential(
            *[CSPStem(chnls[i], chnls[i+1], num_blocks[i]) for i in range(4)])

        # Extra downsampling heads producing two deeper feature maps;
        # used by csp_darknet_53, not by this class's own forward().
        self.conv_out6_channel = nn.Conv2d(chnls[4], 48, kernel_size=3, padding=1, stride=2)
        self.conv_out6 = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)
        self.conv_out7_channel = nn.Conv2d(48, 160, kernel_size=3, padding=1, stride=2)
        self.conv_out7 = nn.MaxPool2d(kernel_size=3, padding=1, stride=1)

        # Per-scale SPP blocks plus 1x1 channel-reduction convs.
        self.spp3 = SpatialPyramidPooling(pool_sizes=[13, 23, 31])
        self.spp4 = SpatialPyramidPooling(pool_sizes=[9, 15, 23])
        self.spp5 = SpatialPyramidPooling(pool_sizes=[5, 9, 13])
        self.conv1x13 = torch.nn.Conv2d(chnls[2] * 4, chnls[2], 1, 1, bias=False)
        self.conv1x14 = torch.nn.Conv2d(chnls[3] * 4, chnls[3], 1, 1, bias=False)
        self.conv1x15 = torch.nn.Conv2d(chnls[4] * 4, chnls[4], 1, 1, bias=False)

        self.spp6 = SpatialPyramidPooling(pool_sizes=[3, 5, 7])
        self.spp7 = SpatialPyramidPooling(pool_sizes=[3])
        self.conv1x16 = torch.nn.Conv2d(48 * 4, 192, 1, 1, bias=False)
        self.conv1x17 = torch.nn.Conv2d(160 * 2, 320, 1, 1, bias=False)

    def forward(self, x):
        out = self.conv0(x)
        out = self.neck(out)
        out = self.body(out)
        # Explicit dim=1 (channel axis): identical to the implicit choice
        # F.softmax makes for a 4-D tensor, but silences the deprecation
        # warning for dim-less softmax calls.
        return F.softmax(out, dim=1)

# def csp_darknet_53(num_classes=1000):
#     return CSP_DarkNet([2, 8, 8, 4], num_classes)

class csp_darknet_53(nn.Module):
    """
    Feature-extractor wrapper around :class:`CSP_DarkNet` that returns five
    multi-scale SPP feature maps instead of a classification output
    ("scheme 2": SPP applied to all five scales, without the 1x1 fusion convs).
    """

    def __init__(self, num_class=15):
        super(csp_darknet_53, self).__init__()
        self.model = CSP_DarkNet([2, 8, 8, 4], num_classes=num_class)

    def forward(self, x):
        feats = self.model.conv0(x)
        feats = self.model.neck(feats)

        # Run every CSP stage, keeping each stage's output.
        stage_outputs = []
        for stage in self.model.body:
            feats = stage(feats)
            stage_outputs.append(feats)

        # Extra strided head off the deepest stage; feeds both p6 and p7.
        head = self.model.conv_out6_channel(stage_outputs[3])

        # NOTE(review): spp3 is applied to all three backbone scales below,
        # while self.model.spp4/spp5 exist but are never used — this looks
        # like a copy-paste slip; confirm the intent before changing it.
        p3 = self.model.spp3(stage_outputs[1])
        p4 = self.model.spp3(stage_outputs[2])
        p5 = self.model.spp3(stage_outputs[3])

        p6 = self.model.spp6(head)
        p7 = self.model.spp7(self.model.conv_out7_channel(head))

        return p3, p4, p5, p6, p7
