#使用pytorch实现ghostModule替换常规的cnn，现定义的efficientv2的模型。
#pytorch的padding规则（无扩展卷积）,padding='same'情形， p = math.ceil(((s-1)H+k-s)/2)
#stride = 1，输出图=原始图
#stride = 2，输出图=原始图/2
#也即有如下关系：
#           输出图=原始图/stride
#p = math.ceil(((输出图-1)s-原始图+k)/2)
import time

import torch
import math
import torch.fx
from torch import Tensor,nn
import numpy as np
#-------------------------一些函数定义-----------------------------------
def stochastic_depth(input: Tensor, shortcut: Tensor, p: float, mode: str, training: bool = True) -> Tensor:
    """
    Stochastic Depth from `"Deep Networks with Stochastic Depth"
    <https://arxiv.org/abs/1603.09382>`_, used for randomly dropping residual
    branches of residual architectures. Unlike the torchvision version, this
    variant also adds ``shortcut`` to the (possibly zeroed) ``input``.

    Args:
        input (Tensor[N, ...]): residual-branch tensor of arbitrary dimensions,
                    with the first dimension being the batch.
        shortcut (Tensor[N, ...]): identity/shortcut tensor added to the result.
        p (float): probability of the input to be zeroed.
        mode (str): ``"batch"`` zeroes the whole residual branch at once,
                    ``"row"`` zeroes randomly selected rows of the batch.
        training: apply stochastic depth if ``True``. Default: ``True``
    Returns:
        Tensor[N, ...]: ``shortcut`` plus the randomly zeroed ``input``.
    """
    if p < 0.0 or p > 1.0:
        raise ValueError("drop probability has to be between 0 and 1, but got {}".format(p))
    if mode not in ["batch", "row"]:
        raise ValueError("mode has to be either 'batch' or 'row', but got {}".format(mode))
    # Inference (or p == 0): the branch is always kept, plain residual add.
    if not training or p == 0.0:
        return torch.add(input, shortcut)

    keep_prob = 1.0 - p
    # "row" keeps one Bernoulli draw per batch row; "batch" uses a single draw.
    noise_shape = [input.shape[0]] + [1] * (input.ndim - 1) if mode == "row" else [1] * input.ndim
    mask = torch.empty(noise_shape, dtype=input.dtype, device=input.device)
    mask.bernoulli_(keep_prob)
    # Scale by 1/keep_prob so the expected value matches inference.
    mask.div_(keep_prob)
    return torch.add(input * mask, shortcut)
torch.fx.wrap('stochastic_depth')

class StochasticDepth(nn.Module):
    """
    Module wrapper around :func:`stochastic_depth`; the drop probability and
    mode are fixed at construction time, and training/eval state is taken
    from ``self.training``.
    """

    def __init__(self, p: float, mode: str) -> None:
        super().__init__()
        self.p = p
        self.mode = mode

    def forward(self, input: Tensor, shortcut: Tensor) -> Tensor:
        return stochastic_depth(input, shortcut, self.p, self.mode, self.training)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"

def hard_swish(x, inplace: bool = False):
    '''
    Hard-swish activation: ``x * relu6(x + 3) / 6``.
    A cheaper piecewise approximation of swish; h-swish is usually only
    worthwhile in the deeper layers of a network.

    Args:
        x: input tensor.
        inplace: mutate ``x`` in place (the returned tensor aliases ``x``).
    Returns:
        Tensor with hard-swish applied elementwise.

    BUGFIX: the original non-inplace branch referenced the misspelled module
    ``torch.nn.funcational`` (AttributeError at call time), and the inplace
    branch computed ``clamp(x+3, 0, 6)/6`` — hard-*sigmoid* — without the
    final multiply by ``x``, so the two branches disagreed.
    '''
    if inplace:
        # relu6(x + 3) / 6 is computed out-of-place first, then folded into x,
        # so the gate still sees the original values of x.
        return x.mul_(torch.nn.functional.relu6(x + 3.) / 6.)
    return torch.nn.functional.relu6(x + 3.) * x / 6.

def conv_3x3(in_channel: int, out_channel: int, stride: int):
    '''
    3x3 Conv-BN-SiLU ("CBS") block: padding 1, no bias (BN supplies the shift).
    With stride 1 the spatial size is preserved; with stride 2 it is halved.
    '''
    layers = [
        nn.Conv2d(in_channel, out_channel, (3, 3), (stride, stride), 1, bias=False),
        nn.BatchNorm2d(out_channel),
        nn.SiLU(inplace=True),
    ]
    return nn.Sequential(*layers)

def conv_1x1(in_channel: int, out_channel: int):
    '''
    1x1 Conv-BN-SiLU ("CBS") block: stride 1, no padding, no bias.
    Changes only the channel count, never the spatial size.
    '''
    layers = [
        nn.Conv2d(in_channel, out_channel, 1, 1, 0, bias=False),
        nn.BatchNorm2d(out_channel),
        nn.SiLU(inplace=True),
    ]
    return nn.Sequential(*layers)

def hard_sigmoid(x):
    '''
    Piecewise-linear approximation of the logistic sigmoid; cheaper to compute
    and therefore faster to train with:
        x < -2.5          -> 0
        x >  2.5          -> 1
        -2.5 <= x <= 2.5  -> 0.2 * x + 0.5
    Delegates to the built-in PyTorch implementation.
    '''
    return nn.functional.hardsigmoid(x)

def round_filter(filters, multiplier=1.0):
    '''
    Scale a channel count by ``multiplier`` and round to the nearest multiple
    of 8, never going below 8 (the usual EfficientNet width-rounding rule).
    '''
    divisor = 8
    min_depth = 8
    scaled = filters * multiplier
    rounded = int(scaled + divisor / 2) // divisor * divisor
    return max(min_depth, rounded)

def handleInputStageChannels(index,in_channel,out_channel,kernel_size,activation,expand_ratio,use_Fused,
                             stride=1,se_ratio=None,ghost1_drop=None,dropout1=None,shortcut=1,survival=None):
    '''
    Build the ``index``-th repeated block of a stage. Only the first block of a
    stage (index 0) uses the stage's own ``in_channel`` and ``stride``; every
    later repeat takes ``out_channel`` as input with stride 1, which handles
    the channel hand-off inside the repeat loop.
    Note: ``activation`` is accepted but not forwarded (both block types
    hard-code their activations internally).
    '''
    block_cls = Ghost_Fused_MBConv if use_Fused else Ghost_MBConv
    first = index == 0
    return block_cls(in_channel=in_channel if first else out_channel,
                     out_channel=out_channel, kernel_size=kernel_size, expand_ratio=expand_ratio,
                     stride=stride if first else 1,
                     se_ratio=se_ratio, ghost1_drop=ghost1_drop, dropout1=dropout1,
                     shortcut=shortcut, survival=survival)
#-------------------------SE模块-------------------------------------------
class SE(nn.Module):
    """
    Squeeze-and-Excitation block: global-average-pool to one value per channel
    (squeeze), pass through a bottleneck FC pair (excitation), and rescale the
    input channel-wise with the resulting sigmoid gates.

    NOTE(review): ``fc1`` takes ``out_channel`` features while the squeeze
    produces ``in_channel`` of them — all call sites in this file pass
    in_channel == out_channel, so this only matters for new callers; confirm
    before using with differing channel counts.
    """

    def __init__(self, in_channel, out_channel, se_ratio: int = 4):
        super(SE, self).__init__()
        # Bottleneck width: in_channel / se_ratio, rounded to a multiple of 8.
        self.reduction = round_filter(in_channel / se_ratio)
        # Squeeze: one scalar per channel.
        self.globalAveragePool = torch.nn.AdaptiveAvgPool2d(output_size=1)
        # Excitation: compress by se_ratio ...
        self.fc1 = torch.nn.Sequential(
            torch.nn.Linear(out_channel, self.reduction),
            torch.nn.SiLU(inplace=True),
        )
        # ... then expand back and gate with a sigmoid.
        self.fc2 = torch.nn.Sequential(
            torch.nn.Linear(self.reduction, out_channel),
            torch.nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[0], x.shape[1]
        scale = self.globalAveragePool(x).view(batch, channels)
        scale = self.fc2(self.fc1(scale))
        # Broadcast the per-channel gates over the spatial dimensions.
        return x * scale.view(batch, channels, 1, 1)

class GhostModule(torch.nn.Module):
    def __init__(self, in_channel:int, out_channel:int,activation:str,kernel_size:int, stride:int,ratio=2):
        '''
        GhostModule (GhostNet): many feature maps in a CNN are near-duplicates,
        so only ``out_channel / ratio`` "intrinsic" maps are produced by a real
        convolution; the remaining "ghost" maps are derived from them by a
        cheap depthwise 3x3, and the two sets are concatenated.

        Args:
            in_channel: input channels.
            out_channel: output channels (result is sliced down to exactly this).
            activation: one of ['Relu', 'Elu', 'LeakRelu'].
            kernel_size: kernel of the primary (intrinsic) convolution.
            stride: stride of the primary convolution; stride 1 preserves the
                spatial size ('same' padding), stride 2 pads by 1.
            ratio: fraction of channels produced by the primary conv (1/ratio).
        '''
        super(GhostModule,self).__init__()
        self.init_channel = math.ceil(out_channel / ratio)   # intrinsic maps
        self.new_channel = self.init_channel * (ratio - 1)   # ghost maps
        self.in_channel = in_channel
        self.out_channel = out_channel
        # NOTE: attribute keeps the original (misspelled) name for compatibility.
        activations = {"Relu": torch.nn.ReLU(True),
                       "Elu": torch.nn.ELU(inplace=True),
                       "LeakRelu": torch.nn.LeakyReLU(inplace=True)}
        self.actiavation = activations[activation]
        # Primary conv (CBR): produces the intrinsic feature maps.
        self.primary_conv = torch.nn.Sequential(
            torch.nn.Conv2d(in_channel, self.init_channel, (kernel_size,) * 2, (stride,) * 2,
                            padding='same' if stride == 1 else 1, bias=False),
            torch.nn.BatchNorm2d(self.init_channel),
            self.actiavation,
        )
        # Cheap operation: depthwise 3x3 (groups == channels) deriving ghosts.
        self.cheap_operation = torch.nn.Sequential(
            torch.nn.Conv2d(self.init_channel, self.init_channel, (3,) * 2, (1,) * 2,
                            groups=self.init_channel, padding='same', bias=False),
        )

    def forward(self, input):
        intrinsic = self.primary_conv(input)
        ghost = self.cheap_operation(intrinsic)
        # Concatenate intrinsic and ghost maps, then trim to out_channel
        # (the concat can overshoot when out_channel is odd).
        combined = torch.cat((intrinsic, ghost), dim=1)
        return combined[:, :self.out_channel, :, :]

class Ghost_Fused_MBConv(torch.nn.Module):
    """
    Fused-MBConv block (EfficientNetV2) with its convolutions replaced by
    GhostModules: [ghost expand kxk, stride s] -> [SE] -> [ghost project] -> BN,
    plus an optional residual path that is average-pooled (stride 2) and/or
    channel-adjusted by a 1x1 GhostModule so it matches the main branch.

    Args:
        in_channel: input channels.
        out_channel: output channels.
        kernel_size: kernel size of the expansion GhostModule.
        expand_ratio: channel expansion factor; 1 skips the expansion stage
            (the projection GhostModule then carries kernel_size/stride).
        stride: 1 preserves spatial size, 2 halves it.
        se_ratio: SE squeeze ratio; None disables the SE block.
        ghost1_drop: dropout prob after the expansion stage; None/0 disables.
        dropout1: unused here (kept for a uniform constructor signature).
        shortcut: truthy enables the residual connection.
        survival: stochastic-depth survival probability; None or >= 1 falls
            back to a plain residual add.
        epsilon: eps for the output BatchNorm.
    """
    def __init__(self, in_channel, out_channel, kernel_size, expand_ratio=6, stride=1, se_ratio=4, ghost1_drop=None,
                 dropout1=None,shortcut=1, survival=None,epsilon=1e-5):
        super(Ghost_Fused_MBConv, self).__init__()
        # Expanded channel count, rounded to a multiple of 8.
        self.expand_ratio_filter = round_filter(in_channel * expand_ratio)
        self.stride = stride
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.expand_ratio = expand_ratio
        self.ghost1_drop = ghost1_drop
        self.se_ratio = se_ratio
        self.use_shortcut = shortcut
        self.survival = survival

        # Shortcut-path shape fixups: downsample and/or adjust channels so the
        # residual can be added to the main branch.
        if stride == 2:
            self.poolAverage = torch.nn.AvgPool2d(2,2)
        if in_channel != out_channel:
            self.shortcut = GhostModule(in_channel, out_channel, kernel_size=1, stride=1,activation='Relu')
        # Expansion stage (skipped when expand_ratio == 1).
        if expand_ratio != 1:
            self.ghost1 = GhostModule(in_channel, self.expand_ratio_filter,'Relu',kernel_size=kernel_size,stride=stride, ratio=2)
            self.ghost1_bn = torch.nn.BatchNorm2d(self.expand_ratio_filter)
            self.ghost1_act = torch.nn.ReLU()
            if (ghost1_drop is not None) and (ghost1_drop != 0):
                self.ghost1_dropout = torch.nn.Dropout(p=ghost1_drop)
        # SE block.
        # NOTE(review): with expand_ratio == 1 the SE is sized for
        # expand_ratio_filter = round_filter(in_channel) but receives the
        # un-expanded input — only safe if in_channel is a multiple of 8;
        # the configs in this file use se_ratio=None in that case.
        if se_ratio is not None:
            self.se = SE(self.expand_ratio_filter,self.expand_ratio_filter,se_ratio)
        # Projection/output stage; when there was no expansion this stage
        # carries the block's kernel_size and stride instead.
        self.ghost2 = GhostModule(in_channel=self.expand_ratio_filter,out_channel=out_channel,activation='Relu',
                                  kernel_size=1 if expand_ratio != 1 else kernel_size,
                                  stride=1 if expand_ratio != 1 else stride)
        self.out_bn = torch.nn.BatchNorm2d(out_channel,eps=epsilon)

    def forward(self,input):
        # Prepare the residual: match spatial size, then channels.
        shortcut = input
        if self.stride == 2:
            shortcut = self.poolAverage(shortcut)
        if self.in_channel != self.out_channel:
            shortcut = self.shortcut(shortcut)
        # Expansion.
        if self.expand_ratio != 1:
            input = self.ghost1(input)
            input = self.ghost1_bn(input)
            input = self.ghost1_act(input)
            if (self.ghost1_drop is not None) and (self.ghost1_drop != 0):
                input = self.ghost1_dropout(input)
        # SE block.
        if self.se_ratio is not None:
            input = self.se(input)

        input = self.ghost2(input)
        input = self.out_bn(input)

        if self.use_shortcut:  # residual connection enabled
            # "Survival" = probability the residual branch stays active
            # (stochastic-depth terminology).
            if self.survival is not None and self.survival<1:
                # NOTE(review): a fresh StochasticDepth is built on every
                # forward; it is stateless so behavior is unaffected, but
                # hoisting it to __init__ would avoid the per-call allocation.
                stoDepth = StochasticDepth(p=self.survival,mode='batch')
                return stoDepth(input,shortcut)
            else:
                return torch.add(input,shortcut)
        else:
            return input

class Ghost_MBConv(torch.nn.Module):
    """
    MBConv block with its 1x1 convolutions replaced by GhostModules:
    [ghost 1x1 expand] -> [depthwise kxk, stride s] -> [dropout] -> [SE] ->
    [ghost 1x1 project] -> BN, plus an optional residual path that is
    average-pooled (stride 2) and/or channel-adjusted by a 1x1 GhostModule.

    Args:
        in_channel: input channels.
        out_channel: output channels.
        kernel_size: depthwise conv kernel size.
            NOTE(review): the depthwise conv hard-codes padding=1, which only
            preserves the expected spatial size for kernel_size == 3 (the only
            value used by the configs in this file) — confirm before passing
            other sizes.
        expand_ratio: channel expansion factor; 1 skips the 1x1 expansion.
        stride: 1 preserves spatial size, 2 halves it.
        se_ratio: SE squeeze ratio; None disables the SE block.
        ghost1_drop: dropout prob after the depthwise conv; None/0 disables.
        dropout1: unused here (kept for a uniform constructor signature).
        shortcut: truthy enables the residual connection.
        survival: stochastic-depth survival probability; None or >= 1 falls
            back to a plain residual add.
        epsilon: eps for the BatchNorm layers.
    """
    def __init__(self, in_channel, out_channel, kernel_size, expand_ratio=6, stride=1, se_ratio=4, ghost1_drop=None,
                 dropout1=None,shortcut=1, survival=None,epsilon=1e-5):
        super(Ghost_MBConv, self).__init__()
        # Expanded channel count, rounded to a multiple of 8.
        # NOTE(review): with expand_ratio == 1 this may differ from in_channel
        # when in_channel is not a multiple of 8, which would break the
        # depthwise conv below; the configs in this file avoid that case.
        self.expand_ratio_filter = round_filter(in_channel * expand_ratio)
        self.stride = stride
        self.in_channel = in_channel
        self.out_channel = out_channel
        self.expand_ratio = expand_ratio
        self.ghost1_drop = ghost1_drop
        self.se_ratio = se_ratio
        self.use_shortcut = shortcut
        self.survival = survival

        # Shortcut-path shape fixups: downsample and/or adjust channels.
        if stride == 2:
            self.poolAverage = torch.nn.AvgPool2d(2,2)
        if in_channel != out_channel:
            self.shortcut = GhostModule(in_channel, out_channel,'Relu', kernel_size=1, stride=1)
        # 1x1 ghost expansion (skipped when expand_ratio == 1).
        if expand_ratio != 1:
            self.ghost1 = GhostModule(in_channel,self.expand_ratio_filter,'Relu',kernel_size=1,stride=1,ratio=2)
            self.ghost1_bn = torch.nn.BatchNorm2d(num_features=self.expand_ratio_filter,eps=epsilon)
            self.ghost1_act = torch.nn.ReLU()
        # Depthwise conv (groups == channels) carrying the block's stride.
        self.depthwise = torch.nn.Conv2d(self.expand_ratio_filter,self.expand_ratio_filter,kernel_size,stride,padding=1,bias=False,
                                         groups=self.expand_ratio_filter)
        self.depthwise_bn = torch.nn.BatchNorm2d(self.expand_ratio_filter,eps=epsilon)
        self.depthwise_act = torch.nn.ReLU()
        # Optional dropout (only when the expansion stage exists).
        if (expand_ratio != 1) and (ghost1_drop is not None) and (ghost1_drop != 0):
            self.ghost1_dropout = torch.nn.Dropout(ghost1_drop)
        # SE block.
        if se_ratio is not None:
            self.se = SE(self.expand_ratio_filter,self.expand_ratio_filter,se_ratio)

        # 1x1 ghost projection back down to out_channel.
        self.ghost2 = GhostModule(self.expand_ratio_filter,out_channel,'Relu',kernel_size=1,stride=1,ratio=2)
        self.ghost2_bn = torch.nn.BatchNorm2d(num_features=out_channel,eps=epsilon)

    def forward(self,input):
        # Prepare the residual ("shorcut" spelling kept from the original).
        shorcut = input
        if self.stride == 2:
            shorcut = self.poolAverage(shorcut)
        if self.in_channel != self.out_channel:
            shorcut = self.shortcut(shorcut)
        # 1x1 ghost expansion.
        if self.expand_ratio != 1:
            input = self.ghost1(input)
            input = self.ghost1_bn(input)
            input = self.ghost1_act(input)

        # Depthwise conv.
        input = self.depthwise(input)
        input = self.depthwise_bn(input)
        input = self.depthwise_act(input)
        # Dropout.
        if (self.expand_ratio != 1) and (self.ghost1_drop is not None) and (self.ghost1_drop != 0):
            input = self.ghost1_dropout(input)
        # SE block.
        if self.se_ratio is not None:
            input = self.se(input)
        # 1x1 ghost projection (raises or lowers the channel count).
        input = self.ghost2(input)
        input = self.ghost2_bn(input)
        # Residual add, optionally gated by stochastic depth.
        if self.use_shortcut:
            # "Survival" = probability the residual branch stays active
            # (stochastic-depth terminology).
            if self.survival is not None and self.survival<1:
                # NOTE(review): a fresh StochasticDepth is built on every
                # forward; stateless, so behavior is unaffected.
                stoDepth = StochasticDepth(p=self.survival,mode='batch')
                return stoDepth(input,shorcut)
            else:
                return torch.add(shorcut,input)
        else:
            return input

class EfficientNetV2(torch.nn.Module):
    '''
    EfficientNetV2 re-implemented after the EfficientNetV2 paper and the
    official code, with Ghost-based MBConv/Fused-MBConv stages.

    Args:
        configuration: per-stage config rows
            [count, kernel, stride, expand_ratio, in_ch, out_ch, fused, se_ratio, shortcut, survival].
        num_class: number of classes (final output width).
        activation: activation name passed through to the stage builder
            (currently ignored by the Ghost blocks).
        width_mult: width multiplier applied to channel counts (default 1).
        depth_mult: depth multiplier applied to per-stage repeat counts.
        ghost1_drop: dropout prob inside the Ghost blocks; 0/None disables.
        stage7_drop: dropout prob after global average pooling; 0/None disables.
        dropout1: forwarded to the blocks (currently unused there).
        epsilon: unused here (kept for signature compatibility).
    Returns:
        a torch.nn.Module.
    '''
    def __init__(self,configuration,num_class:int,activation:str,width_mult:float,depth_mult:float,
                 ghost1_drop=None,stage7_drop=None,dropout1=None,epsilon=1e-5):
        super(EfficientNetV2,self).__init__()
        # Keep the raw drop rate; the Dropout module lives in stage7_dropout.
        # BUGFIX: the original overwrote this float with the nn.Dropout module.
        self.stage7_drop = stage7_drop
        # stage0: stem conv, stride 2.
        self.stage0_conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(3,24,3,2,padding=1,bias=False),
            torch.nn.BatchNorm2d(num_features=24),
            torch.nn.ReLU(),
        )
        # stages 1..6: repeated Ghost_(Fused_)MBConv blocks.
        self.stage1to6 = torch.nn.Sequential()
        for i,stage in enumerate(configuration):
            count = int(math.ceil((stage[0] * depth_mult)))     # stage[0] = repeat count
            for j in range(count):
                self.stage1to6.add_module(
                    name='stage{},count{}'.format(i,j),
                    module=handleInputStageChannels(index=j,in_channel=round_filter(stage[4],width_mult),
                                                    out_channel=round_filter(stage[5],width_mult),
                                                    kernel_size=stage[1],activation=activation,expand_ratio=stage[3],
                                                    use_Fused=stage[6],stride=stage[2],se_ratio=stage[7],ghost1_drop=ghost1_drop,
                                                    dropout1=dropout1,shortcut=stage[8],survival=stage[9]))
        # Final stage.
        # BUGFIX: scale by width_mult to match the real output width of stage 6
        # (the stages above round their out_channel with width_mult too).
        last_channel = round_filter(configuration[-1][5], width_mult)
        self.stage7_conv = torch.nn.Sequential(
            torch.nn.Conv2d(in_channels=last_channel,out_channels=round_filter(1280,width_mult),kernel_size=1,padding='same',bias=False),
            torch.nn.BatchNorm2d(num_features=round_filter(1280,width_mult)),
            torch.nn.ReLU(),
        )
        # Global average pool; forward() squeezes the size-1 spatial dims after it.
        self.stage7_globalAverPool = torch.nn.AdaptiveAvgPool2d(1)
        # BUGFIX: the original tested `self.stage7_conv != 0` (a Sequential,
        # always truthy) instead of the drop rate.
        if (stage7_drop is not None) and (stage7_drop != 0):
            self.stage7_dropout = torch.nn.Dropout(stage7_drop)

        # BUGFIX: classifier input must also be scaled by width_mult to match
        # stage7_conv's output width. (Attribute name "classfier" kept as-is
        # for checkpoint compatibility.)
        self.stage7_classfier = torch.nn.Linear(round_filter(1280,width_mult),num_class)

        self._initialize_weights()

    def get_gram_TopRight(self,matrix:torch.Tensor):
        '''
        Per-sample, per-channel upper-triangular Gram matrices.

        matrix: torch.Tensor of shape (batch, channel, height, width)
        return: torch.Tensor of shape (batch, channel, width, width)
        '''
        # BUGFIX: detach before .numpy() so grad-tracking tensors don't raise.
        matrix = matrix.detach().cpu().numpy()
        features = []
        for b in range(matrix.shape[0]):    # batch
            feature = []
            for f in range(matrix.shape[1]):# channel
                feature2D = matrix[b][f]
                # BUGFIX: the Gram matrix is the matrix product A^T @ A; the
                # original used elementwise `*`, which is not a Gram matrix
                # and only even broadcast for square feature maps.
                gram_matrix = feature2D.T @ feature2D
                gramTopRight = np.triu(gram_matrix,0)  # keep upper triangle
                feature.append(gramTopRight)
            features.append(feature)
        return torch.from_numpy(np.array(features))

    def forward(self,inputs):
        x = self.stage0_conv3(inputs)
        x = self.stage1to6(x)
        x = self.stage7_conv(x)
        x = self.stage7_globalAverPool(x)

        x = x.squeeze(-1).squeeze(-1)  # drop the two trailing 1x1 spatial dims
        if (self.stage7_drop is not None) and (self.stage7_drop != 0):
            x = self.stage7_dropout(x)
        x = self.stage7_classfier(x)
        return x

    def _initialize_weights(self):
        # He-style init for convs, unit BN, small-normal linear layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.001)
                m.bias.data.zero_()

def pr_effinet(select_model:str,num_classes:int,activation:str='relu',width_mult=1.0,depth_mult=1.0,ghost1_drop=0.3,dropout1=0.3,stage7_drop=0.3):
    '''
    Build a Ghost-EfficientNetV2 variant selected by the last character of
    ``select_model`` (e.g. "B0".."B3").

    Stage-row columns: 0 repeat count, 1 kernel size, 2 stride,
    3 expand ratio (MBConv's first conv expands the input channels by this:
    1/4/6), 4 in channels, 5 out channels, 6 use Fused variant,
    7 SE ratio (SE's first FC shrinks by this, usually 1/4), 8 use shortcut,
    9 survival probability.
    '''
    variant = int(select_model[-1])
    configs = {
        0: [
            [1, 3, 1, 1, 24, 24, True, None, 1, None],  # stage 1
            [1, 3, 2, 4, 24, 48, True, None, 1, None],  # stage 2
            [1, 3, 2, 4, 48, 64, True, None, 1, None],  # stage 3
            [1, 3, 2, 4, 64, 128, False, 4, 1, None],  # stage 4
            [1, 3, 1, 6, 128, 160, False, 4, 1, None],  # stage 5
            [2, 3, 2, 6, 160, 256, False, 4, 1, None],  # stage 6
        ],
        1: [
            [1, 3, 1, 1, 24, 24, True, None, 1, None],  # stage 1
            [1, 3, 2, 4, 24, 48, True, None, 1, None],  # stage 2
            [1, 3, 2, 4, 48, 64, True, None, 1, None],  # stage 3
            [1, 3, 2, 4, 64, 128, False, 4, 1, None],  # stage 4
            [2, 3, 1, 6, 128, 160, False, 4, 1, None],  # stage 5
            [3, 3, 2, 6, 160, 256, False, 4, 1, None],  # stage 6
        ],
        2: [
            [1, 3, 1, 1, 24, 24, True, None, 1, None],  # stage 1
            [1, 3, 2, 4, 24, 48, True, None, 1, None],  # stage 2
            [1, 3, 2, 4, 48, 64, True, None, 1, None],  # stage 3
            [2, 3, 2, 4, 64, 128, False, 4, 1, None],  # stage 4
            [2, 3, 1, 6, 128, 160, False, 4, 1, None],  # stage 5
            [4, 3, 2, 6, 160, 256, False, 4, 1, None],  # stage 6
        ],
        3: [
            [1, 3, 1, 1, 24, 24, True, None, 1, 0.5],  # stage 1
            [2, 3, 2, 4, 24, 48, True, None, 1, 0.5],  # stage 2
            [2, 3, 2, 4, 48, 64, True, None, 1, 0.5],  # stage 3
            [3, 3, 2, 4, 64, 128, False, 4, 1, 0.5],  # stage 4
            [3, 3, 1, 6, 128, 160, False, 4, 1, 0.5],  # stage 5
            [4, 3, 2, 6, 160, 256, False, 4, 1, 0.5],  # stage 6
        ],
    }
    if variant not in configs:
        raise ValueError(f"错误的赋值 {variant}")
    print(f"seleted model is {variant}")
    return EfficientNetV2(configs[variant], num_class=num_classes, activation=activation,
                          width_mult=width_mult, depth_mult=depth_mult,
                          ghost1_drop=ghost1_drop, dropout1=dropout1, stage7_drop=stage7_drop)

if __name__ == "__main__":
    # ---- smoke-test the model ----
    dummy = torch.randn((1, 3, 416, 416))
    net = pr_effinet(select_model="B0", num_classes=10, activation='relu')
    print(net(dummy).shape)


# #--------官方计算参数量方法--------------
# model = effnetv2_s(num_classes=8)
#
# total = sum([param.nelement() for param in model.parameters()])
# print("number of parameter:%.2fM"%(total/1e6))

#------------使用thop计算-----------
# from thop import profile
# input = torch.randn(1,3,224,224)
# model = EfficientNetV2_B0(num_class=10,activation='relu')
# flops,params = profile(model,inputs=(input,))
# print("G-EffiNetV2 B0's FLOPS:%f"%(flops))
# print("G-EffiNetV2 B0's Number paramers:%3fM"%(params/1e6))
#
# model = EfficientNetV2_B1(num_class=10,activation='relu')
# flops,params = profile(model,inputs=(input,))
# print("G-EffiNetV2 B1's FLOPS:%f"%(flops))
# print("G-EffiNetV2 B1's Number paramers:%.3fM"%(params/1e6))
#
# model = EfficientNetV2_B2(num_class=10,activation='relu')
# flops,params = profile(model,inputs=(input,))
# print("G-EffiNetV2 B2's FLOPS:%f"%(flops))
# print("G-EffiNetV2 B2's Number paramers:%.3fM"%(params/1e6))

# start = time.time()
# result = EfficientNetV2_B0(num_class=10,activation='relu')(torch.randn(1,3,224,224))
# print(result.shape)
# end = time.time()
# print("Time :",end-start)
