# import math
# import warnings
# import time
import paddle.nn as nn

import paddle

from paddle.nn.initializer import TruncatedNormal, Constant, Normal

# Weight-initialisation helpers shared across the file.
trunc_normal_ = TruncatedNormal(std=.02)
# NOTE(review): unlike the others this binds the *class*, not an instance —
# apparently unused in the visible code; confirm before calling it.
normal_ = Normal
zeros_ = Constant(value=0.)
ones_ = Constant(value=1.)

def _make_divisible(v, divisor, min_value=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by 8
    It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    :param v:
    :param divisor:
    :param min_value:
    :return:
    """
    if min_value is None:
        min_value = divisor
    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v

# from timm.models.layers import SqueezeExcite


def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
    """Round `v` to the nearest multiple of `divisor` with a configurable floor.

    Same rule as `_make_divisible`, but the "don't shrink by more than 10%"
    threshold is exposed as `round_limit` (0 disables the round-up guard).

    :param v: requested channel count
    :param divisor: required divisor (default 8)
    :param min_value: lower bound; any falsy value falls back to `divisor`
    :param round_limit: fraction of `v` below which the result is bumped up
    :return: the adjusted channel count
    """
    floor = min_value if min_value else divisor
    candidate = max(floor, (int(v + divisor / 2) // divisor) * divisor)
    # Guard: don't let rounding shrink the value below round_limit * v.
    if candidate < round_limit * v:
        candidate += divisor
    return candidate


# def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs):
#     act_layer = get_act_layer(name)
#     if act_layer is None:
#         return None
#     return act_layer(**kwargs) if inplace is None else act_layer(inplace=inplace, **kwargs)



class SEModule(nn.Layer):
    """ SE Module as defined in original SE-Nets with a few additions
    Additions include:
        * divisor can be specified to keep channels % div == 0 (default: 8)
        * reduction channels can be specified directly by arg (if rd_channels is set)
        * reduction channels can be specified by float rd_ratio (default: 1/16)
        * customizable activation layer

    Computes a per-channel gate: global average pool -> 1x1 reduce ->
    activation -> 1x1 expand -> sigmoid, multiplied onto the input.
    """
    def __init__(
            self, channels, rd_ratio=1. / 16, rd_channels=None, rd_divisor=8,
            act_layer=nn.ReLU):
        super(SEModule, self).__init__()
        if not rd_channels:
            # Derive the bottleneck width from the ratio, kept divisible.
            rd_channels = make_divisible(channels * rd_ratio, rd_divisor, round_limit=0.)
        self.fc1 = nn.Conv2D(channels, rd_channels, kernel_size=1, bias_attr=True)
        self.act = act_layer()
        self.fc2 = nn.Conv2D(rd_channels, channels, kernel_size=1, bias_attr=True)

    def forward(self, x):
        # Squeeze: average over the spatial dimensions.
        squeezed = x.mean((2, 3), keepdim=True)
        # Excite: bottleneck MLP expressed as two 1x1 convolutions.
        excited = self.fc2(self.act(self.fc1(squeezed)))
        # Gate the input with the sigmoid attention weights.
        return x * nn.functional.sigmoid(excited)



class Conv2D_BN(nn.Sequential):
    """Conv2D (bias-free) followed by BatchNorm2D, fusable into one Conv2D.

    `fuse()` folds the BN statistics into the convolution for inference.
    """
    def __init__(self, a, b, ks=1, stride=1, pad=0, dilation=1,
                 groups=1, bn_weight_init=1, resolution=-10000):
        # a: in channels, b: out channels, ks: kernel size.
        # `resolution` is unused here — presumably kept for signature
        # compatibility with the original RepViT code; confirm before removing.
        super().__init__()
        self.add_sublayer('c', nn.Conv2D(
            a, b, ks, stride, pad, dilation, groups, bias_attr=False))
        self.add_sublayer('bn', nn.BatchNorm2D(b))
        # Initialising the BN scale to 0 makes this branch start as a no-op
        # (used for the residual pw-linear projections in RepViTBlock).
        if bn_weight_init ==1:
            ones_(self.bn.weight)
        else:
            zeros_(self.bn.weight)
        zeros_(self.bn.bias)

    @paddle.no_grad()
    def fuse(self):
        """Fold the BatchNorm into the conv and return a plain nn.Conv2D."""
        c, bn = self.c, self.bn
        # Per-output-channel scale gamma / sqrt(var + eps), applied to the kernel.
        w = bn.weight / (bn._variance + bn._epsilon)**0.5
        w = c.weight * w[:, None, None, None]
        # Fused bias: beta - mu * gamma / sqrt(var + eps) (conv has no bias).
        b = bn.bias - bn._mean * bn.weight / \
            (bn._variance + bn._epsilon)**0.5
        # w.shape[1] is in_channels // groups, so multiply groups back in to
        # recover in_channels; the fused conv keeps its default (trainable) bias.
        m = nn.Conv2D(w.shape[1] * self.c._groups, w.shape[0], w.shape[2:], stride=self.c._stride, padding=self.c._padding, dilation=self.c._dilation, groups=self.c._groups)
        m.weight.set_value(w)
        m.bias.set_value(b)
        return m

class Residual(nn.Layer):
    """Residual wrapper: y = x + m(x), with optional stochastic depth.

    Args:
        m: the wrapped layer.
        drop: per-sample probability of dropping the branch during training
            (stochastic depth). 0 disables it.
    """
    def __init__(self, m, drop=0.):
        super().__init__()
        self.m = m
        self.drop = drop

    def forward(self, x):
        if self.training and self.drop > 0:
            # Stochastic depth: keep the branch per sample with prob (1 - drop),
            # rescaled so the expected output matches inference.
            # Fixed: the previous code used torch-only APIs (x.size(0),
            # Tensor.ge_, .div) that do not exist on paddle tensors and
            # crashed whenever drop > 0 in training.
            keep = paddle.rand([x.shape[0], 1, 1, 1]) >= self.drop
            mask = keep.astype(x.dtype) / (1 - self.drop)
            return x + self.m(x) * mask.detach()
        else:
            return x + self.m(x)

    @paddle.no_grad()
    def fuse(self):
        """Fold the identity shortcut into the wrapped conv where possible."""
        if isinstance(self.m, Conv2D_BN):
            m = self.m.fuse()
            # Only valid for depthwise convs (one input channel per group),
            # where adding a centre-tap 1 makes the kernel an identity pass.
            # Fixed: paddle's nn.Conv2D stores these as private attributes
            # (_groups/_in_channels), consistent with Conv2D_BN.fuse above;
            # the previous `m.in_channels` raised AttributeError.
            assert(m._groups == m._in_channels)
            identity = paddle.ones([m.weight.shape[0], m.weight.shape[1], 1, 1])
            identity = nn.functional.pad(identity, [1,1,1,1])
            m.weight += identity
            return m
        elif isinstance(self.m, nn.Conv2D):
            m = self.m
            # NOTE(review): this branch asserts a NON-depthwise conv yet still
            # adds centre-tap ones — mirrors the upstream RepViT code; confirm
            # it is ever reached before relying on it.
            assert(m._groups != m._in_channels)
            identity = paddle.ones([m.weight.shape[0], m.weight.shape[1], 1, 1])
            identity = nn.functional.pad(identity, [1,1,1,1])
            m.weight += identity
            return m
        else:
            return self


class RepVGGDW(nn.Layer):
    """RepVGG-style depthwise block: 3x3 DW conv + 1x1 DW conv + identity, then BN.

    At inference `fuse()` re-parameterises the three branches and the trailing
    BatchNorm into a single 3x3 depthwise Conv2D.
    """
    def __init__(self, ed) -> None:
        super().__init__()
        # 3x3 depthwise conv with its own (fusable) BN.
        self.conv = Conv2D_BN(ed, ed, 3, 1, 1, groups=ed)
        # 1x1 depthwise conv (has a bias, no BN).
        self.conv1 = nn.Conv2D(ed, ed, 1, 1, 0, groups=ed)
        self.dim = ed
        # BN applied to the sum of all three branches.
        self.bn = nn.BatchNorm2D(ed)
    
    def forward(self, x):
        # Sum of 3x3 branch, 1x1 branch and the identity, then normalise.
        return self.bn((self.conv(x) + self.conv1(x)) + x)
    
    @paddle.no_grad()
    def fuse(self):
        """Merge the three branches plus the trailing BN into one Conv2D."""
        conv = self.conv.fuse()
        conv1 = self.conv1
        
        conv_w = conv.weight
        conv_b = conv.bias
        conv1_w = conv1.weight
        conv1_b = conv1.bias
        
        # Zero-pad the 1x1 kernel to 3x3 so it can be added to the 3x3 kernel.
        conv1_w = nn.functional.pad(conv1_w, [1,1,1,1])

        # Identity branch as a 3x3 kernel: a 1 at the centre tap.
        # NOTE(review): ones over all of [out, in/groups, 1, 1] is an exact
        # identity only because the convs here are depthwise (in/groups == 1).
        identity = nn.functional.pad(paddle.ones([conv1_w.shape[0], conv1_w.shape[1], 1, 1]), [1,1,1,1])

        final_conv_w = conv_w + conv1_w + identity
        final_conv_b = conv_b + conv1_b

        conv.weight.set_value(final_conv_w)
        conv.bias.set_value(final_conv_b)

        # Fold the outer BatchNorm into the merged conv (same algebra as
        # Conv2D_BN.fuse, but here the conv already carries a bias).
        bn = self.bn
        w = bn.weight / (bn._variance + bn._epsilon)**0.5
        w = conv.weight * w[:, None, None, None]
        b = bn.bias + (conv.bias - bn._mean) * bn.weight / \
            (bn._variance + bn._epsilon)**0.5
        conv.weight.set_value(w)
        conv.bias.set_value(b)
        return conv


class RepViTBlock(nn.Layer):
    """RepViT building block: token mixer (depthwise conv [+ SE]) followed by
    a residual channel mixer (1x1 conv FFN with 2x expansion).

    Args:
        inp: input channels.
        hidden_dim: FFN expansion width; must equal 2 * inp (asserted below).
        oup: output channels.
        kernel_size: depthwise kernel size of the downsampling token mixer.
        stride: 1 (identity block) or 2 (downsampling block).
        use_se: insert an SEModule into the token mixer.
        use_hs: activation selector flag (see NOTE below — currently no effect).
    """
    def __init__(self, inp, hidden_dim, oup, kernel_size, stride, use_se, use_hs):
        super(RepViTBlock, self).__init__()
        assert stride in [1, 2]

        self.identity = stride == 1 and inp == oup
        assert(hidden_dim == 2 * inp)

        if stride == 2:
            # Downsampling block. stride [2, 1] halves only the height —
            # presumably an OCR/text-recognition adaptation that preserves the
            # horizontal (sequence) resolution; TODO confirm against callers.
            stride = [2, 1]
            self.token_mixer = nn.Sequential(
                Conv2D_BN(inp, inp, kernel_size, stride, (kernel_size - 1) // 2, groups=inp),
                SEModule(inp, 0.25) if use_se else nn.Identity(),
                Conv2D_BN(inp, oup, ks=1, stride=1, pad=0)
            )
            self.channel_mixer = Residual(nn.Sequential(
                    # pw
                    Conv2D_BN(oup, 2 * oup, 1, 1, 0),
                    # NOTE(review): both branches are nn.GELU(), so use_hs has
                    # no effect on the activation (matches the upstream repo).
                    nn.GELU() if use_hs else nn.GELU(),
                    # pw-linear; bn_weight_init=0 makes it start as a no-op.
                    Conv2D_BN(2 * oup, oup, 1, 1, 0, bn_weight_init=0),
                ))
        else:
            # Stride-1 block: requires inp == oup so the RepVGGDW identity holds.
            assert(self.identity)
            self.token_mixer = nn.Sequential(
                RepVGGDW(inp),
                SEModule(inp, 0.25) if use_se else nn.Identity(),
            )
            self.channel_mixer = Residual(nn.Sequential(
                    # pw
                    Conv2D_BN(inp, hidden_dim, 1, 1, 0),
                    # NOTE(review): both branches are nn.GELU() here as well.
                    nn.GELU() if use_hs else nn.GELU(),
                    # pw-linear; bn_weight_init=0 makes it start as a no-op.
                    Conv2D_BN(hidden_dim, oup, 1, 1, 0, bn_weight_init=0),
                ))

    def forward(self, x):
        # Token mixing (spatial) then channel mixing (pointwise FFN).
        return self.channel_mixer(self.token_mixer(x))

# f
# def _no_grad_trunc_normal_(tensor, mean, std, a, b):
#     # Cut & paste from Pypaddle official master until it's in a few official releases - RW
#     # Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
#     def norm_cdf(x):
#         # Computes standard normal cumulative distribution function
#         return (1. + math.erf(x / math.sqrt(2.))) / 2.

#     if (mean < a - 2 * std) or (mean > b + 2 * std):
#         warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
#                       "The distribution of values may be incorrect.",
#                       stacklevel=2)

#     with paddle.no_grad():
#         # Values are generated by using a truncated uniform distribution and
#         # then using the inverse CDF for the normal distribution.
#         # Get upper and lower cdf values
#         l = norm_cdf((a - mean) / std)
#         u = norm_cdf((b - mean) / std)

#         # Uniformly fill tensor with values from [l, u], then translate to
#         # [2l-1, 2u-1].
#         tensor.uniform_(2 * l - 1, 2 * u - 1)

#         # Use inverse cdf transform for normal distribution to get truncated
#         # standard normal
#         tensor.erfinv_()

#         # Transform to proper mean, std
#         tensor.mul_(std * math.sqrt(2.))
#         tensor.add_(mean)

#         # Clamp to ensure it's in the proper range
#         tensor.clamp_(min=a, max=b)
#         return tensor


# def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
#     # type: (Tensor, float, float, float, float) -> Tensor
#     r"""Fills the input Tensor with values drawn from a truncated
#     normal distribution. The values are effectively drawn from the
#     normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
#     with values outside :math:`[a, b]` redrawn until they are within
#     the bounds. The method used for generating the random values works
#     best when :math:`a \leq \text{mean} \leq b`.
#     Args:
#         tensor: an n-dimensional `paddle.Tensor`
#         mean: the mean of the normal distribution
#         std: the standard deviation of the normal distribution
#         a: the minimum cutoff value
#         b: the maximum cutoff value
#     Examples:
#         >>> w = paddle.empty(3, 5)
#         >>> nn.init.trunc_normal_(w)
#     """
#     return _no_grad_trunc_normal_(tensor, mean, std, a, b)

# class BN_Linear(nn.Sequential):
#     def __init__(self, a, b, bias=True, std=0.02):
#         super().__init__()
#         self.add_sublayer('bn', nn.BatchNorm1D(a))
#         self.add_sublayer('l', nn.Linear(a, b, bias_attr=bias))
#         trunc_normal_(self.l.weight)
#         if bias:
#             zeros_(self.l.bias)

#     @paddle.no_grad()
#     def fuse(self):
#         bn, l = self.bn, self.l
#         w = bn.weight / (bn._variance + bn._epsilon)**0.5
#         b = bn.bias - self.bn._mean * \
#             self.bn.weight / (bn._variance + bn._epsilon)**0.5
#         w = l.weight * w[None, :]
#         if l.bias is None:
#             b = b @ self.l.weight.T
#         else:
#             b = (l.weight @ b[:, None]).view(-1) + self.l.bias
#         m = nn.Linear(w.size(1), w.size(0))
#         m.weight.set_value(w)
#         m.bias.set_value(b)
#         return m

# class Classfier(nn.Layer):
#     def __init__(self, dim, in_channels, distillation=True):
#         super().__init__()
#         self.classifier = BN_Linear(dim, in_channels) if in_channels > 0 else nn.Identity()
#         self.distillation = distillation
#         if distillation:
#             self.classifier_dist = BN_Linear(dim, in_channels) if in_channels > 0 else nn.Identity()

#     def forward(self, x):
#         if self.distillation:
#             x = self.classifier(x), self.classifier_dist(x)
#             if not self.training:
#                 x = (x[0] + x[1]) / 2
#         else:
#             x = self.classifier(x)
#         return x

#     @paddle.no_grad()
#     def fuse(self):
#         classifier = self.classifier.fuse()
#         if self.distillation:
#             classifier_dist = self.classifier_dist.fuse()
#             classifier.weight += classifier_dist.weight
#             classifier.bias += classifier_dist.bias
#             classifier.weight /= 2
#             classifier.bias /= 2
#             return classifier
#         else:
#             return classifier

class RepViT(nn.Layer):
    """RepViT backbone.

    Args:
        cfgs: block configuration table; each row is
            [kernel k, expansion t, channels c, use_se, use_hs, stride s].
        in_channels: kept for API compatibility with the (disabled) classifier
            head; not used by the backbone itself.
        distillation: kept for API compatibility; unused.
    """
    def __init__(self, cfgs, in_channels=1000, distillation=False):
        super(RepViT, self).__init__()
        # Setting of the inverted residual blocks.
        self.cfgs = cfgs

        # Stem: two stride-2 convs (overall stride 4) up to the first stage width.
        stem_width = self.cfgs[0][2]
        stem = nn.Sequential(
            Conv2D_BN(3, stem_width // 2, 3, 2, 1),
            nn.GELU(),
            Conv2D_BN(stem_width // 2, stem_width, 3, 2, 1))

        # Build the block stack from the config table.
        stages = [stem]
        in_ch = stem_width
        for k, t, c, use_se, use_hs, s in self.cfgs:
            out_ch = _make_divisible(c, 8)
            hidden = _make_divisible(in_ch * t, 8)
            stages.append(RepViTBlock(in_ch, hidden, out_ch, k, s, use_se, use_hs))
            in_ch = out_ch
        self.features = nn.LayerList(stages)
        self.out_channels = self.cfgs[-1][2]

    def forward(self, x):
        for stage in self.features:
            x = stage(x)
        # Average-pool the whole remaining height and width pairs — collapses
        # the map to height 1, presumably for a sequence/recognition head.
        return nn.functional.avg_pool2d(x, [x.shape[2], 2])

# from timm.models import register_model


# @register_model
def repvit_m0_6(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT-M0.6 backbone.

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here;
    `in_channels`/`distillation` are forwarded to RepViT (currently unused there).
    """
    # k, t, c, SE, HS, s 
    cfgs = [
        [3,   2,  40, 1, 0, 1],
        [3,   2,  40, 0, 0, 1],

        [3,   2,  80, 0, 0, 2],
        [3,   2,  80, 1, 0, 1],
        [3,   2,  80, 0, 0, 1],

        [3,   2, 160, 0, 1, 2],
        [3,   2, 160, 1, 1, 1],
        [3,   2, 160, 0, 1, 1],
        [3,   2, 160, 1, 1, 1],
        [3,   2, 160, 0, 1, 1],
        [3,   2, 160, 1, 1, 1],
        [3,   2, 160, 0, 1, 1],
        [3,   2, 160, 1, 1, 1],
        [3,   2, 160, 0, 1, 1],
        [3,   2, 160, 0, 1, 1],

        [3,   2, 320, 0, 1, 2],
        [3,   2, 320, 1, 1, 1],
    ]
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)

# @register_model
def repvit_svtr(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT backbone variant — presumably sized for SVTR-style
    text recognition (TODO confirm naming against the caller).

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here.
    """
    # k, t, c, SE, HS, s 
    cfgs = [
        # [3,   2,  48, 1, 0, 1],
        # [3,   2,  40, 0, 0, 1],

        # [3,   2,  80, 0, 0, 1],
        [3,   2,  96, 1, 0, 1],
        [3,   2,  96, 0, 0, 1],
        [3,   2,  96, 0, 0, 1],

        [3,   2, 192, 0, 1, 2],

        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        # [3,   2, 160, 0, 1, 1],
        # [3,   2, 160, 1, 1, 1],
        # [3,   2, 160, 0, 1, 1],
        # [3,   2, 160, 0, 1, 1],

        [3,   2, 384, 0, 1, 2],

        [3,   2, 384, 1, 1, 1],
        [3,   2, 384, 0, 1, 1],
        # [3,   2, 384, 1, 1, 1],
    ]
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)

# @register_model
def repvit_m0_9(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT-M0.9 backbone.

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here.
    """
    cfgs = [
        # k, t, c, SE, HS, s 
        # [3,   2,  48, 1, 0, 1],
        # [3,   2,  48, 0, 0, 1],
        # [3,   2,  48, 0, 0, 1],
        [3,   2,  96, 1, 0, 1],
        [3,   2,  96, 0, 0, 1],
        [3,   2,  96, 1, 0, 1],
        [3,   2,  96, 0, 0, 1],

        [3,   2,  192, 0, 1, 2],

        [3,   2,  192, 1, 1, 1],
        [3,   2,  192, 0, 1, 1],
        [3,   2,  192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 1, 1, 1],
        [3,   2, 192, 0, 1, 1],
        [3,   2, 192, 0, 1, 1],

        [3,   2, 384, 0, 1, 2],

        [3,   2, 384, 1, 1, 1],
        [3,   2, 384, 0, 1, 1]
    ]
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)

# @register_model
def repvit_m1_0(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT-M1.0 backbone.

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here.
    """
    cfgs = [
        # k, t, c, SE, HS, s 
        [3,   2,  56, 1, 0, 1],
        [3,   2,  56, 0, 0, 1],
        [3,   2,  56, 0, 0, 1],
        [3,   2,  112, 0, 0, 2],
        [3,   2,  112, 1, 0, 1],
        [3,   2,  112, 0, 0, 1],
        [3,   2,  112, 0, 0, 1],
        [3,   2,  224, 0, 1, 2],
        [3,   2,  224, 1, 1, 1],
        [3,   2,  224, 0, 1, 1],
        [3,   2,  224, 1, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 224, 1, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 224, 1, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 224, 1, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 224, 1, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 224, 1, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 224, 0, 1, 1],
        [3,   2, 448, 0, 1, 2],
        [3,   2, 448, 1, 1, 1],
        [3,   2, 448, 0, 1, 1]
    ]
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)


# @register_model
def repvit_m1_1(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT-M1.1 backbone.

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here.
    """
    cfgs = [
        # k, t, c, SE, HS, s 
        [3,   2,  64, 1, 0, 1],
        [3,   2,  64, 0, 0, 1],
        [3,   2,  64, 0, 0, 1],
        [3,   2,  128, 0, 0, 2],
        [3,   2,  128, 1, 0, 1],
        [3,   2,  128, 0, 0, 1],
        [3,   2,  128, 0, 0, 1],
        [3,   2,  256, 0, 1, 2],
        [3,   2,  256, 1, 1, 1],
        [3,   2,  256, 0, 1, 1],
        [3,   2,  256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 512, 0, 1, 2],
        [3,   2, 512, 1, 1, 1],
        [3,   2, 512, 0, 1, 1]
    ]
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)


# @register_model
def repvit_m1_5(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT-M1.5 backbone.

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here.
    """
    cfgs = [
        # k, t, c, SE, HS, s 
        [3,   2,  64, 1, 0, 1],
        [3,   2,  64, 0, 0, 1],
        [3,   2,  64, 1, 0, 1],
        [3,   2,  64, 0, 0, 1],
        [3,   2,  64, 0, 0, 1],
        [3,   2,  128, 0, 0, 2],
        [3,   2,  128, 1, 0, 1],
        [3,   2,  128, 0, 0, 1],
        [3,   2,  128, 1, 0, 1],
        [3,   2,  128, 0, 0, 1],
        [3,   2,  128, 0, 0, 1],
        [3,   2,  256, 0, 1, 2],
        [3,   2,  256, 1, 1, 1],
        [3,   2,  256, 0, 1, 1],
        [3,   2,  256, 1, 1, 1],
        [3,   2,  256, 0, 1, 1],
        [3,   2,  256, 1, 1, 1],
        [3,   2,  256, 0, 1, 1],
        [3,   2,  256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 1, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 256, 0, 1, 1],
        [3,   2, 512, 0, 1, 2],
        [3,   2, 512, 1, 1, 1],
        [3,   2, 512, 0, 1, 1],
        [3,   2, 512, 1, 1, 1],
        [3,   2, 512, 0, 1, 1]
    ]
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)



# @register_model
def repvit_m2_3(pretrained=False, in_channels = 1000, distillation=False):
    """
    Constructs a RepViT-M2.3 backbone.

    Each cfg row is [kernel k, expansion t, channels c, use_se, use_hs, stride s].
    `pretrained` is accepted for API compatibility but no weights are loaded here.
    """
    cfgs = [
        # k, t, c, SE, HS, s 
        [3,   2,  80, 1, 0, 1],
        [3,   2,  80, 0, 0, 1],
        [3,   2,  80, 1, 0, 1],
        [3,   2,  80, 0, 0, 1],
        [3,   2,  80, 1, 0, 1],
        [3,   2,  80, 0, 0, 1],
        [3,   2,  80, 0, 0, 1],
        [3,   2,  160, 0, 0, 2],
        [3,   2,  160, 1, 0, 1],
        [3,   2,  160, 0, 0, 1],
        [3,   2,  160, 1, 0, 1],
        [3,   2,  160, 0, 0, 1],
        [3,   2,  160, 1, 0, 1],
        [3,   2,  160, 0, 0, 1],
        [3,   2,  160, 0, 0, 1],
        [3,   2,  320, 0, 1, 2],
        [3,   2,  320, 1, 1, 1],
        [3,   2,  320, 0, 1, 1],
        [3,   2,  320, 1, 1, 1],
        [3,   2,  320, 0, 1, 1],
        [3,   2,  320, 1, 1, 1],
        [3,   2,  320, 0, 1, 1],
        [3,   2,  320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 1, 1, 1],
        [3,   2, 320, 0, 1, 1],
        # [3,   2, 320, 1, 1, 1],
        # [3,   2, 320, 0, 1, 1],
        [3,   2, 320, 0, 1, 1],
        [3,   2, 640, 0, 1, 2],
        [3,   2, 640, 1, 1, 1],
        [3,   2, 640, 0, 1, 1],
        # [3,   2, 640, 1, 1, 1],
        # [3,   2, 640, 0, 1, 1]
    ]    
    return RepViT(cfgs, in_channels=in_channels, distillation=distillation)

# paddle.autograd.set_grad_enabled(False)

# T0 = 5
# T1 = 10

# def throughput(name, model, device, batch_size, resolution=224):
#     inputs = paddle.randn([batch_size, 3, 32, 128])
#     # paddle.cuda.empty_cache()
#     # paddle.cuda.synchronize()
#     start = time.time()
#     while time.time() - start < T0:
#         model(inputs)
#     timing = []
#     # paddle.cuda.synchronize()
#     while sum(timing) < T1:
#         start = time.time()
#         y = model(inputs)
#         # paddle.cuda.synchronize()
#         y = y.cpu().detach()
#         timing.append(time.time() - start)
#     timing = paddle.to_tensor(timing, dtype=paddle.float32)
#     print(name, device, batch_size / timing.mean().item(),
#           'images/s @ batch size', batch_size)

# device = "cuda:0"

# from argparse import ArgumentParser

# parser = ArgumentParser()

# parser.add_argument('--model', default='repvit_m0_9', type=str)
# parser.add_argument('--resolution', default=224, type=int)
# parser.add_argument('--batch-size', default=2048, type=int)

# def replace_batchnorm(net):
#     for child_name, child in net.named_children():
#         if hasattr(child, 'fuse'):
#             fused = child.fuse()
#             setattr(net, child_name, fused)
#             replace_batchnorm(fused)
#         elif isinstance(child, nn.BatchNorm2D):
#             setattr(net, child_name, nn.Identity())
#         else:
#             replace_batchnorm(child)

# if __name__ == "__main__":
#     # args = parser.parse_args()
#     for model_name in [repvit_svtr, repvit_m0_9, repvit_m1_0]:#[repvit_svtr, repvit_m0_6, repvit_m1_0, repvit_m0_9, repvit_m1_1, repvit_m1_5, repvit_m2_3]:
#     # model_name = args.model
#         batch_size = 1
#         resolution = 224
#         # paddle.cuda.empty_cache()
#         inputs = paddle.randn([batch_size, 3, 32,
#                                 128])
#         model = model_name(in_channels=1000)
#         total_params = sum(p.numel() for p in model.parameters())
#         print(f"模型参数量为：{total_params}")
#         replace_batchnorm(model)
#         total_params = sum(p.numel() for p in model.parameters())
#         print(f"模型参数量为：{total_params}")
#         # model.to(device)
#         model.eval()
#         throughput(model_name, model, device, batch_size, resolution=resolution)