import torch
import torch.nn as nn
from .backbones.resnet import ResNet, BasicBlock, Bottleneck
from loss.metric_learning import Arcface, Cosface, AMSoftmax, CircleLoss
from .backbones.resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
from .backbones.se_resnet_ibn_a import se_resnet101_ibn_a
from .backbones.resnet_ibn_b import resnet101_ibn_b
from .backbones.efficientnet import EfficientNet
from .backbones.hrnet import HRnet
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchvision.models import resnet34
import logging
import copy

# Per-branch feature dimension produced by each reduction head.
feat_channel_branch = 258  #258
# Total concatenated feature dimension (9 heads in the default configuration).
feat_channel = feat_channel_branch * 9

only_use_Max_pool = False  # False: avg+max, True: max only
only_use_Avg_pool = False  # at most one of the two flags may be True; both False: avg+max
use_ABP = False  # True: use activation-based partitioning (split_at_H_dim) instead of fixed stripes
close_OP_path = False  # True: remove the OP (original MGN) local branches
add_douzi_path = True  # True: add the "douzi" path (2x2 pooled cells concatenated together)
close_RP_path = True  # True: remove the RP (receptive partition) local branches
start_from_conv2 = False  # True: RP splitting starts from conv2; False: from conv3


def weights_init_kaiming(m):
    """Kaiming (He) initialization dispatched on the module's class name.

    Linear: fan_out kaiming normal; Conv: fan_in kaiming normal; BatchNorm
    (affine): weight=1, bias=0.  Biases, when present, are zeroed.
    Intended for use with ``module.apply(weights_init_kaiming)``.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        # The bare try/except fallback to the deprecated nn.init.constant was
        # removed: nn.init.constant_ handles every torch version in use here.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)


class ClassBlock(nn.Module):
    """Embedding head: optional LeakyReLU followed by a bias-free Linear
    projecting ``input_dim`` -> ``num_bottleneck``.

    Layout: |--(LeakyReLU)--|--Linear--|; there is no BatchNorm, so the
    returned tensor is the raw bottleneck feature.
    """

    def __init__(self, input_dim, relu=True, num_bottleneck=2048):
        super(ClassBlock, self).__init__()
        layers = []
        if relu:
            layers += [nn.LeakyReLU(0.1)]
        layers += [nn.Linear(input_dim, num_bottleneck, bias=False)]
        add_block1 = nn.Sequential(*layers)
        add_block1.apply(weights_init_kaiming)
        # A dead identity-shortcut branch for input_dim == num_bottleneck was
        # removed here: it assigned an empty Sequential that was immediately
        # overwritten by the line below, so behavior is unchanged.
        self.add_block1 = add_block1

    def forward(self, x):
        # Flatten to (N, input_dim) before the fc stack.
        x = x.view(x.shape[0], -1)
        return self.add_block1(x)  # bottleneck feature without BN


class mul_to_one_size(nn.Module):
    """conv1*1 bn relu -> conv
    H×W变为1×1"""

    def __init__(self, input_size, input_channel, out_channel):
        super(mul_to_one_size, self).__init__()
        mid_channel = out_channel // (input_size[0] * input_size[1])
        self.bn0 = nn.BatchNorm2d(input_channel)
        self.conv1 = nn.Conv2d(input_channel, mid_channel, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_channel)
        self.relu = nn.LeakyReLU(0.1)
        self.conv2 = nn.Conv2d(mid_channel, out_channel, kernel_size=input_size, bias=False)
        self.bn2 = nn.BatchNorm2d(out_channel)

    def forward(self, x):
        x = self.bn0(x)
        x = self.relu(x)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)

        x = self.conv2(x)

        x = x.view(x.shape[0], -1)
        return x


class reduction(nn.Module):
    """Channel-reduction head: bn -> relu -> 1x1 conv -> bn -> relu, flattened.

    Plain MGN uses conv-bn-relu only; here the input is additionally
    normalized and activated before the 1x1 convolution.
    """

    def __init__(self, input_channel, out_channel):
        super(reduction, self).__init__()
        self.bn0 = nn.BatchNorm2d(input_channel)
        self.relu = nn.LeakyReLU(0.1)
        self.conv1 = nn.Conv2d(input_channel, out_channel, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_channel)

        # Bundle the whole pipeline into one Sequential so a single .apply()
        # initializes every layer.
        self.backbone = nn.Sequential(
            self.bn0, self.relu, self.conv1, self.bn1, self.relu)
        self.backbone.apply(weights_init_kaiming)

    def forward(self, x):
        out = self.backbone(x)
        # Flatten (N, C, H, W) -> (N, C*H*W) for the downstream fc / BNNeck.
        return out.view(out.shape[0], -1)


class Pooling(nn.Module):
    """Adaptive pooling to ``gap_size``; the mode is selected by the
    module-level flags: max only, avg only, or the element-wise sum of both
    (the default when neither flag is set).
    """

    def __init__(self, gap_size):
        super(Pooling, self).__init__()
        self.avg = nn.AdaptiveAvgPool2d(gap_size)
        self.max = nn.AdaptiveMaxPool2d(gap_size)

    def forward(self, x):
        if only_use_Max_pool:
            return self.max(x)
        if only_use_Avg_pool:
            return self.avg(x)
        return self.avg(x) + self.max(x)


class GeM(nn.Module):
    """Generalized-mean pooling to 1x1.

    Computes ``avg_pool(clamp(x, eps) ** p) ** (1/p)``.  When ``freeze_p``
    is False, ``p`` becomes a learnable Parameter; otherwise it stays a
    plain float.
    """

    def __init__(self, p=3.0, eps=1e-6, freeze_p=True):
        super(GeM, self).__init__()
        if freeze_p:
            self.p = p
        else:
            self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        clamped = x.clamp(min=self.eps)
        pooled = F.adaptive_avg_pool2d(clamped.pow(self.p), (1, 1))
        return pooled.pow(1. / self.p)

    def __repr__(self):
        # Unwrap the Parameter so repr always shows a plain float.
        p_value = self.p if isinstance(self.p, float) else self.p.data.tolist()[0]
        return '{}(p={:.4f}, eps={})'.format(self.__class__.__name__, p_value, self.eps)


def weights_init_classifier(m):
    """Initialize classifier Linear layers: small-std normal weight, zero bias.

    Intended for ``module.apply(weights_init_classifier)``.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # Bugfix: `if m.bias:` raises "Boolean value of Tensor with more than
        # one element is ambiguous" whenever a bias tensor exists; test for
        # presence with `is not None` instead.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)


class Backbone(nn.Module):
    """MGN-style multi-branch re-ID network.

    A shared ResNet trunk runs through layer3 (conv3); two independent copies
    of layer4 form "branch2" and "branch3".  Per branch, up to three kinds of
    heads are attached, each producing a ``feat_channel_branch``-dim feature
    via reduction -> BNNeck -> classifier:

    * OP (original path): MGN global pooling plus 2 (branch2) / 3 (branch3)
      horizontal stripes (or ABP splits when ``use_ABP``).
    * RP (receptive path): the conv3 (or conv2) map is chunked along H and the
      chunks run through conv4 stacked on the batch dim, giving each part its
      own receptive field.
    * DZ ("douzi" path): 2x2 adaptive avg+max pooling whose four cells are
      concatenated channel-wise before reduction.

    Training returns an interleaved [cls_score, feature, ...] list; eval
    returns the concatenation of all head features, taken before or after the
    BNNeck depending on ``cfg.TEST.NECK_FEAT``.
    """

    def __init__(self, num_classes, cfg):
        super(Backbone, self).__init__()
        last_stride = cfg.MODEL.LAST_STRIDE
        model_path = cfg.MODEL.PRETRAIN_PATH
        model_name = cfg.MODEL.NAME
        pretrain_choice = cfg.MODEL.PRETRAIN_CHOICE
        self.cos_layer = cfg.MODEL.COS_LAYER
        self.neck = cfg.MODEL.NECK
        self.neck_feat = cfg.TEST.NECK_FEAT

        if model_name == 'resnet50_ibn_a':
            self.in_planes = 2048
            self.base = resnet50_ibn_a(last_stride)
            print('using resnet50_ibn_a as a backbone')
        elif model_name == 'resnet18':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[2, 2, 2, 2])
            print('using resnet18 as a backbone')
        elif model_name == 'resnet101_ibn_a':
            self.in_planes = 2048
            self.base = resnet101_ibn_a(last_stride, frozen_stages=cfg.MODEL.FROZEN)
            print('using resnet101_ibn_a as a backbone')
        elif model_name == 'se_resnet101_ibn_a':
            self.in_planes = 2048
            self.base = se_resnet101_ibn_a(last_stride)
            print('using se_resnet101_ibn_a as a backbone')
        elif model_name == 'resnet101_ibn_b':
            self.in_planes = 2048
            self.base = resnet101_ibn_b(last_stride)
            print('using resnet101_ibn_b as a backbone')

        if pretrain_choice == 'imagenet':
            self.base.load_param(model_path)
            print('Loading pretrained ImageNet model......from {}'.format(model_path))

        # The classification output layer depends on the configured ID loss.
        self.num_classes = num_classes
        self.ID_LOSS_TYPE = cfg.MODEL.ID_LOSS_TYPE
        if self.ID_LOSS_TYPE == 'arcface':
            print('using {} with s:{}, m: {}'.format(self.ID_LOSS_TYPE, cfg.SOLVER.COSINE_SCALE,
                                                     cfg.SOLVER.COSINE_MARGIN))
            self.classifier = Arcface(feat_channel_branch, self.num_classes,
                                      s=cfg.SOLVER.COSINE_SCALE, m=cfg.SOLVER.COSINE_MARGIN)
        else:
            # Plain linear layer, i.e. softmax ID loss.
            # NOTE(review): this is sized with self.in_planes rather than
            # feat_channel_branch, and the per-head copies are invoked as
            # cls(x, label) in forward -- only the arcface setting appears
            # exercised; confirm before training with softmax.
            self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
            self.classifier.apply(weights_init_classifier)

        # BNNeck template; bias frozen per the BNNeck trick.  Each head below
        # gets its own deepcopy of this (and of self.classifier).
        self.bottleneck = nn.BatchNorm1d(feat_channel_branch)
        self.bottleneck.bias.requires_grad_(False)
        self.bottleneck.apply(weights_init_kaiming)

        self.start_from_conv2 = start_from_conv2  # True: RP splits start from conv2; False: from conv3
        self.use_ABP = use_ABP
        self.close_OP_path = close_OP_path  # True: remove the OP local branches
        self.close_RP_path = close_RP_path  # True: remove the RP local branches
        self.add_douzi_path = add_douzi_path  # True: add the douzi path (2x2 cells concatenated)

        gap1_size, gap2_size, gap3_size, gap22_size = (1, 1), (2, 1), (3, 1), (2, 2)
        # The two branches after conv3 do not share conv4 weights, but the OP
        # and RP paths of the same branch do share it (plain deepcopy of the
        # backbone's layer4; the extra downsampling of the original code was
        # dropped).
        self.branch2_conv4 = copy.deepcopy(self.base.layer4)
        self.branch3_conv4 = copy.deepcopy(self.base.layer4)
        if not self.close_OP_path:
            # OP2: MGN path with 2 stripes; OP3: 3 stripes.
            # branch2
            self.OP2_avg1 = Pooling(gap1_size)
            self.OP2_avg2 = Pooling(gap2_size)
            self.OP2_p1_avg1 = Pooling(gap1_size)
            self.OP2_p2_avg1 = Pooling(gap1_size)
            self.OP2_gp_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP2_p1_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP2_p2_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP2_gp_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP2_p1_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP2_p2_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP2_gp_cls = copy.deepcopy(self.classifier)
            self.OP2_p1_cls = copy.deepcopy(self.classifier)
            self.OP2_p2_cls = copy.deepcopy(self.classifier)
            # branch3
            self.OP3_avg1 = Pooling(gap1_size)
            self.OP3_avg3 = Pooling(gap3_size)
            self.OP3_p1_avg1 = Pooling(gap1_size)
            self.OP3_p2_avg1 = Pooling(gap1_size)
            self.OP3_p3_avg1 = Pooling(gap1_size)
            self.OP3_gp_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP3_p1_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP3_p2_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP3_p3_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP3_gp_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP3_p1_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP3_p2_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP3_p3_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP3_gp_cls = copy.deepcopy(self.classifier)
            self.OP3_p1_cls = copy.deepcopy(self.classifier)
            self.OP3_p2_cls = copy.deepcopy(self.classifier)
            self.OP3_p3_cls = copy.deepcopy(self.classifier)
        else:
            # OP locals disabled: keep only the global-pooling heads.
            self.OP2_avg1 = Pooling(gap1_size)
            self.OP2_gp_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP2_gp_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP2_gp_cls = copy.deepcopy(self.classifier)
            self.OP3_avg1 = Pooling(gap1_size)
            self.OP3_gp_reduction = reduction(self.in_planes, feat_channel_branch)
            self.OP3_gp_BNNeck = copy.deepcopy(self.bottleneck)
            self.OP3_gp_cls = copy.deepcopy(self.classifier)

        # RP2: receptive path with 2 stripes; RP3: 3 stripes.
        if not self.close_RP_path:
            # branch2
            self.RP2_p1_avg1 = Pooling(gap1_size)
            self.RP2_p2_avg1 = Pooling(gap1_size)
            self.RP2_p1_reduction = reduction(self.in_planes, feat_channel_branch)
            self.RP2_p2_reduction = reduction(self.in_planes, feat_channel_branch)
            self.RP2_p1_BNNeck = copy.deepcopy(self.bottleneck)
            self.RP2_p2_BNNeck = copy.deepcopy(self.bottleneck)
            self.RP2_p1_cls = copy.deepcopy(self.classifier)
            self.RP2_p2_cls = copy.deepcopy(self.classifier)
            # branch3
            self.RP3_p1_avg1 = Pooling(gap1_size)
            self.RP3_p2_avg1 = Pooling(gap1_size)
            self.RP3_p3_avg1 = Pooling(gap1_size)
            self.RP3_p1_reduction = reduction(self.in_planes, feat_channel_branch)
            self.RP3_p2_reduction = reduction(self.in_planes, feat_channel_branch)
            self.RP3_p3_reduction = reduction(self.in_planes, feat_channel_branch)
            self.RP3_p1_BNNeck = copy.deepcopy(self.bottleneck)
            self.RP3_p2_BNNeck = copy.deepcopy(self.bottleneck)
            self.RP3_p3_BNNeck = copy.deepcopy(self.bottleneck)
            self.RP3_p1_cls = copy.deepcopy(self.classifier)
            self.RP3_p2_cls = copy.deepcopy(self.classifier)
            self.RP3_p3_cls = copy.deepcopy(self.classifier)

        # douzi path: avg+max 2x2 pooling -> concat the 4 cells -> 1x1 conv.
        if self.add_douzi_path:
            # branch2
            self.DZ2_avg22 = nn.AdaptiveAvgPool2d(gap22_size)
            self.DZ2_max22 = nn.AdaptiveMaxPool2d(gap22_size)
            self.DZ2_reduction = reduction(self.in_planes * gap22_size[0] * gap22_size[1], feat_channel_branch)
            self.DZ2_BNNeck = copy.deepcopy(self.bottleneck)
            self.DZ2_cls = copy.deepcopy(self.classifier)
            # branch3
            self.DZ3_avg22 = nn.AdaptiveAvgPool2d(gap22_size)
            self.DZ3_max22 = nn.AdaptiveMaxPool2d(gap22_size)
            self.DZ3_reduction = reduction(self.in_planes * gap22_size[0] * gap22_size[1], feat_channel_branch)
            self.DZ3_BNNeck = copy.deepcopy(self.bottleneck)
            self.DZ3_cls = copy.deepcopy(self.classifier)

    def forward(self, x, label=None):  # label is unused if self.cos_layer == 'no'
        """Run the trunk and all enabled heads.

        Training: returns a flat list alternating (cls_score, reduced feature)
        per head.  Eval: returns one concatenated feature vector per sample.
        """
        # 1. shared backbone trunk
        x = self.base.conv1(x)  # /2
        x = self.base.bn1(x)
        x = self.base.relu(x)
        x = self.base.maxpool(x)  # /2 | /4 total

        x = self.base.layer1(x)
        x_conv2 = self.base.layer2(x)  # /2 | /8 total
        x_conv3 = self.base.layer3(x_conv2)  # /2 | /16 total
        # Branching starts after conv3.

        if not self.close_OP_path:
            # Original MGN path.
            # branch2
            x_OP2_conv4 = self.branch2_conv4(x_conv3)
            x_OP2_gp = self.OP2_avg1(x_OP2_conv4)  # global 1x1 pooling of branch 2
            if self.use_ABP:
                # Activation-based partitioning: data-driven split height.
                index = split_at_H_dim(x_OP2_conv4, split_num=2)
                x_OP2_p1_conv4 = x_OP2_conv4[:, :, 0:index[0], :]
                x_OP2_p2_conv4 = x_OP2_conv4[:, :, index[0]:, :]
                x_OP2_p1 = self.OP2_p1_avg1(x_OP2_p1_conv4)
                x_OP2_p2 = self.OP2_p2_avg1(x_OP2_p2_conv4)
            else:
                x_OP2_p = self.OP2_avg2(x_OP2_conv4)  # adaptive 2x1 pooling of branch 2
                x_OP2_p1 = x_OP2_p[:, :, 0:1, :]  # branch 2, stripe 1
                x_OP2_p2 = x_OP2_p[:, :, 1:2, :]  # branch 2, stripe 2
            x_OP2_gp_reduction = self.OP2_gp_reduction(x_OP2_gp)  # reduced features
            x_OP2_p1_reduction = self.OP2_p1_reduction(x_OP2_p1)
            x_OP2_p2_reduction = self.OP2_p2_reduction(x_OP2_p2)

            x_OP2_gp_bnneck = self.OP2_gp_BNNeck(x_OP2_gp_reduction)  # bnneck
            x_OP2_p1_bnneck = self.OP2_p1_BNNeck(x_OP2_p1_reduction)
            x_OP2_p2_bnneck = self.OP2_p2_BNNeck(x_OP2_p2_reduction)

            # branch3
            x_OP3_conv4 = self.branch3_conv4(x_conv3)
            x_OP3_gp = self.OP3_avg1(x_OP3_conv4)
            if self.use_ABP:
                index = split_at_H_dim(x_OP3_conv4, split_num=3)
                x_OP3_p1_conv4 = x_OP3_conv4[:, :, 0:index[0], :]
                x_OP3_p2_conv4 = x_OP3_conv4[:, :, index[0]:index[1], :]
                x_OP3_p3_conv4 = x_OP3_conv4[:, :, index[1]:, :]
                x_OP3_p1 = self.OP3_p1_avg1(x_OP3_p1_conv4)
                x_OP3_p2 = self.OP3_p2_avg1(x_OP3_p2_conv4)
                x_OP3_p3 = self.OP3_p3_avg1(x_OP3_p3_conv4)
            else:
                x_OP3_p = self.OP3_avg3(x_OP3_conv4)
                x_OP3_p1 = x_OP3_p[:, :, 0:1, :]
                x_OP3_p2 = x_OP3_p[:, :, 1:2, :]
                x_OP3_p3 = x_OP3_p[:, :, 2:3, :]
            x_OP3_gp_reduction = self.OP3_gp_reduction(x_OP3_gp)  # reduced features
            x_OP3_p1_reduction = self.OP3_p1_reduction(x_OP3_p1)
            x_OP3_p2_reduction = self.OP3_p2_reduction(x_OP3_p2)
            x_OP3_p3_reduction = self.OP3_p3_reduction(x_OP3_p3)

            x_OP3_gp_bnneck = self.OP3_gp_BNNeck(x_OP3_gp_reduction)  # bnneck
            x_OP3_p1_bnneck = self.OP3_p1_BNNeck(x_OP3_p1_reduction)
            x_OP3_p2_bnneck = self.OP3_p2_BNNeck(x_OP3_p2_reduction)
            x_OP3_p3_bnneck = self.OP3_p3_BNNeck(x_OP3_p3_reduction)
        else:
            # OP locals closed: only the global heads of each branch.
            x_OP2_conv4 = self.branch2_conv4(x_conv3)
            x_OP2_gp = self.OP2_avg1(x_OP2_conv4)  # global 1x1 pooling of branch 2
            x_OP2_gp_reduction = self.OP2_gp_reduction(x_OP2_gp)  # reduced features
            x_OP2_gp_bnneck = self.OP2_gp_BNNeck(x_OP2_gp_reduction)  # bnneck
            x_OP3_conv4 = self.branch3_conv4(x_conv3)
            x_OP3_gp = self.OP3_avg1(x_OP3_conv4)
            x_OP3_gp_reduction = self.OP3_gp_reduction(x_OP3_gp)  # reduced features
            x_OP3_gp_bnneck = self.OP3_gp_BNNeck(x_OP3_gp_reduction)  # bnneck

        # Receptive-partitioned path.
        if not self.close_RP_path:
            # branch2
            if self.start_from_conv2:
                x_RP2_p1_conv2, x_RP2_p2_conv2 = torch.chunk(x_conv2, chunks=2, dim=2)
                # Split along H, re-stack along the batch dimension.
                x_RP2_conv2 = torch.cat((x_RP2_p1_conv2, x_RP2_p2_conv2), dim=0)
                x_RP2_conv3 = self.base.layer3(x_RP2_conv2)
            else:
                x_RP2_p1_conv3, x_RP2_p2_conv3 = torch.chunk(x_conv3, chunks=2, dim=2)
                # Split along H, re-stack along the batch dimension.
                x_RP2_conv3 = torch.cat((x_RP2_p1_conv3, x_RP2_p2_conv3), dim=0)
            x_RP2_conv4 = self.branch2_conv4(x_RP2_conv3)
            x_RP2_p1_conv4, x_RP2_p2_conv4 = torch.chunk(x_RP2_conv4, chunks=2, dim=0)  # split batch back
            if self.use_ABP:
                x_RP2_conv4 = torch.cat((x_RP2_p1_conv4, x_RP2_p2_conv4), dim=2)  # re-join along H
                index = split_at_H_dim(x_RP2_conv4, 2)
                x_RP2_p1_conv4 = x_RP2_conv4[:, :, 0:index[0], :]
                x_RP2_p2_conv4 = x_RP2_conv4[:, :, index[0]:, :]
            x_RP2_p1 = self.RP2_p1_avg1(x_RP2_p1_conv4)
            x_RP2_p2 = self.RP2_p2_avg1(x_RP2_p2_conv4)
            x_RP2_p1_reduction = self.RP2_p1_reduction(x_RP2_p1)
            x_RP2_p2_reduction = self.RP2_p2_reduction(x_RP2_p2)

            x_RP2_p1_bnneck = self.RP2_p1_BNNeck(x_RP2_p1_reduction)
            x_RP2_p2_bnneck = self.RP2_p2_BNNeck(x_RP2_p2_reduction)

            # branch3
            if self.start_from_conv2:
                x_RP3_p1_conv2, x_RP3_p2_conv2, x_RP3_p3_conv2 = torch.chunk(x_conv2, chunks=3, dim=2)
                # Split along H, re-stack along the batch dimension.
                x_RP3_conv2 = torch.cat((x_RP3_p1_conv2, x_RP3_p2_conv2, x_RP3_p3_conv2), dim=0)
                x_RP3_conv3 = self.base.layer3(x_RP3_conv2)
            else:
                x_RP3_p1_conv3, x_RP3_p2_conv3, x_RP3_p3_conv3 = torch.chunk(x_conv3, chunks=3, dim=2)
                # Split along H, re-stack along the batch dimension.
                x_RP3_conv3 = torch.cat((x_RP3_p1_conv3, x_RP3_p2_conv3, x_RP3_p3_conv3), dim=0)
            x_RP3_conv4 = self.branch3_conv4(x_RP3_conv3)
            x_RP3_p1_conv4, x_RP3_p2_conv4, x_RP3_p3_conv4 = torch.chunk(x_RP3_conv4, chunks=3, dim=0)
            if self.use_ABP:
                x_RP3_conv4 = torch.cat((x_RP3_p1_conv4, x_RP3_p2_conv4, x_RP3_p3_conv4), dim=2)
                index = split_at_H_dim(x_RP3_conv4, split_num=3)
                x_RP3_p1_conv4 = x_RP3_conv4[:, :, 0:index[0], :]
                x_RP3_p2_conv4 = x_RP3_conv4[:, :, index[0]:index[1], :]
                x_RP3_p3_conv4 = x_RP3_conv4[:, :, index[1]:, :]
            x_RP3_p1 = self.RP3_p1_avg1(x_RP3_p1_conv4)
            x_RP3_p2 = self.RP3_p2_avg1(x_RP3_p2_conv4)
            x_RP3_p3 = self.RP3_p3_avg1(x_RP3_p3_conv4)
            x_RP3_p1_reduction = self.RP3_p1_reduction(x_RP3_p1)
            x_RP3_p2_reduction = self.RP3_p2_reduction(x_RP3_p2)
            x_RP3_p3_reduction = self.RP3_p3_reduction(x_RP3_p3)

            x_RP3_p1_bnneck = self.RP3_p1_BNNeck(x_RP3_p1_reduction)
            x_RP3_p2_bnneck = self.RP3_p2_BNNeck(x_RP3_p2_reduction)
            x_RP3_p3_bnneck = self.RP3_p3_BNNeck(x_RP3_p3_reduction)

        # douzi path.
        if self.add_douzi_path:
            # branch2
            x_DZ2 = self.DZ2_avg22(x_OP2_conv4) + self.DZ2_max22(x_OP2_conv4)
            x_DZ2 = torch.cat((x_DZ2[:, :, 0, 0], x_DZ2[:, :, 0, 1], x_DZ2[:, :, 1, 0], x_DZ2[:, :, 1, 1]), dim=1).unsqueeze(-1).unsqueeze(-1)
            x_DZ2_reduction = self.DZ2_reduction(x_DZ2)
            x_DZ2_bnneck = self.DZ2_BNNeck(x_DZ2_reduction)
            # branch3
            # Bugfix: the max pooling previously read x_OP2_conv4 (branch 2);
            # branch 3 must pool its own conv4 output, matching the avg term.
            x_DZ3 = self.DZ3_avg22(x_OP3_conv4) + self.DZ3_max22(x_OP3_conv4)
            x_DZ3 = torch.cat((x_DZ3[:, :, 0, 0], x_DZ3[:, :, 0, 1], x_DZ3[:, :, 1, 0], x_DZ3[:, :, 1, 1]), dim=1).unsqueeze(-1).unsqueeze(-1)
            x_DZ3_reduction = self.DZ3_reduction(x_DZ3)
            x_DZ3_bnneck = self.DZ3_BNNeck(x_DZ3_reduction)

        if self.training:
            if not self.close_OP_path:
                cls_score_OP2_gp = self.OP2_gp_cls(x_OP2_gp_bnneck, label)  # calculate cls score
                cls_score_OP2_p1 = self.OP2_p1_cls(x_OP2_p1_bnneck, label)
                cls_score_OP2_p2 = self.OP2_p2_cls(x_OP2_p2_bnneck, label)

                cls_score_OP3_gp = self.OP3_gp_cls(x_OP3_gp_bnneck, label)  # calculate cls score
                cls_score_OP3_p1 = self.OP3_p1_cls(x_OP3_p1_bnneck, label)
                cls_score_OP3_p2 = self.OP3_p2_cls(x_OP3_p2_bnneck, label)
                cls_score_OP3_p3 = self.OP3_p3_cls(x_OP3_p3_bnneck, label)
            else:
                cls_score_OP2_gp = self.OP2_gp_cls(x_OP2_gp_bnneck, label)  # calculate cls score
                cls_score_OP3_gp = self.OP3_gp_cls(x_OP3_gp_bnneck, label)  # calculate cls score

            if not self.close_RP_path:
                cls_score_RP2_p1 = self.RP2_p1_cls(x_RP2_p1_bnneck, label)
                cls_score_RP2_p2 = self.RP2_p2_cls(x_RP2_p2_bnneck, label)

                cls_score_RP3_p1 = self.RP3_p1_cls(x_RP3_p1_bnneck, label)
                cls_score_RP3_p2 = self.RP3_p2_cls(x_RP3_p2_bnneck, label)
                cls_score_RP3_p3 = self.RP3_p3_cls(x_RP3_p3_bnneck, label)

            # Bugfix: the original elif chain referenced never-computed RP
            # tensors when BOTH paths were closed; the conditions below cover
            # all four flag combinations.
            if (not self.close_OP_path) and (not self.close_RP_path):
                ret = [cls_score_OP2_gp, x_OP2_gp_reduction, cls_score_OP2_p1, x_OP2_p1_reduction, cls_score_OP2_p2, x_OP2_p2_reduction,
                       cls_score_RP2_p1, x_RP2_p1_reduction, cls_score_RP2_p2, x_RP2_p2_reduction,
                       cls_score_OP3_gp, x_OP3_gp_reduction, cls_score_OP3_p1, x_OP3_p1_reduction, cls_score_OP3_p2, x_OP3_p2_reduction, cls_score_OP3_p3, x_OP3_p3_reduction,
                       cls_score_RP3_p1, x_RP3_p1_reduction, cls_score_RP3_p2, x_RP3_p2_reduction, cls_score_RP3_p3, x_RP3_p3_reduction]
            elif self.close_OP_path and not self.close_RP_path:
                ret = [cls_score_OP2_gp, x_OP2_gp_reduction,
                       cls_score_RP2_p1, x_RP2_p1_reduction, cls_score_RP2_p2, x_RP2_p2_reduction,
                       cls_score_OP3_gp, x_OP3_gp_reduction,
                       cls_score_RP3_p1, x_RP3_p1_reduction, cls_score_RP3_p2, x_RP3_p2_reduction, cls_score_RP3_p3, x_RP3_p3_reduction]
            elif self.close_RP_path and not self.close_OP_path:
                ret = [cls_score_OP2_gp, x_OP2_gp_reduction, cls_score_OP2_p1, x_OP2_p1_reduction, cls_score_OP2_p2, x_OP2_p2_reduction,
                       cls_score_OP3_gp, x_OP3_gp_reduction, cls_score_OP3_p1, x_OP3_p1_reduction, cls_score_OP3_p2, x_OP3_p2_reduction, cls_score_OP3_p3, x_OP3_p3_reduction]
            else:
                # Both local paths closed: only the global heads remain.
                ret = [cls_score_OP2_gp, x_OP2_gp_reduction,
                       cls_score_OP3_gp, x_OP3_gp_reduction]
            if self.add_douzi_path:
                cls_score_DZ2 = self.DZ2_cls(x_DZ2_bnneck, label)
                cls_score_DZ3 = self.DZ3_cls(x_DZ3_bnneck, label)
                ret += [cls_score_DZ2, x_DZ2_reduction, cls_score_DZ3, x_DZ3_reduction]
            return ret
        else:
            if self.neck_feat == 'after':
                # Test with features taken after the BNNeck.
                if (not self.close_OP_path) and (not self.close_RP_path):
                    ret = torch.cat(
                        (
                            x_OP2_gp_bnneck, x_OP2_p1_bnneck, x_OP2_p2_bnneck,
                            x_RP2_p1_bnneck, x_RP2_p2_bnneck,
                            x_OP3_gp_bnneck, x_OP3_p1_bnneck, x_OP3_p2_bnneck, x_OP3_p3_bnneck,
                            x_RP3_p1_bnneck, x_RP3_p2_bnneck, x_RP3_p3_bnneck
                        ),
                        1
                    )
                elif self.close_OP_path and not self.close_RP_path:
                    ret = torch.cat(
                        (
                            x_OP2_gp_bnneck,
                            x_RP2_p1_bnneck, x_RP2_p2_bnneck,
                            x_OP3_gp_bnneck,
                            x_RP3_p1_bnneck, x_RP3_p2_bnneck, x_RP3_p3_bnneck
                        ),
                        1
                    )
                elif self.close_RP_path and not self.close_OP_path:
                    ret = torch.cat(
                        (
                            x_OP2_gp_bnneck, x_OP2_p1_bnneck, x_OP2_p2_bnneck,
                            x_OP3_gp_bnneck, x_OP3_p1_bnneck, x_OP3_p2_bnneck, x_OP3_p3_bnneck
                        ),
                        1
                    )
                else:
                    # Both local paths closed: only the global heads remain.
                    ret = torch.cat((x_OP2_gp_bnneck, x_OP3_gp_bnneck), 1)
                if self.add_douzi_path:
                    ret = torch.cat((ret, x_DZ2_bnneck, x_DZ3_bnneck), dim=1)
            else:
                # Test with features taken before the BNNeck.
                if (not self.close_OP_path) and (not self.close_RP_path):
                    ret = torch.cat(
                        (
                            x_OP2_gp_reduction, x_OP2_p1_reduction, x_OP2_p2_reduction,
                            x_RP2_p1_reduction, x_RP2_p2_reduction,
                            x_OP3_gp_reduction, x_OP3_p1_reduction, x_OP3_p2_reduction, x_OP3_p3_reduction,
                            x_RP3_p1_reduction, x_RP3_p2_reduction, x_RP3_p3_reduction
                        ),
                        1
                    )
                elif self.close_OP_path and not self.close_RP_path:
                    ret = torch.cat(
                        (
                            x_OP2_gp_reduction,
                            x_RP2_p1_reduction, x_RP2_p2_reduction,
                            x_OP3_gp_reduction,
                            x_RP3_p1_reduction, x_RP3_p2_reduction, x_RP3_p3_reduction
                        ),
                        1
                    )
                elif self.close_RP_path and not self.close_OP_path:
                    ret = torch.cat(
                        (
                            x_OP2_gp_reduction, x_OP2_p1_reduction, x_OP2_p2_reduction,
                            x_OP3_gp_reduction, x_OP3_p1_reduction, x_OP3_p2_reduction, x_OP3_p3_reduction
                        ),
                        1
                    )
                else:
                    # Both local paths closed: only the global heads remain.
                    ret = torch.cat((x_OP2_gp_reduction, x_OP3_gp_reduction), 1)
                if self.add_douzi_path:
                    ret = torch.cat((ret, x_DZ2_reduction, x_DZ3_reduction), dim=1)
            return ret

    def load_param(self, trained_path):
        """Load weights from a checkpoint, skipping all classifier heads."""
        param_dict = torch.load(trained_path)
        for i in param_dict:
            if 'classifier' in i or 'arcface' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model from {}'.format(trained_path))

    def load_param_finetune(self, model_path):
        """Load ALL weights (classifiers included) for finetuning."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model for finetuning from {}'.format(model_path))

    def load_param_finetune_from_dict(self, param_dict):
        """Best-effort load: entries whose name or shape do not match are skipped."""
        for i in param_dict:
            try:
                self.state_dict()[i].copy_(param_dict[i])
            except Exception:
                # Deliberate best-effort: ignore name/shape mismatches.
                pass


def split_at_H_dim(x, split_num=2):
    """
    统计batch内所有的sample
    :param x:
    :param split_num:
    :return:
    """
    B = x.shape[0]
    H = x.shape[2]
    C = x.shape[1] * B
    idx = x.max(dim=3)[0].argmax(2)  # n×c 所有batch每个特征图最大值的h索引

    num_H = (idx == 0).sum(dim=1).reshape((B, -1))  # n×h 统计所有batch每个h高度有几张特征图
    for i in range(1, H):
        num_H = torch.cat((num_H, (idx == i).sum(dim=1).reshape((B, -1))), dim=1)
    num_H = num_H.sum(dim=0)
    cumsum_num_H = torch.cumsum(num_H, dim=0)
    # 0:1 1:2 2:3
    if split_num == 3:
        mid = ((cumsum_num_H > C / split_num) & (cumsum_num_H <= 2 * C / split_num))
        _, index1 = mid.max(dim=0, keepdim=True)
        index1_add = mid.sum(dim=0, keepdim=True)
        index2 = index1 + index1_add
        index = torch.cat((index1, index2))
    elif split_num == 2:
        mid = (cumsum_num_H > C / split_num)
        _, index1 = mid.max(dim=0, keepdim=True)
        index = index1
    # print(index.cpu().numpy())
    return index


def make_model(cfg, num_class):
    """Factory: build the multi-branch Backbone for the given config and class count."""
    return Backbone(num_class, cfg)
