import torch
import torch.nn as nn
from .backbones.resnet import ResNet, BasicBlock, Bottleneck
from loss.metric_learning import Arcface, Cosface, AMSoftmax, CircleLoss
from .backbones.resnet_ibn_a import resnet50_ibn_a, resnet101_ibn_a
from .backbones.se_resnet_ibn_a import se_resnet101_ibn_a
from .backbones.resnet_ibn_b import resnet101_ibn_b
from .backbones.efficientnet import EfficientNet
from .backbones.hrnet import HRnet
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchvision.models import resnet34
import copy
import logging

# Width of each pooling branch's embedding: a quarter of ResNet-50's 2048-d output.
feat_channel_branch = 2048 // 4
# Width of the final test-time descriptor: the three branch embeddings concatenated.
feat_channel = feat_channel_branch * 3


def weights_init_kaiming(m):
    """Kaiming initializer, meant to be applied via ``module.apply(...)``.

    Linear layers get fan-out Kaiming normal weights, Conv layers fan-in,
    and affine BatchNorm layers are reset to weight=1 / bias=0.  Biases,
    when present, are zeroed.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)

    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        # FIX: the original wrapped this in a bare ``except:`` that fell back to
        # the long-deprecated ``nn.init.constant``; a bare except hides real
        # errors.  Guarding on bias presence (as the Linear branch does) is the
        # correct check.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)


# Defines the new fc layer and classification layer
# |--bn--|--Linear--|
# Defines the new fc layer and classification layer
# |--(LeakyReLU)--|--Linear--|
class ClassBlock(nn.Module):
    """Projection head: optional LeakyReLU followed by a bias-free Linear.

    Maps a flattened feature of ``input_dim`` dims to ``num_bottleneck``
    dims.  When the two widths already match, the projection is skipped
    and the block is an identity.
    """

    def __init__(self, input_dim, relu=True, num_bottleneck=2048):
        super(ClassBlock, self).__init__()
        add_block1 = []
        if relu:
            add_block1 += [nn.LeakyReLU(0.1)]
        add_block1 += [nn.Linear(input_dim, num_bottleneck, bias=False)]
        add_block1 = nn.Sequential(*add_block1)
        add_block1.apply(weights_init_kaiming)

        # BUG FIX: the original assigned the identity Sequential here and then
        # unconditionally overwrote it with add_block1 on the next line, so the
        # input_dim == num_bottleneck shortcut was dead code.  Restore it with
        # an explicit else.
        if input_dim == num_bottleneck:
            self.add_block1 = nn.Sequential()
        else:
            self.add_block1 = add_block1

    def forward(self, x):
        # Flatten to (batch, features); output is an intermediate feature
        # without BN applied.
        x = x.view(x.shape[0], -1)
        x = self.add_block1(x)
        return x


class mul_to_one_size(nn.Module):
    """Collapse a pooled (H, W) feature map into a flat out_channel vector.

    Pipeline: BN -> LeakyReLU -> 1x1 conv (channel squeeze) -> BN ->
    LeakyReLU -> a conv whose kernel equals the whole spatial extent, then
    flatten to (batch, out_channel).
    """

    def __init__(self, input_size, input_channel, out_channel):
        super(mul_to_one_size, self).__init__()
        squeezed = out_channel // (input_size[0] * input_size[1])
        self.bn0 = nn.BatchNorm2d(input_channel)
        self.conv1 = nn.Conv2d(input_channel, squeezed, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(squeezed)
        self.relu = nn.LeakyReLU(0.1)
        # Kernel spans the full (H, W) input, so the output is spatially 1x1.
        self.conv2 = nn.Conv2d(squeezed, out_channel, kernel_size=input_size, bias=False)
        # bn2 is registered but never applied in forward (kept so the
        # state_dict layout matches the original implementation).
        self.bn2 = nn.BatchNorm2d(out_channel)

    def forward(self, x):
        out = self.relu(self.bn0(x))
        out = self.relu(self.bn1(self.conv1(out)))
        out = self.conv2(out)
        return out.view(out.shape[0], -1)


class GeM(nn.Module):
    """Generalized-mean (GeM) pooling over the spatial dimensions.

    Computes ``avgpool(clamp(x, eps) ** p) ** (1/p)`` down to a 1x1 map.
    With ``freeze_p=True`` the exponent is a plain float; otherwise it is a
    learnable ``Parameter`` initialized to ``p``.
    """

    def __init__(self, p=3.0, eps=1e-6, freeze_p=True):
        super(GeM, self).__init__()
        if freeze_p:
            self.p = p
        else:
            self.p = Parameter(torch.ones(1) * p)
        self.eps = eps

    def forward(self, x):
        # Clamp keeps the fractional power well-defined for non-positive inputs.
        clamped = x.clamp(min=self.eps)
        pooled = F.adaptive_avg_pool2d(clamped.pow(self.p), (1, 1))
        return pooled.pow(1. / self.p)

    def __repr__(self):
        exponent = self.p if isinstance(self.p, float) else self.p.data.tolist()[0]
        return '{}({}{:.4f}{}{})'.format(
            self.__class__.__name__, 'p=', exponent, ', eps=', self.eps)


def weights_init_classifier(m):
    """Classifier initializer for ``module.apply(...)``: small-std normal
    weights for Linear layers, zeroed bias when one exists."""
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # BUG FIX: ``if m.bias:`` evaluates the truthiness of a multi-element
        # Parameter, which raises "Boolean value of Tensor ... is ambiguous"
        # for any Linear built with bias=True.  Test identity against None.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)


class Backbone(nn.Module):
    """Multi-branch person re-ID network.

    A single CNN backbone feeds three horizontal pooling branches
    (1x1, 2x1 and 3x1 adaptive average pooling).  Each branch is reduced to
    ``feat_channel_branch`` dimensions by its own head and has its own
    BNNeck batch-norm and classifier.  In training mode ``forward`` returns
    per-branch logits plus pre-BN features; in eval mode it returns the
    three branch features concatenated.

    Args:
        num_classes: number of identity classes for the classifier heads.
        cfg: experiment config; fields read here are MODEL.LAST_STRIDE,
            MODEL.PRETRAIN_PATH, MODEL.NAME, MODEL.PRETRAIN_CHOICE,
            MODEL.COS_LAYER, MODEL.NECK, MODEL.FROZEN, MODEL.ID_LOSS_TYPE,
            TEST.NECK_FEAT, SOLVER.COSINE_SCALE and SOLVER.COSINE_MARGIN.
    """

    def __init__(self, num_classes, cfg):
        super(Backbone, self).__init__()
        last_stride = cfg.MODEL.LAST_STRIDE
        model_path = cfg.MODEL.PRETRAIN_PATH
        model_name = cfg.MODEL.NAME
        pretrain_choice = cfg.MODEL.PRETRAIN_CHOICE
        self.cos_layer = cfg.MODEL.COS_LAYER
        self.neck = cfg.MODEL.NECK
        self.neck_feat = cfg.TEST.NECK_FEAT

        logger = logging.getLogger("reid_baseline.train")
        # Select the backbone; self.in_planes records its output channel count.
        if model_name == 'resnet50':
            self.in_planes = 2048
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[3, 4, 6, 3])
            print('using resnet50 as a backbone')
        elif model_name == 'resnet34':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[3, 4, 6, 3])
            print('using resnet34 as a backbone')
        elif model_name == 'resnet18':
            self.in_planes = 512
            self.base = ResNet(last_stride=last_stride,
                               block=BasicBlock, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[2, 2, 2, 2])
            print('using resnet18 as a backbone')
        elif model_name == 'efficientnet-b1':
            self.in_planes = 1280
            self.base = EfficientNet.from_name(model_name)
        elif model_name == 'efficientnet-b2':
            self.in_planes = 1408
            self.base = EfficientNet.from_name(model_name)
        elif model_name == 'efficientnet-b3':
            self.in_planes = 1536
            self.base = EfficientNet.from_name(model_name)
        elif model_name == 'efficientnet-b4':
            self.in_planes = 1792
            self.base = EfficientNet.from_name(model_name)
        elif model_name == 'efficientnet-b5':
            self.in_planes = 2048
            self.base = EfficientNet.from_name(model_name)
        elif model_name == 'hrnet':
            self.in_planes = 1024
            self.base = HRnet()
            print('using HRnet as a backbone')
        elif model_name == 'resnet101':
            self.in_planes = 2048
            self.base = ResNet(last_stride=last_stride,
                               block=Bottleneck, frozen_stages=cfg.MODEL.FROZEN,
                               layers=[3, 4, 23, 3])
            print('using resnet101 as a backbone')
        elif model_name == 'resnet50_ibn_a':
            self.in_planes = 2048
            self.base = resnet50_ibn_a(last_stride)
            print('using resnet50_ibn_a as a backbone')
        elif model_name == 'resnet101_ibn_a':
            self.in_planes = 2048
            self.base = resnet101_ibn_a(last_stride, frozen_stages=cfg.MODEL.FROZEN)
            print('using resnet101_ibn_a as a backbone')
        elif model_name == 'se_resnet101_ibn_a':
            self.in_planes = 2048
            self.base = se_resnet101_ibn_a(last_stride)
            print('using se_resnet101_ibn_a as a backbone')
        elif model_name == 'resnet101_ibn_b':
            self.in_planes = 2048
            self.base = resnet101_ibn_b(last_stride)
            print('using resnet101_ibn_b as a backbone')
        else:
            # NOTE(review): an unknown name only prints a warning here and then
            # crashes below on self.base — confirm this is intentional.
            print('unsupported backbone! but got {}'.format(model_name))

        if pretrain_choice == 'imagenet':
            self.base.load_param(model_path)
            print('Loading pretrained ImageNet model......from {}'.format(model_path))

        # Pooling grids for the three branches: global (1x1), two horizontal
        # stripes (2x1) and three horizontal stripes (3x1).  The max-pool
        # variants are registered but unused in forward (their additions are
        # commented out there).
        gap_size, gap2_size, gap3_size = (1, 1), (2, 1), (3, 1)
        self.gap = nn.AdaptiveAvgPool2d(gap_size)
        self.gapMax = nn.AdaptiveMaxPool2d(gap_size)
        self.gapAvg2 = nn.AdaptiveAvgPool2d(gap2_size)
        self.gapMax2 = nn.AdaptiveMaxPool2d(gap2_size)
        self.gapAvg3 = nn.AdaptiveAvgPool2d(gap3_size)
        self.gapMax3 = nn.AdaptiveMaxPool2d(gap3_size)

        # Different ID losses use different classification output layers.
        "不同的ID loss对应不同的 分类输出层"
        self.num_classes = num_classes
        self.ID_LOSS_TYPE = cfg.MODEL.ID_LOSS_TYPE
        if self.ID_LOSS_TYPE == 'arcface':
            print('using {} with s:{}, m: {}'.format(self.ID_LOSS_TYPE, cfg.SOLVER.COSINE_SCALE,
                                                     cfg.SOLVER.COSINE_MARGIN))
            # One margin-based classifier per branch, all starting from the
            # same initialization.
            self.classifier = Arcface(feat_channel_branch, self.num_classes,
                                      s=cfg.SOLVER.COSINE_SCALE, m=cfg.SOLVER.COSINE_MARGIN)
            self.classifier2 = copy.deepcopy(self.classifier)
            self.classifier3 = copy.deepcopy(self.classifier)
        else:
            # A plain linear classifier, i.e. the ID loss is plain softmax.
            "直接添加线性分类层，那么对应的ID loss就是softmax"
            # NOTE(review): this Linear expects self.in_planes inputs while
            # forward feeds it feat_channel_branch-dim features, and
            # classifier2/classifier3 are never created on this path even
            # though forward calls them — confirm the softmax branch is
            # actually exercised.
            self.classifier = nn.Linear(self.in_planes, self.num_classes, bias=False)
            self.classifier.apply(weights_init_classifier)

        # Branch heads: either Linear projections (concat style) or the
        # conv-based mul_to_one_size reducers.  use_concat is hard-coded off.
        use_concat = False
        if use_concat:
            logger.info("use concat for Head")
            self.change_channel = ClassBlock(self.in_planes, relu=True, num_bottleneck=feat_channel_branch)
            self.change_channel2 = ClassBlock(self.in_planes * gap2_size[0] * gap2_size[1], relu=True,
                                              num_bottleneck=feat_channel_branch)
            self.change_channel3 = ClassBlock(self.in_planes * gap3_size[0] * gap3_size[1], relu=True,
                                              num_bottleneck=feat_channel_branch)
        else:
            logger.info("use conv to replace concat for Head")
            self.change_channel = mul_to_one_size(gap_size, self.in_planes, feat_channel_branch)
            self.change_channel2 = mul_to_one_size(gap2_size, self.in_planes, feat_channel_branch)
            self.change_channel3 = mul_to_one_size(gap3_size, self.in_planes, feat_channel_branch)
        # BNNeck 1: bias is frozen per the BNNeck trick (BN with no shift).
        self.bottleneck = nn.BatchNorm1d(feat_channel_branch)
        self.bottleneck.bias.requires_grad_(False)
        self.bottleneck.apply(weights_init_kaiming)
        # BNNeck 2
        self.bottleneck2 = copy.deepcopy(self.bottleneck)
        # BNNeck 3
        self.bottleneck3 = copy.deepcopy(self.bottleneck)

    def forward(self, x, label=None):  # label is unused if self.cos_layer == 'no'
        """Run the backbone and the three pooling branches.

        Args:
            x: input image batch, passed straight to the backbone.
            label: identity labels; consumed by the margin-based classifiers
                (arcface/cosface/amsoftmax/circle) during training, otherwise
                unused.

        Returns:
            Training: ``(cls_score, global_feat, cls_score2, gf2,
            cls_score3, gf3)`` — per-branch logits and pre-BN features.
            Eval: the concatenated branch features, post-BN when
            ``self.neck_feat == 'after'``, pre-BN otherwise.
        """
        x = self.base(x)

        # Branch 1: global average pooling, reduced to feat_channel_branch dims.
        global_feat = self.gap(x)# + self.gapMax(x)
        # global_feat = global_feat.view(global_feat.shape[0], -1)  # flatten to (bs, 2048)
        global_feat = self.change_channel(global_feat)  # (bs, feat_channel_branch)
        # Branch 2: two horizontal stripes.
        gf2 = self.gapAvg2(x)# + self.gapMax2(x)
        gf2 = self.change_channel2(gf2)  # (bs, feat_channel_branch)
        # Branch 3: three horizontal stripes.
        gf3 = self.gapAvg3(x)# + self.gapMax3(x)
        gf3 = self.change_channel3(gf3)  # (bs, feat_channel_branch)

        # BNNeck features.
        # NOTE(review): these three lines are recomputed verbatim in the
        # 'bnneck' branch below, so this is partly redundant work.
        feat = self.bottleneck(global_feat)
        feat2 = self.bottleneck2(gf2)
        feat3 = self.bottleneck3(gf3)

        if self.neck == 'no':
            feat = global_feat
        elif self.neck == 'bnneck':
            feat = self.bottleneck(global_feat)
            feat2 = self.bottleneck2(gf2)
            feat3 = self.bottleneck3(gf3)

        if self.training:
            if self.ID_LOSS_TYPE in ('arcface', 'cosface', 'amsoftmax', 'circle'):
                # Margin-based heads need the ground-truth label.
                cls_score = self.classifier(feat, label)
                cls_score2 = self.classifier2(feat2, label)
                cls_score3 = self.classifier3(feat3, label)
            else:
                cls_score = self.classifier(feat)
                cls_score2 = self.classifier2(feat2)
                cls_score3 = self.classifier3(feat3)

            return cls_score, global_feat, cls_score2, gf2, cls_score3, gf3
        else:
            if self.neck_feat == 'after':
                # print("Test with feature after BN")
                return torch.cat((feat, feat2, feat3), 1)
            else:
                # print("Test with feature before BN")
                return torch.cat((global_feat, gf2, gf3), 1)

    def load_param(self, trained_path):
        """Load weights from a checkpoint, skipping any parameter whose name
        contains 'classifier' or 'arcface' (so a checkpoint trained with a
        different class count can still be loaded).

        NOTE(review): torch.load without map_location — loading a GPU-saved
        checkpoint on a CPU-only machine will fail; confirm deployment target.
        """
        param_dict = torch.load(trained_path)
        for i in param_dict:
            if 'classifier' in i or 'arcface' in i:
                continue
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model from {}'.format(trained_path))

    def load_param_finetune(self, model_path):
        """Load ALL weights (classifiers included) from a checkpoint for
        fine-tuning; parameter names must match exactly."""
        param_dict = torch.load(model_path)
        for i in param_dict:
            self.state_dict()[i].copy_(param_dict[i])
        print('Loading pretrained model for finetuning from {}'.format(model_path))

    def load_param_finetune_from_dict(self, param_dict):
        """Same as load_param_finetune but from an in-memory state dict."""
        for i in param_dict:
            self.state_dict()[i].copy_(param_dict[i])
            # self.state_dict()[i.replace('module.','')].copy_(param_dict[i])

def make_model(cfg, num_class):
    """Factory entry point: build a Backbone re-ID model from the config."""
    return Backbone(num_class, cfg)
