# inspired by PVEN - Parsing-based View-aware Embedding Network
# https://github.com/silverbulletmdc/PVEN
# paper: "Parsing-based View-aware Embedding Network for Vehicle Re-Identification"
# by Dechao Meng, Liang Li, Xuejing Liu, et al.
# [https://arxiv.org/abs/2004.05021]


__all__ = ['PVASFF']

import torch
import torch.nn as nn
import torch.nn.functional as F

import inspect
import numpy as np
from models.blocks import Prebuild_ResNet, ResNet, ResNetV2, resnet_model_urls    # mResNet as Prebuild_ResNet
from models.blocks import PartAttentionModule
from models.blocks import mask_adaptive_avg_pool2d, MaskAdaptiveAvgPool2d
from models.blocks import mask_global_covariance_pool
from models.blocks import attention
from torch.hub import load_state_dict_from_url
# from .blocks import Prebuild_ResNet, ResNet, ResNetV2, resnet_model_urls
# from .blocks import AdaptiveSpatialFeatureFusion as ASFF
# from .blocks import PartAttentionModule
# from .blocks import mask_adaptive_avg_pool2d, MaskAdaptiveAvgPool2d
# from .blocks import mask_global_covariance_pool
# from .blocks import attention
from torch.hub import load_state_dict_from_url

from collections import OrderedDict
from collections.abc import Iterable
from typing import Union, Optional, Tuple, List
from yacs.config import CfgNode

from models.tools.valid_tools import list2dict
# from .tools.valid_tools import list2dict
from models.triplet_attention import *
from models.se_module import SELayer, MultiScaleLayer_v2, SELayer_Local, BasicConv2d

def _check_integer(_x: Iterable):
    for __x in _x:
        if not isinstance(__x, int):
            return False
    return True


def weights_init_kaiming(m):
    """Kaiming-initialize a module's parameters by layer family.

    Linear: fan-out Kaiming normal weights, zero bias.
    Conv:   fan-in Kaiming normal weights, zero bias when present.
    BatchNorm (affine only): weight = 1, bias = 0.
    Modules of any other type are left untouched.
    """
    layer_kind = type(m).__name__
    if 'Linear' in layer_kind:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        nn.init.constant_(m.bias, 0.0)
    elif 'Conv' in layer_kind:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif 'BatchNorm' in layer_kind:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)


def weights_init_classifier(m):
    """Initialize classifier Linear layers: N(0, 0.001) weights, zero bias.

    Non-Linear modules are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.normal_(m.weight, std=0.001)
        # Bug fix: `if m.bias:` evaluates tensor truthiness, which raises
        # "Boolean value of Tensor with more than one element is ambiguous"
        # for any multi-element bias (and is False for an all-zero bias).
        # Test for presence instead, as weights_init_kaiming already does.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)


class PVASFF(nn.Module):
    """Parsing-based vehicle ReID network with mask-guided feature fusion.

    Pipeline: a ResNet backbone with SE attention after stages 1-3 and
    multi-scale classifier branches, followed by part-mask-guided global and
    local average pooling, BN-neck bottlenecks, and linear classifiers
    (design inspired by PVEN, https://arxiv.org/abs/2004.05021).
    """

    def __init__(self, cfg: CfgNode, num_class: int, debug=False) -> None:
        """Build the backbone, attention modules, pooling heads and classifiers.

        Args:
            cfg: yacs configuration node; the backbone / attention / pam /
                 model / data sub-nodes are read here.
            num_class: number of identities for all classification heads.
            debug: if True, forward() additionally returns intermediate
                   masks and attention maps for visualization.
        """
        super(PVASFF, self).__init__()
        self.num_class: int = num_class  # e.g. 468
        self.local_branches: int = cfg.model.local_branches  # e.g. 4 part branches
        self.device: torch.device = cfg.device
        self._debug: bool = debug
        self.attentions_use_one = cfg.attention.use_one
        self.no_part_backward = cfg.no_part_backward
        self.after_mask = False  # hard-coded off; see TODO in forward()
        self.feature_length = 2048
        # build backbone (only supports resnet family so far)
        backbone: str = cfg.backbone.name
        if backbone.startswith('resnet'):
            # get configurations
            # Bug fix: backbone[-3] is a single character and could never equal
            # the three-character suffix '_v2'; slice with [-3:] as done below.
            if backbone[-3:] == '_v2':
                raise NotImplementedError  # haven't done yet
                backbone_args = {k: v for k, v in dict(cfg.backbone).items()
                                 if k in inspect.getfullargspec(ResNetV2).args}
            else:
                backbone_args = {k: v for k, v in dict(cfg.backbone).items()
                                 if k in inspect.getfullargspec(ResNet).args}
            # use pytorch-provided resnet method
            if cfg.backbone.name == 'resnet' or cfg.backbone.name == 'resnet_v2':  # build custom resnet
                if 'layers' in cfg.backbone:
                    if cfg.backbone.name == 'resnet':
                        self.backbone: nn.Module = ResNet(Prebuild_ResNet.Bottleneck, **backbone_args)
                    else:
                        self.backbone: nn.Module = ResNetV2(Prebuild_ResNet.BottleneckV2, **backbone_args)
                else:
                    raise ValueError(
                        "failed to build custom resnet because of missing essential argument: 'backbone.layers'")
            else:  # build pre-defined resnet
                # Bug fix: the original called getattr(...)(**backbone_args)
                # directly, so a missing attribute raised "TypeError: 'NoneType'
                # object is not callable" before the None check could ever fire.
                backbone_factory = getattr(Prebuild_ResNet, backbone, None)
                if backbone_factory is None:
                    raise ValueError("failed to build resnet with specified name")
                self.backbone: nn.Module = backbone_factory(**backbone_args)

            # load pretrained weights
            if cfg.backbone.pretrained:
                if cfg.backbone.pretrained_path == "":
                    if backbone[-3:] == '_v2':
                        arch: str = backbone[:-3]  # reuse the original resnet weights
                    else:
                        arch: str = backbone    # e.g. 'resnet50'
                    if arch in resnet_model_urls:
                        pretrained_dict: OrderedDict = load_state_dict_from_url(resnet_model_urls[arch], progress=True)
                    else:
                        raise ValueError("cannot find specified pre-trained weights from pytorch website, "
                                         "please set 'model.pretrained = False' or specify a local checkpoint")
                else:
                    # Bug fix: the path option lives under cfg.backbone (checked
                    # above), not cfg.model.
                    pretrained_dict: OrderedDict = torch.load(cfg.backbone.pretrained_path)
                backbone_dict: OrderedDict = self.backbone.state_dict()    # ResNet
                if backbone[-3:] == '_v2':
                    # BN layer size changed: cannot load BN weights from pretrained weights
                    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                                       if k in backbone_dict and 'bn' not in k}
                else:
                    pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in backbone_dict}
                backbone_dict.update(pretrained_dict)
                self.backbone.load_state_dict(backbone_dict)

                # freeze specified layer weights
                suspend = cfg.backbone.suspend
                if isinstance(suspend, int):
                    if suspend > 0:
                        suspend = list(range(suspend))
                    else:
                        suspend = []
                elif isinstance(suspend, Iterable):
                    if isinstance(suspend, range):
                        suspend = list(suspend)
                    if not _check_integer(suspend):
                        raise ValueError("expected freeze options as a collection of integers "
                                         "specifying the layer indices to be frozen")
                else:
                    raise TypeError("invalid freeze options")
                for index, child in enumerate(self.backbone.children()):
                    if index in suspend:
                        for name, param in child.named_parameters():
                            if 'bn' not in name:  # exclude batch normalize layers
                                param.requires_grad = False

            # identify number of output channels: the last parameter should
            # belong to the final BatchNorm layer, whose size(0) equals the
            # stage-4 output channel count
            out_channels = list(self.backbone.named_parameters())[-1][1].size(0)
            assert out_channels > 0, "there should be some output channels "  # should never assert this
            self.level_dims: List[int] = [out_channels // 4, out_channels // 2, out_channels]  # refer to ResNet paper
            self.backbone_stage1_out_channels: int = self.level_dims[0]
            self.backbone_stage1_out_shape: List[int] = [cfg.data.train_size[0] // 8,
                                                         cfg.data.train_size[1] // 8]  # refer to ResNet paper
            self.backbone_out_channels: int = out_channels
            if cfg.backbone.apply_first_maxpool:
                self.backbone_out_shape: List[int] = [cfg.data.train_size[0] // 32,
                                                      cfg.data.train_size[1] // 32]  # refer to ResNet paper
            else:
                self.backbone_out_shape: List[int] = [cfg.data.train_size[0] // 16,
                                                      cfg.data.train_size[1] // 16]  # refer to ResNet paper
        else:
            raise NotImplementedError("specified backbone has not been implemented so far")
        self.backbone.to(cfg.device)

        # SE attention applied after backbone stages 1-3
        self.att_layers = [256, 512, 1024]
        self._init_attention()

        # multi-scale classifier branches on the stage-2 / stage-3 features
        self.branch1 = MultiScaleLayer_v2(512, num_class)
        self.branch2 = MultiScaleLayer_v2(1024, num_class)

        # build attention module
        self.fuser_out_channels = self.level_dims[-1]
        if cfg.model.attention:
            self.attention_enabled = True
            # Bug fix: the original compared strings with `is`, which relies on
            # CPython interning (and is a SyntaxWarning on modern Pythons);
            # use `==` instead.
            # global attention arguments
            if cfg.attention.name == 'ExternalAttention':
                attention_args = {"spa_feature_length": self.level_dims[-1],
                                  "batch_feature_length": self.backbone_out_shape[0] ** 2,
                                  "spa_S": cfg.attention.ExternalAttention_spa_S,
                                  "batch_S": cfg.attention.ExternalAttention_batch_S}
            elif cfg.attention.name == 'SEAttention':
                attention_args = {"channel": self.level_dims[-1],
                                  "reduction": cfg.attention.SEAttention_reduction}
            elif cfg.attention.name == 'ChannelSpatialAttentionModule':
                attention_args = {"channel": self.level_dims[-1],
                                  "reduction": cfg.attention.ChannelSpatialAttentionModule_reduction,
                                  "kernel_size": cfg.attention.ChannelSpatialAttentionModule_kernel_size}
            elif cfg.attention.name == 'SKAttention':
                attention_args = {"channel": self.level_dims[-1],
                                  "kernels": cfg.attention.SKAttention_kernels,
                                  "reduction": cfg.attention.SKAttention_reduction,
                                  "group": cfg.attention.SKAttention_group,
                                  "L": cfg.attention.SKAttention_L,
                                  }
            elif cfg.attention.name == 'BAMBlock':
                attention_args = {"channel": self.level_dims[-1],
                                  "reduction": cfg.attention.BAMBlock_reduction,
                                  "dia_val": cfg.attention.BAMBlock_dia_val,
                                  }
            elif cfg.attention.name == 'ECAAttention':
                attention_args = {"kernel_size": cfg.attention.ECAAttention_kernel_size, }
            elif cfg.attention.name == 'DAModule':
                attention_args = {"d_model": self.level_dims[-1],
                                  "kernel_size": cfg.attention.DAModule_kernel_size,
                                  "H": self.backbone_out_shape[0],
                                  "W": self.backbone_out_shape[1],
                                  }
            else:
                # Robustness fix: previously an unknown name fell through and
                # raised NameError on `attention_args` a few lines below.
                raise ValueError(f"unsupported attention module: {cfg.attention.name}")
            # local (per-part) attention arguments
            if cfg.attention.part_name == 'PartExternalAttention':
                part_attention_args = {"spa_feature_length": self.fuser_out_channels,
                                       "batch_feature_length": self.backbone_out_shape[0] ** 2,
                                       "spa_S": cfg.attention.ExternalAttention_spa_S,
                                       "batch_S": cfg.attention.ExternalAttention_batch_S}
            elif cfg.attention.part_name == 'PartSpatialAttention':
                part_attention_args = {"kernel_size": cfg.attention.ChannelSpatialAttentionModule_kernel_size}
            else:
                raise ValueError(f"unsupported part attention module: {cfg.attention.part_name}")

            self.attentions = getattr(attention, cfg.attention.name)(**attention_args).to(self.device)
            self.part_attentions = nn.ModuleList(
                [getattr(attention, cfg.attention.part_name)(**part_attention_args) for
                 _ in range(self.local_branches)]).to(self.device)
        else:
            self.attention_enabled = False
        if cfg.model.pam:
            self.pam_enabled = True
            # part attention module: learns per-part importance weights
            self.watcher = PartAttentionModule(num_parts=self.local_branches,
                                               num_in_features=self.fuser_out_channels,
                                               dropout_rate=cfg.pam.dropout_rate,
                                               rank=cfg.pam.rank,
                                               lowrank=cfg.pam.lowrank,
                                               fusion=cfg.pam.fusion,
                                               device=cfg.device,
                                               debug=debug)
        else:
            self.pam_enabled = False

        self.train_neck_feat = cfg.train_neck_feat
        self.test_neck_feat = cfg.test_neck_feat
        # BN-neck bottlenecks (bias frozen -> no shift, BNNeck trick)
        self.global_bottleneck = nn.BatchNorm1d(self.fuser_out_channels).to(cfg.device)
        self.global_bottleneck.bias.requires_grad_(False)
        self.local_bottleneck = nn.BatchNorm1d(self.fuser_out_channels).to(cfg.device)
        self.local_bottleneck.bias.requires_grad_(False)

        self.global_classifier = nn.Linear(self.feature_length, num_class, bias=False).to(cfg.device)
        self.local_classifier = nn.Linear(self.feature_length, num_class, bias=False).to(cfg.device)

        # classifier over the concatenated SE-inverse features of stages 1-3
        self.bottleneck_1 = nn.BatchNorm1d(sum(self.att_layers))
        self.bottleneck_1.bias.requires_grad_(False)  # no shift
        self.classifier_1 = nn.Linear(sum(self.att_layers), num_class, bias=False)
        self.classifier_1.apply(weights_init_classifier)
        # initialize all BatchNorm1d / Linear layers of the whole model
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                weights_init_kaiming(m)
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def _init_attention(self):
        """Create one SELayer per channel count in self.att_layers (stages 1-3)."""
        self.att_modules = nn.ModuleList()
        for layer in self.att_layers:
            att_module = SELayer(layer)
            self.att_modules.append(att_module)

    def forward(self, image: torch.Tensor, mask: torch.Tensor, **kwargs) -> dict:
        """Run one forward pass.

        Args:
            image:  4D tensor (B x 3 x H x W) - vehicle RGB images.
            mask:   4D tensor (B x 5 x H x W) - part masks with 5 channels
                    (background, front, back, top, side).

        Returns:
            dict with classification scores, pooled (optionally BN-necked)
            global/local features, part weights, auxiliary training logits,
            and, in debug mode, intermediate masks / attention maps.
        """
        # Bug fix: the original first ran the whole backbone
        # (x = self.backbone(image)) and then fed that result back into
        # self.backbone.conv1, which expects the raw 3-channel image; the stem
        # must consume `image` directly.  The unused `b_x` capture for the
        # commented-out triplet attention was dropped as well.
        x = self.backbone.conv1(image)
        x = self.backbone.bn1(x)
        x = self.backbone.relu(x)
        x = self.backbone.maxpool(x)    # msba ==> self._apply_first_maxpool true

        # stages 1-3: each SE module returns the re-weighted feature map plus
        # the inverse-attention feature used by the auxiliary classifier
        x = self.backbone.layer1(x)
        x, x_inv_1 = self.att_modules[0](x)

        x = self.backbone.layer2(x)
        x, x_inv_2 = self.att_modules[1](x)
        cls_score_br1 = self.branch1(x)

        x = self.backbone.layer3(x)
        x, x_inv_3 = self.att_modules[2](x)
        cls_score_br2 = self.branch2(x)

        x = self.backbone.layer4(x)

        debug_dict = {}
        mask = mask[:, 1:, :, :]  # remove background channel
        # resize the merged part mask down to the feature-map size
        global_mask = [F.interpolate(torch.sum(mask, dim=1, keepdim=True),
                                     size=x.shape[2:], mode='nearest').squeeze()]  # (B, H, W)
        if self.no_part_backward:
            # detach the part copies so local branches do not backprop into the backbone
            fused_feat: List[torch.Tensor] = [x]
            for i in range(self.local_branches):
                fused_feat.append(x.detach())
        else:
            fused_feat: List[torch.Tensor] = [x for _ in range(self.local_branches + 1)]  # (B, C, H, W)
        global_feat = []
        local_feat = []
        local_mask = []
        fused_feat_ = []

        if self.after_mask:
            # TODO: logic is broken here — the masks are not downscaled, and
            # local_mask is still empty at this point (IndexError if enabled).
            # Dead code: self.after_mask is hard-coded to False in __init__.
            fused_feat[0] = global_mask[0] * fused_feat[0]
            for i in range(1, len(fused_feat)):
                fused_feat[i] = fused_feat[i] * local_mask[0][:, i - 1, :, :]

        if self.attention_enabled:
            # NOTE(review): self.attentions is a single module instance, not a
            # list; `self.attentions[0]` relies on that module supporting
            # __getitem__ — confirm against models.blocks.attention.
            fused_feat_.append(self.attentions[0](fused_feat[0]))
            for i in range(len(self.part_attentions)):
                if self.attentions_use_one:
                    fused_feat_.append(self.attentions[0](fused_feat[i + 1]))
                else:
                    fused_feat_.append(self.part_attentions[i](fused_feat[i + 1]))
        else:
            for i in range(self.local_branches + 1):
                fused_feat_.append(fused_feat[i])

        # masked global avg pooling
        global_feat.append(mask_adaptive_avg_pool2d(fused_feat_[0], global_mask[0], 1).squeeze())  # (B, C)
        # masked local avg pooling, one branch per part-mask channel
        local_mask.append(F.interpolate(mask, size=fused_feat[0].shape[2:], mode='nearest'))  # (B, N, H, W)
        for i in range(1, len(fused_feat_)):
            local_feat.append(mask_adaptive_avg_pool2d(fused_feat_[i], local_mask[0][:, i - 1, :, :], 1).squeeze())
        local_feat = [torch.stack(local_feat).permute(1, 2, 0)]  # (B, C, parts)

        global_bn_feat = []
        local_bn_feat = []

        global_bn_feat.append(self.global_bottleneck(global_feat[0]))    # BN-neck
        local_bn_feat.append(self.local_bottleneck(local_feat[0]))

        if self._debug:
            for i, (g_mask, l_mask, f_feat) in enumerate(zip(global_mask, local_mask, fused_feat)):
                debug_dict[f"gloabl_mask_{i + 1}"] = g_mask  # key typo kept for downstream compatibility
                debug_dict[f"local_mask_{i + 1}"] = l_mask
                debug_dict[f"fused_feat_{i + 1}"] = f_feat[:, 0]

        if self.pam_enabled:
            if self._debug:
                local_bn_feats = [torch.stack(local_bn_feat).permute(1, 2, 0)]
                part_weights, sam_attn, size_attn, g_weights, attn_weights = self.watcher(
                    torch.cat(local_bn_feats, dim=1), mask)
                pam_debug_dict = {"size_attention": size_attn,
                                  "feat_attention": sam_attn,
                                  "gate_weights": g_weights,
                                  "attention_weights": attn_weights}
                debug_dict = {**debug_dict, **pam_debug_dict}
            else:
                part_weights = self.watcher(torch.cat(local_bn_feat, dim=1), mask)
        else:
            # fallback: weight each part by its (smoothed, L1-normalized) mask area
            part_weights: torch.Tensor = torch.sum(mask, dim=[2, 3]) + 1
            part_weights = F.normalize(part_weights, p=1)

        global_cls_score = [self.global_classifier(global_bn_feat[0])]
        # fuse the per-part features using the learned / area-based part weights
        fused_local_bn_feat = (local_bn_feat[0] * part_weights.unsqueeze(dim=1)).sum(dim=2)
        local_cls_score = [self.local_classifier(fused_local_bn_feat)]

        weight_dict = {"part_weights": part_weights}

        train_dict = {"global_cls_score": global_cls_score,
                      "local_cls_score": local_cls_score}

        # auxiliary cross-entropy logits (populated only while training).
        # Bug fix: lf_xent_dict was previously assigned only inside the
        # training branch, raising NameError at the return in eval mode.
        lf_xent = []
        lf_xent_dict = {"lf_xent": lf_xent}

        if self.training:
            x_inv = torch.cat((x_inv_1, x_inv_2, x_inv_3), dim=1)
            x_inv = self.bottleneck_1(x_inv)
            inv_score = self.classifier_1(x_inv)

            # loss weights: global 1.0, inverse-attention 0.1, multi-scale 0.03 each
            lf_xent.extend([global_cls_score[0] * 1.0, inv_score * 0.1, cls_score_br1 * 0.03, cls_score_br2 * 0.03])
            if self.train_neck_feat == 'after':
                feats_dict = {"global_feat": global_bn_feat,
                              "local_feat": local_bn_feat}
            else:
                feats_dict = {"global_feat": global_feat,
                              "local_feat": local_feat}
        else:
            if self.test_neck_feat == 'after':
                feats_dict = {"global_feat": global_bn_feat,
                              "local_feat": local_bn_feat}
            else:
                feats_dict = {"global_feat": global_feat,
                              "local_feat": local_feat}

        return {**train_dict, **feats_dict, **debug_dict, **weight_dict, **lf_xent_dict}



if __name__ == '__main__':
    # Smoke test: build the model from the default configs and run a single
    # forward pass on the sample image/mask tensors shipped with the repo.
    # NOTE(review): requires a CUDA device; the model submodules are moved to
    # cfg.device inside __init__, so cfg.device must also be 'cuda' here.
    from models.configs.defaults import get_default_configs
    cfg = get_default_configs()
    model = PVASFF(cfg, 576)
    from models.tenten import image, mask

    # print(image.shape)
    # print(mask.shape)
    device = torch.device('cuda')
    image = image.to(device)
    mask = mask.to(device)
    output = model(image, mask)
    # global_feat = output["global_feat"]
    # global_score = output["global_cls_score"]
    # local_feat = output["local_feat"]
    # local_score = output["local_cls_score"]
#     vis_score = output["part_weights"]