# Inspired by PVEN — Parsing-based View-aware Embedding Network
# https://github.com/silverbulletmdc/PVEN
# Paper: "Parsing-based View-aware Embedding Network for Vehicle Re-Identification"
# by Dechao Meng, Liang Li, Xuejing Liu, et al.
# [https://arxiv.org/abs/2004.05021]

__all__ = ['PVASFF']

import torch
import torch.nn as nn
import torch.nn.functional as F

import inspect

from .blocks import Prebuild_ResNet, ResNet, ResNetV2, resnet_model_urls
from .blocks import AdaptiveSpatialFeatureFusion as ASFF
from .blocks import PartAttentionModule
from .blocks import mask_adaptive_avg_pool2d, MaskAdaptiveAvgPool2d
from .blocks import mask_global_covariance_pool
from .blocks import attention
from torch.hub import load_state_dict_from_url

from collections import OrderedDict
from collections.abc import Iterable
from typing import Union, Optional, Tuple, List
from yacs.config import CfgNode

from .tools.valid_tools import list2dict


def _check_integer(_x: Iterable):
    for __x in _x:
        if not isinstance(__x, int):
            return False
    return True

def weights_init_kaiming(m):
    """Kaiming-initialise a module in place, dispatched on its class name.

    Linear:     kaiming-normal weights (fan_out), zero bias when present.
    Conv*:      kaiming-normal weights (fan_in), zero bias when present.
    BatchNorm*: weight=1, bias=0 when the layer is affine.
    Any other module type is left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')
        # BUGFIX: guard for Linear(..., bias=False) — this file builds
        # bias-free classifier Linears, and the original crashed here.
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('Conv') != -1:
        nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')
        if m.bias is not None:
            nn.init.constant_(m.bias, 0.0)
    elif classname.find('BatchNorm') != -1:
        if m.affine:
            nn.init.constant_(m.weight, 1.0)
            nn.init.constant_(m.bias, 0.0)



class PVASFF(nn.Module):
    """Parsing-based View-aware model with Adaptive Spatial Feature Fusion.

    Inspired by PVEN (https://arxiv.org/abs/2004.05021): a ResNet backbone
    extracts multi-scale features; part masks (front/back/top/side) drive
    masked average pooling into one global branch and ``local_branches``
    part branches, optionally refined by ASFF fusion, attention modules and
    a PartAttentionModule that re-weights the part branches.
    """

    def __init__(self, cfg: CfgNode, num_class: int, debug=False) -> None:
        """Build the model from a yacs config.

        Args:
            cfg:       configuration node (model/backbone/asff/attention/pam/...)
            num_class: number of identities for the classification heads
            debug:     when True, forward() also returns intermediate tensors
        """
        super(PVASFF, self).__init__()
        self.num_class: int = num_class
        self.local_branches: int = cfg.model.local_branches  # number of vehicle parts, e.g. 4
        self.device: torch.device = cfg.device
        self._debug: bool = debug
        self._full_scale_asff: bool = cfg.asff.full_scale
        self.normal_feature = cfg.normal_feature
        # NOTE(review): mirrors cfg.normal_feature; possibly meant to read a
        # separate cfg.test_normal_feature option — confirm against config schema.
        self.test_normal_feature = cfg.normal_feature
        self.co_softmax = cfg.co_softmax
        self.attentions_use_one = cfg.attention.use_one
        self.no_part_backward = cfg.no_part_backward

        # ---- backbone (only the resnet family is supported so far) ----
        backbone: str = cfg.backbone.name
        if not backbone.startswith('resnet'):
            raise NotImplementedError("specified backbone has not been implemented so far")

        # Keep only the constructor arguments accepted by the chosen variant.
        # BUGFIX: original tested `backbone[-3] == '_v2'` (one char vs a
        # 3-char string, always False) and dead-ended in NotImplementedError;
        # the rest of the file consistently uses `backbone[-3:] == '_v2'`.
        if backbone[-3:] == '_v2':
            backbone_args = {k: v for k, v in dict(cfg.backbone).items()
                             if k in inspect.getfullargspec(ResNetV2).args}
        else:
            backbone_args = {k: v for k, v in dict(cfg.backbone).items()
                             if k in inspect.getfullargspec(ResNet).args}

        if backbone in ('resnet', 'resnet_v2'):  # build custom resnet
            if 'layers' not in cfg.backbone:
                raise ValueError(
                    "failed to build custom resnet because of missing essential argument: 'backbone.layers'")
            if backbone == 'resnet':
                self.backbone: nn.Module = ResNet(Prebuild_ResNet.Bottleneck, **backbone_args)
            else:
                self.backbone: nn.Module = ResNetV2(Prebuild_ResNet.BottleneckV2, **backbone_args)
        else:  # build a pre-defined resnet (e.g. resnet50)
            builder = getattr(Prebuild_ResNet, backbone, None)
            # BUGFIX: original called the attribute before its None-check, so an
            # unknown name raised TypeError instead of the intended ValueError.
            if builder is None:
                raise ValueError("failed to build resnet with specified name")
            self.backbone: nn.Module = builder(**backbone_args)

        # ---- optionally load pretrained weights ----
        if cfg.backbone.pretrained:
            if cfg.backbone.pretrained_path == "":
                # '_v2' variants reuse the original resnet weights
                arch: str = backbone[:-3] if backbone[-3:] == '_v2' else backbone
                if arch not in resnet_model_urls:
                    raise ValueError("cannot find specified pre-trained weights from pytorch website, "
                                     "please set 'model.pretrained = False' or specify a local checkpoint")
                pretrained_dict: OrderedDict = load_state_dict_from_url(resnet_model_urls[arch], progress=True)
            else:
                # BUGFIX: original loaded cfg.model.pretrained_path although the
                # emptiness check above reads cfg.backbone.pretrained_path.
                pretrained_dict: OrderedDict = torch.load(cfg.backbone.pretrained_path)
            backbone_dict: OrderedDict = self.backbone.state_dict()
            if backbone[-3:] == '_v2':
                # BN layer sizes changed in v2: cannot load BN weights from pretrained dict
                pretrained_dict = {k: v for k, v in pretrained_dict.items()
                                   if k in backbone_dict and 'bn' not in k}
            else:
                pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in backbone_dict}
            backbone_dict.update(pretrained_dict)
            self.backbone.load_state_dict(backbone_dict)

            # Freeze selected backbone children: an int N freezes the first N
            # children, an iterable lists explicit child indices.
            suspend = cfg.backbone.suspend
            if isinstance(suspend, int):
                suspend = list(range(suspend)) if suspend > 0 else []
            elif isinstance(suspend, Iterable):
                if isinstance(suspend, range):
                    suspend = list(suspend)
                if not _check_integer(suspend):
                    raise ValueError("excepted freeze options as collections of integer, "
                                     "specify layer index to be freezed")
            else:
                raise TypeError("invalid freeze options")
            for index, child in enumerate(self.backbone.children()):
                if index in suspend:
                    for name, param in child.named_parameters():
                        if 'bn' not in name:  # keep batch-norm parameters trainable
                            param.requires_grad = False

        # ---- derive output channels/shapes ----
        # The backbone's last parameter is a BatchNorm weight whose size(0)
        # equals the final stage's channel count.
        out_channels = list(self.backbone.named_parameters())[-1][1].size(0)
        assert out_channels > 0, "there should be some output channels "  # should never fire
        # channel widths of the last three resnet stages (refer to the ResNet paper)
        self.level_dims: List[int] = [out_channels // 4, out_channels // 2, out_channels]
        self.backbone_stage1_out_channels: int = self.level_dims[0]
        self.backbone_stage1_out_shape: Tuple[int, int] = [cfg.data.train_size[0] // 8,
                                                           cfg.data.train_size[1] // 8]
        self.backbone_out_channels: int = out_channels
        # total stride is 32 with the first max-pool, 16 without
        stride = 32 if cfg.backbone.apply_first_maxpool else 16
        self.backbone_out_shape: Tuple[int, int] = [cfg.data.train_size[0] // stride,
                                                    cfg.data.train_size[1] // stride]
        self.backbone.to(cfg.device)

        # ---- modified ASFF fusion module ----
        if cfg.model.asff:
            self.asff_enabled = True
            cbam_params = list2dict(cfg.cbam.params)
            cbam_params['activation'] = cfg.cbam.activation
            asff_kwargs = dict(level_dims=self.level_dims,
                               interpolate=cfg.asff.interpolate,
                               align_corners=cfg.asff.align_corners,
                               weight_channels=cfg.asff.weight_channels,
                               scale=[1., 2., 4.], device=cfg.device,
                               debug=debug, **cbam_params)
            if self._full_scale_asff:
                # one fuser per pyramid level
                self.fuser1 = ASFF(level=0, **asff_kwargs)
                self.fuser2 = ASFF(level=1, **asff_kwargs)
                self.fuser3 = ASFF(level=2, **asff_kwargs)
                self.fuser_out_channels: int = sum(self.level_dims)
            else:
                self.fuser = ASFF(level=2, **asff_kwargs)  # c = 512/1024/2048
                self.fuser_out_channels: int = self.level_dims[-1]
        else:
            self.asff_enabled = False
        # NOTE(review): unconditionally overrides the value set in the ASFF
        # branch above (present in the original) — confirm this is intended.
        self.fuser_out_channels = self.level_dims[-1]

        # ---- attention modules (one global + one per part) ----
        if cfg.model.attention:
            self.attention_enabled = True
            # BUGFIX: original compared module names with `is`, which tests
            # object identity (not equality) on strings; with non-interned
            # config strings every branch was False and the args dicts were
            # left unbound (UnboundLocalError at construction below).
            name = cfg.attention.name
            if name == 'ExternalAttention':
                attention_args = {"spa_feature_length": self.level_dims[-1],
                                  "batch_feature_length": self.backbone_out_shape[0] ** 2,
                                  "spa_S": cfg.attention.ExternalAttention_spa_S,
                                  "batch_S": cfg.attention.ExternalAttention_batch_S}
            elif name == 'SEAttention':
                attention_args = {"channel": self.level_dims[-1],
                                  "reduction": cfg.attention.SEAttention_reduction}
            elif name == 'ChannelSpatialAttentionModule':
                attention_args = {"channel": self.level_dims[-1],
                                  "reduction": cfg.attention.ChannelSpatialAttentionModule_reduction,
                                  "kernel_size": cfg.attention.ChannelSpatialAttentionModule_kernel_size}
            elif name == 'SKAttention':
                attention_args = {"channel": self.level_dims[-1],
                                  "kernels": cfg.attention.SKAttention_kernels,
                                  "reduction": cfg.attention.SKAttention_reduction,
                                  "group": cfg.attention.SKAttention_group,
                                  "L": cfg.attention.SKAttention_L,
                                  }
            elif name == 'BAMBlock':
                attention_args = {"channel": self.level_dims[-1],
                                  "reduction": cfg.attention.BAMBlock_reduction,
                                  "dia_val": cfg.attention.BAMBlock_dia_val,
                                  }
            elif name == 'ECAAttention':
                attention_args = {"kernel_size": cfg.attention.ECAAttention_kernel_size, }
            elif name == 'DAModule':
                attention_args = {"d_model": self.level_dims[-1],
                                  "kernel_size": cfg.attention.DAModule_kernel_size,
                                  "H": self.backbone_out_shape[0],
                                  "W": self.backbone_out_shape[1],
                                  }
            else:
                raise ValueError("unsupported global attention module: %s" % name)

            # local (per-part) attention
            part_name = cfg.attention.part_name
            if part_name == 'PartExternalAttention':
                part_attention_args = {"spa_feature_length": self.fuser_out_channels,
                                       "batch_feature_length": self.backbone_out_shape[0] ** 2,
                                       "spa_S": cfg.attention.ExternalAttention_spa_S,
                                       "batch_S": cfg.attention.ExternalAttention_batch_S}
            elif part_name == 'PartSpatialAttention':
                part_attention_args = {"kernel_size": cfg.attention.ChannelSpatialAttentionModule_kernel_size}
            else:
                raise ValueError("unsupported part attention module: %s" % part_name)

            self.attentions = nn.ModuleList(
                [getattr(attention, name)(**attention_args)]).to(self.device)
            self.part_attentions = nn.ModuleList(
                [getattr(attention, part_name)(**part_attention_args) for
                 _ in range(self.local_branches)]).to(self.device)
        else:
            self.attention_enabled = False

        # ---- part attention module (learned part weighting) ----
        if cfg.model.pam:
            self.pam_enabled = True
            self.watcher = PartAttentionModule(num_parts=self.local_branches,
                                               num_in_features=self.fuser_out_channels,
                                               dropout_rate=cfg.pam.dropout_rate,
                                               rank=cfg.pam.rank,
                                               lowrank=cfg.pam.lowrank,
                                               fusion=cfg.pam.fusion,
                                               device=cfg.device,
                                               debug=debug)
        else:
            self.pam_enabled = False

        # ---- feature normalisation (BNNeck-style) ----
        global_normalizer = []
        local_normalizer = []
        if self._full_scale_asff:
            # NOTE(review): unlike the single-scale branch, biases here stay
            # trainable (as in the original) — confirm this asymmetry is intended.
            for i in range(3):
                global_normalizer.append(nn.BatchNorm1d(self.level_dims[i]))
                local_normalizer.append(nn.BatchNorm1d(self.level_dims[i]))
        else:
            global_nor = nn.BatchNorm1d(self.fuser_out_channels)
            local_nor = nn.BatchNorm1d(self.fuser_out_channels)
            global_nor.bias.requires_grad_(False)
            local_nor.bias.requires_grad_(False)
            global_normalizer.append(global_nor)
            local_normalizer.append(local_nor)
        self.global_normalizer = nn.ModuleList(global_normalizer).to(cfg.device)
        self.local_normalizer = nn.ModuleList(local_normalizer).to(cfg.device)

        # ---- classifiers ----
        if self.co_softmax:
            # cosine-style classifiers: raw weight matrices plus learned scales
            self.global_classifier = torch.nn.Parameter(torch.empty(self.level_dims[-1], num_class, device=self.device))
            nn.init.xavier_uniform_(self.global_classifier, gain=1.414)  # Glorot initialization
            self.local_classifier = torch.nn.Parameter(torch.empty(self.level_dims[-1], num_class, device=self.device))
            nn.init.xavier_uniform_(self.local_classifier, gain=1.414)  # Glorot initialization
            self.global_classifier_k = torch.nn.Parameter(torch.empty(1, 1, device=self.device))
            nn.init.xavier_uniform_(self.global_classifier_k, gain=1.414)  # Glorot initialization
            self.local_classifier_k = torch.nn.Parameter(torch.empty(1, 1, device=self.device))
            nn.init.xavier_uniform_(self.local_classifier_k, gain=1.414)  # Glorot initialization
        else:
            # plain linear classifiers, one per normaliser (bias-free, BNNeck-style)
            global_classifier = []
            local_classifier = []
            n_heads = len(self.global_normalizer)
            for i in range(n_heads):
                global_classifier.append(
                    nn.Linear(self.level_dims[-n_heads + i], num_class, bias=False))
                local_classifier.append(
                    nn.Linear(self.level_dims[-n_heads + i], num_class, bias=False))
            self.global_classifier = nn.ModuleList(global_classifier).to(cfg.device)
            self.local_classifier = nn.ModuleList(local_classifier).to(cfg.device)

        # ---- weight initialisation for the head layers ----
        for m in self.modules():
            if isinstance(m, nn.BatchNorm1d):
                weights_init_kaiming(m)
            if isinstance(m, nn.Conv1d):
                nn.init.xavier_uniform_(m.weight, gain=1)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)

    def forward(self, image: torch.Tensor, mask: torch.Tensor, **kwargs) -> dict:
        """Run the model on a batch.

        Args:
            image: 4D tensor (B, 3, H, W) — vehicle RGB images.
            mask:  4D tensor (B, 5, H, W) — part masks, channels =
                   (background, front, back, top, side).

        Returns:
            dict with "global_feat"/"local_feat" and "part_weights"; in
            training mode also "global_cls_score"/"local_cls_score"; extra
            intermediate tensors when debug mode is on.
        """
        y = self.backbone(image)
        # assumes the backbone returns a tuple of pyramid features — TODO confirm
        x: Tuple[torch.Tensor, ...] = y
        debug_dict = {}

        mask = mask[:, 1:, :, :]  # drop the background channel
        if self.asff_enabled:
            if self._full_scale_asff:
                # merged foreground mask resized to each pyramid level
                global_mask = []
                for i in range(3):
                    global_mask.append(F.interpolate(torch.sum(mask, dim=1, keepdim=True),
                                                     size=x[i].shape[2:], mode='nearest').squeeze())  # (B, H, W)
                if self._debug:
                    fused_feat1, asff_weight1 = self.fuser1(x)
                    fused_feat2, asff_weight2 = self.fuser2(x)
                    fused_feat3, asff_weight3 = self.fuser3(x)
                    debug_dict.update({"asff_weights_lv0": asff_weight1,
                                       "asff_weights_lv1": asff_weight2,
                                       "asff_weights_lv2": asff_weight3})
                else:
                    fused_feat1 = self.fuser1(x)  # (B, C, H, W)
                    fused_feat2 = self.fuser2(x)  # (B, C, H, W)
                    fused_feat3 = self.fuser3(x)  # (B, C, H, W)
                fused_feat = [fused_feat1, fused_feat2, fused_feat3]
            else:
                global_mask = [F.interpolate(torch.sum(mask, dim=1, keepdim=True),
                                             size=x[-1].shape[2:], mode='nearest').squeeze()]  # (B, H, W)
                if self._debug:
                    fused_feat, asff_weight = self.fuser(x)
                    debug_dict["asff_weights"] = asff_weight
                else:
                    fused_feat = self.fuser(x)  # (B, C, H, W)
                fused_feat = [fused_feat]
        else:
            # resize the merged foreground mask to the feature-map size
            global_mask = [F.interpolate(torch.sum(mask, dim=1, keepdim=True),
                                         size=x[-1].shape[2:], mode='nearest').squeeze()]  # (B, H, W)
            if self.no_part_backward:
                # detached copies so the part branches do not backprop into the backbone
                fused_feat = [x[-1]]
                for _ in range(self.local_branches):
                    fused_feat.append(x[-1].detach())
            else:
                fused_feat = [x[-1] for _ in range(self.local_branches + 1)]  # (B, C, H, W)

        global_feat = []
        local_feat = []
        local_mask = []
        fused_feat_ = []
        if self.attention_enabled:
            fused_feat_.append(self.attentions[0](fused_feat[0]))
            for i in range(len(self.part_attentions)):
                if self.attentions_use_one:
                    # NOTE(review): indexes fused_feat[i] here but fused_feat[i+1]
                    # below — confirm the off-by-one is intended (kept as original).
                    fused_feat_.append(self.attentions[0](fused_feat[i]))
                else:
                    fused_feat_.append(self.part_attentions[i](fused_feat[i + 1]))
        else:
            for i in range(self.local_branches + 1):
                fused_feat_.append(fused_feat[i])

        # masked global average pooling -> (B, C)
        global_feat.append(mask_adaptive_avg_pool2d(fused_feat_[0], global_mask[0], 1).squeeze())
        # per-part masks at feature-map resolution -> (B, N, H, W)
        local_mask.append(F.interpolate(mask, size=fused_feat[0].shape[2:], mode='nearest'))
        # masked local average pooling, one part mask per branch
        for i in range(1, len(fused_feat_)):
            local_feat.append(mask_adaptive_avg_pool2d(fused_feat_[i], local_mask[0][:, i - 1, :, :], 1).squeeze())
        local_feat = [torch.stack(local_feat).permute(1, 2, 0)]  # (B, C, N)

        # BNNeck: batch-normalised copies of the pooled features
        global_bn_feat = []
        local_bn_feat = []
        for i in range(len(global_feat)):
            global_bn_feat.append(self.global_normalizer[i](global_feat[i]))
            local_bn_feat.append(self.local_normalizer[i](local_feat[i]))
        # choose raw vs normalised features; training returns lists, eval concatenates
        if self.normal_feature:
            if self.training:
                feats_dict = {"global_feat": global_bn_feat,
                              "local_feat": local_bn_feat}
            else:
                feats_dict = {"global_feat": torch.cat(global_bn_feat, dim=1),
                              "local_feat": torch.cat(local_bn_feat, dim=1)}
        else:
            if self.training:
                feats_dict = {"global_feat": global_feat,
                              "local_feat": local_feat}
            else:
                if self.test_normal_feature:
                    feats_dict = {"global_feat": torch.cat(global_bn_feat, dim=1),
                                  "local_feat": torch.cat(local_bn_feat, dim=1)}
                else:
                    feats_dict = {"global_feat": torch.cat(global_feat, dim=1),
                                  "local_feat": torch.cat(local_feat, dim=1)}

        if self._debug:
            for i, (g_mask, l_mask, f_feat) in enumerate(zip(global_mask, local_mask, fused_feat)):
                # key typo "gloabl" kept for compatibility with downstream debug consumers
                debug_dict[f"gloabl_mask_{i + 1}"] = g_mask
                debug_dict[f"local_mask_{i + 1}"] = l_mask
                debug_dict[f"fused_feat_{i + 1}"] = f_feat[:, 0]

        # part weights: learned (PAM) or proportional to part mask area
        if self.pam_enabled:
            if self._debug:
                part_weights, sam_attn, size_attn, g_weights, attn_weights = self.watcher(
                    torch.cat(local_bn_feat, dim=1), mask)
                debug_dict.update({"size_attention": size_attn,
                                   "feat_attention": sam_attn,
                                   "gate_weights": g_weights,
                                   "attention_weights": attn_weights})
            else:
                part_weights = self.watcher(torch.cat(local_bn_feat, dim=1), mask)
        else:
            # +1 avoids zero weight for absent parts; L1-normalise over parts
            part_weights: torch.Tensor = torch.sum(mask, dim=[2, 3]) + 1
            part_weights = F.normalize(part_weights, p=1)
        weight_dict = {"part_weights": part_weights}

        if self.training:
            global_cls_score = []
            local_cls_score = []
            if self.co_softmax:
                # cosine-style logits: L2-normalised classifier scaled by a learned k
                l2_global_classifier = F.normalize(self.global_classifier, dim=1, p=2)
                global_cls_score_ = torch.matmul(global_bn_feat[0], l2_global_classifier)
                b, f = global_cls_score_.shape
                global_cls_score.append(global_cls_score_ * self.global_classifier_k.repeat(b, f))

                # BUGFIX: original normalised self.global_classifier here,
                # leaving self.local_classifier unused.
                l2_local_classifier = F.normalize(self.local_classifier, dim=1, p=2)
                # part-weighted sum over the N local branches -> (B, C)
                fused_local_bn_feat: torch.Tensor = (local_bn_feat[0] * part_weights.unsqueeze(dim=1)).sum(dim=2)
                local_cls_score_ = torch.matmul(fused_local_bn_feat, l2_local_classifier)
                b, f = local_cls_score_.shape
                local_cls_score.append(local_cls_score_ * self.local_classifier_k.repeat(b, f))
            else:
                for i in range(len(self.global_classifier)):
                    global_cls_score.append(self.global_classifier[i](global_bn_feat[i]))
                    # part-weighted sum over the N local branches -> (B, C)
                    fused_local_bn_feat: torch.Tensor = (local_bn_feat[i] * part_weights.unsqueeze(dim=1)).sum(dim=2)
                    local_cls_score.append(self.local_classifier[i](fused_local_bn_feat))
            train_dict = {"global_cls_score": global_cls_score,
                          "local_cls_score": local_cls_score}
            return {**train_dict, **feats_dict, **debug_dict, **weight_dict}
        else:
            return {**feats_dict, **debug_dict, **weight_dict}
