# encoding: utf-8
"""
@author:  liaoxingyu
@contact: sherlockliao01@gmail.com
"""

from tkinter.messagebox import NO
import torch
import torch.nn.functional as F
from torch import nn

from fastreid.config import configurable
from fastreid.layers import *
from fastreid.layers import pooling, any_softmax
from fastreid.layers.weight_init import weights_init_kaiming, weights_init_classifier,weights_init_kaiming_msba, weights_init_classifier_msba
from .build import REID_HEADS_REGISTRY
from fastreid.modeling.heads.attentions import *
from fastreid.modeling.heads.cbam import *
from fastreid.modeling.heads.fpa import *


@REID_HEADS_REGISTRY.register()
class EmbeddingHead(nn.Module):
    """
    EmbeddingHead performs feature aggregation for embedding tasks such as
    re-identification, image retrieval and face recognition.

    It typically contains logic to

    1. aggregate features via global average / generalized-mean pooling
    2. (optional) apply a batchnorm bottleneck
    3. (training only) compute margin-based softmax logits
    """

    @configurable
    def __init__(
            self,
            *,
            feat_dim,
            embedding_dim,
            num_classes,
            neck_feat,
            pool_type,
            cls_type,
            scale,
            margin,
            with_bnneck,
            norm_type
    ):
        """
        NOTE: this interface is experimental.

        Args:
            feat_dim (int): channel width of the backbone feature map; also
                sizes the bottleneck BN and the classifier weight.
            embedding_dim (int): if > 0, the classifier weight is sized to this
                value instead of ``feat_dim``. NOTE(review): the upstream 1x1
                reduction conv is not applied in this modified head, so a
                non-zero value likely mismatches the bottleneck — confirm
                before enabling.
            num_classes (int): number of identity classes.
            neck_feat (str): which feature to return in the training dict,
                'before' (pre-BN pooled feature) or 'after' (post-BN).
            pool_type (str): name of a pooling layer in ``fastreid.layers.pooling``.
            cls_type (str): name of a classifier in ``fastreid.layers.any_softmax``.
            scale (float): logit scale for the margin-based softmax.
            margin (float): margin for the margin-based softmax.
            with_bnneck (bool): unused in this modified head; a plain
                ``BatchNorm1d`` bottleneck is always applied.
            norm_type (str): unused in this modified head.
        """
        super().__init__()

        # Pooling layer
        assert hasattr(pooling, pool_type), "Expected pool types are {}, " \
                                            "but got {}".format(pooling.__all__, pool_type)
        self.pool_layer = getattr(pooling, pool_type)()

        self.neck_feat = neck_feat

        # Auxiliary attention / multi-head branches, registered so checkpoint
        # keys are preserved. Device placement is left to the framework's
        # model.to(device) call; the previous hard-coded .to("cuda") broke
        # CPU-only runs.
        self.MultiHeads = MultiHeads()
        self.ca = ChannelAttention(2048)
        self.sa = SpatialAttention()
        self.fpa = None

        if embedding_dim > 0:
            # Kept for interface compatibility: only the classifier weight is
            # resized; no reduction conv is applied (see docstring NOTE).
            feat_dim = embedding_dim

        # Bottleneck BN over the flattened pooled feature, sized from feat_dim
        # instead of the previous hard-coded 2048 so other backbone widths work.
        self.bottleneck = nn.BatchNorm1d(feat_dim)
        self.bottleneck.bias.requires_grad_(False)  # no shift

        # Auxiliary branch over concatenated invariant features (1792-d).
        self.bottleneck_1 = nn.BatchNorm1d(1792)
        self.bottleneck_1.bias.requires_grad_(False)  # no shift
        self.classifier_1 = nn.Linear(1792, num_classes, bias=False)

        self.bottleneck_1.apply(weights_init_kaiming_msba)
        self.classifier_1.apply(weights_init_classifier_msba)

        # Classification head
        assert hasattr(any_softmax, cls_type), "Expected cls types are {}, " \
                                               "but got {}".format(any_softmax.__all__, cls_type)
        self.weight = nn.Parameter(torch.Tensor(num_classes, feat_dim))
        self.cls_layer = getattr(any_softmax, cls_type)(num_classes, scale, margin)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Initialize the bottleneck BN and the classifier weight."""
        self.bottleneck.apply(weights_init_kaiming)
        nn.init.normal_(self.weight, std=0.01)

    @classmethod
    def from_config(cls, cfg):
        """Build constructor kwargs from a fastreid config node."""
        # fmt: off
        feat_dim      = cfg.MODEL.BACKBONE.FEAT_DIM
        embedding_dim = cfg.MODEL.HEADS.EMBEDDING_DIM
        num_classes   = cfg.MODEL.HEADS.NUM_CLASSES
        neck_feat     = cfg.MODEL.HEADS.NECK_FEAT
        pool_type     = cfg.MODEL.HEADS.POOL_LAYER
        cls_type      = cfg.MODEL.HEADS.CLS_LAYER
        scale         = cfg.MODEL.HEADS.SCALE
        margin        = cfg.MODEL.HEADS.MARGIN
        with_bnneck   = cfg.MODEL.HEADS.WITH_BNNECK
        norm_type     = cfg.MODEL.HEADS.NORM
        # fmt: on
        return {
            'feat_dim': feat_dim,
            'embedding_dim': embedding_dim,
            'num_classes': num_classes,
            'neck_feat': neck_feat,
            'pool_type': pool_type,
            'cls_type': cls_type,
            'scale': scale,
            'margin': margin,
            'with_bnneck': with_bnneck,
            'norm_type': norm_type
        }

    def forward(self, features, targets=None, x_inv_list=None, cls_score_br=None):
        """
        See :class:`ReIDHeads.forward`.

        Args:
            features: backbone feature map, presumably (B, C, H, W) — pooled
                and flattened to (B, C) here.
            targets: ground-truth labels, required by the margin-based
                classifier during training.
            x_inv_list: unused; kept for caller compatibility.
            cls_score_br: unused; kept for caller compatibility.

        Returns:
            The (B, C) bottleneck feature at evaluation time, or a dict with
            ``cls_outputs`` (list of logit tensors) and ``features`` during
            training.
        """
        pool_feat = self.pool_layer(features)
        pool_feat = pool_feat.flatten(1)          # (B, C)
        neck_feat = self.bottleneck(pool_feat)

        # Evaluation: return the bottleneck feature directly.
        if not self.training:
            return neck_feat

        # Training: margin-based softmax logits. Pass logits.clone() into
        # cls_layer because it performs in-place operations.
        logits = F.linear(neck_feat, self.weight)
        cls_outputs = self.cls_layer(logits.clone(), targets)
        lf_xent = [cls_outputs * 1.0]  # unit weight kept as a tuning hook

        # fmt: off
        # 'before' returns the pre-BN pooled feature. It is already flattened
        # to (B, C) above, so the previous pool_feat[..., 0, 0] indexing
        # (which collapsed the batch to a scalar) is wrong here.
        if self.neck_feat == 'before':  feat = pool_feat
        elif self.neck_feat == 'after': feat = neck_feat
        else:                           raise KeyError(f"{self.neck_feat} is invalid for MODEL.HEADS.NECK_FEAT")
        # fmt: on

        return {
            "cls_outputs": lf_xent,
            "features": feat,
        }
