from vehicle_reid_pytorch.loss.triplet_loss import normalize, euclidean_dist, hard_example_mining
from vehicle_reid_pytorch.models import Baseline
import torch
from torch import nn
import torch.nn.functional as F
from functools import reduce
from math_tools import clck_dist
from pprint import pprint
# from vehicle_reid_pytorch.models.gau import *
from vehicle_reid_pytorch.models.uafm import UAFM_ChAtten
# CONST_use_pam = True
from numpy import prod
from vehicle_reid_pytorch.models.baseline import weights_init_classifier, weights_init_kaiming

# class PartAttentionModule(nn.Module):

#     def __init__(self, num_parts: int, num_in_features: int,
#                  dropout_rate=0.,
#                  rank=1, lowrank=False, fusion=True,
#                  debug: bool = False,
#                 #  device: torch.device = 'cuda') -> None:
#                  device: torch.device = 'cpu') -> None:
#         super(PartAttentionModule, self).__init__()

#         self.num_parts: int = num_parts
#         self.in_channels: int = num_in_features
#         self.out_channels: int = num_parts
#         self.dropout_rate: int = dropout_rate
#         self.device: torch.device = device
#         self._debug: bool = debug
#         self._lowrank: bool = lowrank
#         self._fusion: bool = fusion
#         if lowrank == False and not (rank == 1 or rank == num_parts):
#             raise ValueError()
#         else:
#             self.rank = rank

#         # self.shared_MLP = nn.Sequential(
#         #     nn.Conv1d(2048, 2048 // 8, 1, bias=False),
#         #     nn.ReLU(),
#         #     nn.Conv1d(2048 // 8, 4, 1, bias=False)
#         # ).to(device)
#         self.shared_MLP = nn.Sequential(
#             nn.Linear(4, 1, bias=False),
#             nn.ReLU(),
#             # nn.GELU(),
#             nn.Linear(1, 4, bias=False)
#         ).to(device)

#         # self.fc_final = nn.Linear(2048, 4).to(device)

#         if self._lowrank:
#             _expander = torch.ones(rank, num_parts,
#                                    device=device, dtype=torch.float32, requires_grad=True)
#             self.expander = nn.Parameter(_expander, requires_grad=True)
#         # self.dropout = nn.Dropout(p=dropout_rate).to(device)
#         if self._fusion:
#             self.fc1 = nn.Linear(num_parts, 1).to(device)
#             self.fc2 = nn.Linear(num_parts, 1).to(device)

#     def forward(self, x: torch.tensor, mask: torch.tensor) -> torch.tensor:
#         assert len(x.size()) == 3, f"excepted input tensor 'x' as 3D tensor of shape (B, C, N) " \
#                                    f"but got a {len(x.size())}D tensor instead"
#         assert x.size(2) == self.num_parts and x.size(1) == self.in_channels, \
#             f"excepted input tensor 'x' shape as (*, {self.in_channels}, {self.num_parts}), " \
#             f"but got input tensor with shape of (*, {x.size(1)}, {x.size(2)}) instead"
#         assert len(mask.size()) == 4, f"excepted mask tensor as 4D tensor of shape (B, N, H, W) " \
#                                       f"but got a {len(x.size())}D tensor instead"
#         assert mask.size(1) == self.num_parts, \
#             f"excepted mask tensor shape as (*, {self.num_parts}, *, *), " \
#             f"but got input tensor with shape of (*, {x.size(1)}, *, *) instead"

#         # feature attention
#         if self._lowrank:
#             weight = torch.matmul(self.weights, self.expander) ##  self.weights delete
#         else:
#             # print(x.permute(0,2,1).shape)
#             x_squeeze = nn.AdaptiveAvgPool1d(1)(x.permute(0,2,1))     # 64, 4, 1
#             # print(a.shape)
#             x_excitation = self.shared_MLP(x_squeeze.squeeze(-1))
#             # print(x_excitation.shape)
#             # raise "ssss"
#             # xs = torch.sum(x, dim=-1)
#             # avg_pool = F.adaptive_avg_pool1d(xs.unsqueeze(-1), 1)
#             # print(avg_pool.shape)
#             # raise "$$$"
#             # f_a = self.shared_MLP(xs)
#             # f_a = self.shared_MLP(avg_pool.squeeze(-1))
#             # f_a = self.fc_final(f_a)
# #            raise "ss"
#         # feat_attn = F.softmax(f_a.squeeze(-1), dim=1)
#         feat_attn = F.softmax(x_excitation, dim=1)
# #        feat_attn = F.softmax(torch.sum(feat_attn, dim=1), dim=1)
#         # print(feat_attn)
#         # size attention
#         # size_attn: torch.tensor = torch.sum(mask, dim=[2, 3]) + 1  # (B, N)
#         size_attn: torch.tensor = mask.sum(dim=[2, 3]) + 1
#         size_attn = F.normalize(size_attn, p=1)
#         # print(size_attn)
#         # print(torch.cat([self.fc1(feat_attn), self.fc2(size_attn)], dim=1).shape)
#         # raise "sss"
#         if self._fusion:
#             # gate mechanism
#             g_weight: torch.tensor = F.softmax(torch.cat([self.fc1(feat_attn), self.fc2(size_attn)], dim=1), dim=1)
#             #  feat_attn 特征权重   size_attn  面积权重
#             fused_attn: torch.tensor = g_weight[:, :1] * feat_attn + g_weight[:, 1:] * size_attn
#         else:
#             g_weight: torch.tensor = torch.tensor([[1., 0.]], device=self.device).expand(size_attn.size(0), -1)
#             fused_attn: torch.tensor = feat_attn

#         if self._debug:
#             return fused_attn, feat_attn, size_attn, g_weight, self.weights
#         else:
#             return fused_attn


class PartAttentionModule(nn.Module):
    """Compute per-part attention weights for part-based re-identification.

    Two attention signals are produced and (optionally) fused by a learned gate:
      * feature attention — a learned per-channel weighting of the input,
        softmax-normalised over the parts;
      * size attention — the (Laplace-smoothed) pixel area of each part mask,
        L1-normalised over the parts.
    """

    def __init__(self, num_parts: int, num_in_features: int,
                 dropout_rate=0.,
                 rank=1, lowrank=False, fusion=True,
                 debug: bool = False,
                 device: torch.device = 'cpu') -> None:
        """
        :param num_parts: number of local parts N
        :param num_in_features: channel dimension C of the part features
        :param dropout_rate: dropout applied to the attention weights
        :param rank: rank of the weight matrix (low-rank factorisation)
        :param lowrank: if True, expand rank-reduced weights via ``expander``
        :param fusion: if True, gate feature attention with size attention
        :param debug: if True, ``forward`` returns all intermediate attentions
        :param device: device the parameters are created on
        """
        super(PartAttentionModule, self).__init__()

        self.num_parts: int = num_parts
        self.in_channels: int = num_in_features
        self.out_channels: int = num_parts
        self.dropout_rate: float = dropout_rate  # fixed: was mis-annotated as int
        self.device: torch.device = device
        self._debug: bool = debug
        self._lowrank: bool = lowrank
        self._fusion: bool = fusion
        # Full-rank mode only supports rank == 1 or rank == num_parts.
        if not lowrank and not (rank == 1 or rank == num_parts):
            raise ValueError(
                f"rank must be 1 or num_parts ({num_parts}) when lowrank is False, got {rank}")
        self.rank = rank

        # Per-channel attention weights, uniformly initialised to 1/C.
        _empty = torch.empty(num_in_features, rank,
                             device=device, dtype=torch.float32, requires_grad=True)
        nn.init.constant_(_empty, 1 / num_in_features)
        self.weights = nn.Parameter(_empty, requires_grad=True)
        if self._lowrank:
            # Expands the rank-reduced weights back to one column per part.
            _expander = torch.ones(rank, num_parts,
                                   device=device, dtype=torch.float32, requires_grad=True)
            self.expander = nn.Parameter(_expander, requires_grad=True)
        self.dropout = nn.Dropout(p=dropout_rate).to(device)
        if self._fusion:
            # Gate: one scalar logit per attention source (feature vs. size).
            self.fc1 = nn.Linear(num_parts, 1).to(device)
            self.fc2 = nn.Linear(num_parts, 1).to(device)

    def forward(self, x: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
        """Fuse feature and size attention into per-part weights.

        :param x: part features of shape (B, C, N)
        :param mask: part masks of shape (B, N, H, W)
        :return: fused attention (B, N); in debug mode, the tuple
                 (fused_attn, feat_attn, size_attn, g_weight, weights)
        """
        assert len(x.size()) == 3, f"expected input tensor 'x' as 3D tensor of shape (B, C, N) " \
                                   f"but got a {len(x.size())}D tensor instead"
        assert x.size(2) == self.num_parts and x.size(1) == self.in_channels, \
            f"expected input tensor 'x' shape as (*, {self.in_channels}, {self.num_parts}), " \
            f"but got input tensor with shape of (*, {x.size(1)}, {x.size(2)}) instead"
        # Fixed: the two mask assertions below previously reported x's
        # rank/shape instead of the mask's.
        assert len(mask.size()) == 4, f"expected mask tensor as 4D tensor of shape (B, N, H, W) " \
                                      f"but got a {len(mask.size())}D tensor instead"
        assert mask.size(1) == self.num_parts, \
            f"expected mask tensor shape as (*, {self.num_parts}, *, *), " \
            f"but got mask tensor with shape of (*, {mask.size(1)}, *, *) instead"

        # feature attention: weight channels, then softmax over the parts
        if self._lowrank:
            weight = torch.matmul(self.weights, self.expander)
        else:
            weight = self.weights
        weight = self.dropout(weight)
        feat_attn: torch.Tensor = x * weight
        feat_attn = F.softmax(torch.sum(feat_attn, dim=1), dim=1)

        # size attention: part area with Laplace smoothing (+1), L1-normalised
        size_attn: torch.Tensor = torch.sum(mask, dim=[2, 3]) + 1  # (B, N)
        size_attn = F.normalize(size_attn, p=1)

        if self._fusion:
            # gate mechanism: learn how much to trust each attention source
            g_weight: torch.Tensor = F.softmax(torch.cat([self.fc1(feat_attn), self.fc2(size_attn)], dim=1), dim=1)
            # feat_attn: feature weights; size_attn: mask-area weights
            fused_attn: torch.Tensor = g_weight[:, :1] * feat_attn + g_weight[:, 1:] * size_attn
        else:
            # degenerate gate: all weight on the feature attention
            g_weight: torch.Tensor = torch.tensor([[1., 0.]], device=self.device).expand(size_attn.size(0), -1)
            fused_attn: torch.Tensor = feat_attn

        if self._debug:
            return fused_attn, feat_attn, size_attn, g_weight, self.weights
        else:
            return fused_attn


def _raise_errors(x: torch.tensor, mask: torch.tensor, dimensions: int) -> None:
    if x.device != mask.device:
        raise TypeError(f"input and mask must on the same device, "
                        f"got input on {x.device} and mask on {mask.device}")
    if len(x.size()) != dimensions + 2:
        raise ValueError(f"excepted input as a {dimensions + 2}D tensor (BxCxHxW), "
                         f"but got a {len(x.size())}D tensor instead")
    if len(mask.size()) != dimensions + 1:
        raise ValueError(f"excepted mask as a {dimensions + 1}D tensor (BxCxHxW), "
                         f"but got a {len(x.size())}D tensor instead")
    if x.size(0) != mask.size(0):
        raise ValueError(f"input and mask must have same batch size (dim0), "
                         f"got {x.size(0)} for input and {mask.size(0)} for mask")
    if x.size()[2:] != mask.size()[1:]:
        raise ValueError(f"input and mask must have same shape, "
                         f"got {x.size()[2:]} for input and {mask.size()[1:]} for mask")

class _MaskAdaptiveAvgPool_Base_impl(nn.Module):

    def __init__(self, _impl_name: str,
                #  output_size: _size_any_t) -> None:
                output_size) -> None:
        super(_MaskAdaptiveAvgPool_Base_impl, self).__init__()

        assert getattr(nn, _impl_name, None) is not None  # should never assert this
        self.pool: nn.Module = getattr(nn, _impl_name)(output_size)
        self.name: str = 'Mask' + _impl_name

        # self.output_size: _size_any_t = self.pool.output_size
        self.output_size = self.pool.output_size

    def forward(self, x: torch.tensor, mask: torch.tensor) -> torch.tensor:
        _raise_errors(x, mask, int(self.name[-2]))
        mask = mask.unsqueeze(dim=1)

        # calculation of kernel size 
        # reference [https://blog.csdn.net/u013382233/article/details/85948695]
        # if isinstance(self.output_size, _size_T):
        if isinstance(self.output_size, int):
            stride = [size // self.output_size for size in x.size()[2:]]
        else:
            stride = [size // output_size for size, output_size in zip(x.size()[2:], self.output_size)]
        kernel_size = [size - (self.output_size - 1) * s for size, s in zip(x.size()[2:], stride)]
        kernel_area = prod(kernel_size)

        x_PoolRes: torch.tensor = self.pool(x * mask)
        mask_PoolRes: torch.tensor = self.pool(mask) + 1. / kernel_area

        return x_PoolRes / mask_PoolRes

class MaskAdaptiveAvgPool2d(_MaskAdaptiveAvgPool_Base_impl):
    '''
    Mask Adaptive Average Pooling 2d.

    Only positions where the mask is non-zero contribute to each pooled cell.

    Args:
        x    - torch.Tensor - input tensor (BxCxHxW)
        mask - torch.Tensor - mask tensor (BxHxW)
    Output:
        y    - torch.Tensor - output tensor (BxCxHxW)
    '''

    def __init__(self, output_size) -> None:
        # Delegate everything to the shared base with the 2d torch pool.
        super().__init__('AdaptiveAvgPool2d', output_size)

def mask_adaptive_avg_pool2d(x: torch.Tensor, mask: torch.Tensor, output_size):
    """Functional form of :class:`MaskAdaptiveAvgPool2d`.

    Builds a throwaway pooling module (it holds no learned state) and
    applies it to ``x`` under ``mask``.
    """
    pooler = MaskAdaptiveAvgPool2d(output_size)
    return pooler(x, mask)



class ParsingReidModel(Baseline):
    """Vehicle re-ID model with parsing-mask-guided local branches.

    Extends ``Baseline`` with:
      * mask-adaptive average pooling for the global feature,
      * one masked-pooled local feature per parsing part,
      * a ``PartAttentionModule`` producing per-part weights that fuse the
        local features for the local classification head.
    """

    def __init__(self, num_classes, last_stride, model_path, neck, neck_feat, model_name, pretrain_choice, num_local_branches=4):
        super(ParsingReidModel, self).__init__(num_classes, last_stride, model_path, neck, neck_feat, model_name,
                                               pretrain_choice)

        # BNNeck-style head for the fused local feature (no BN shift).
        self.local_bottleneck = nn.BatchNorm1d(self.in_planes)
        self.local_bottleneck.bias.requires_grad_(False)  # no shift
        self.local_classifier = nn.Linear(
            self.in_planes, self.num_classes, bias=False)

        self.local_bottleneck.apply(weights_init_kaiming)
        self.local_classifier.apply(weights_init_classifier)

        # Attention over the 4 local parts, fusing feature and mask-area cues.
        # NOTE(review): part count and channel width are hard-coded here and
        # assume a 2048-channel backbone with 4 parsing parts.
        self.watcher = PartAttentionModule(num_parts=4,
                                           num_in_features=2048,
                                           dropout_rate=0.0,
                                           rank=1,
                                           lowrank=False,
                                           fusion=True,
                                           debug=False)

    def forward(self, image, mask, **kwargs):
        """
        :param torch.Tensor image: [B, 3, H, W]
        :param torch.Tensor mask: [B, N, H, W] front/back, side, window
            (channel 0 is background and is dropped); may be None, in which
            case all-zero masks are used
        :return: dict of features/scores; keys depend on training/eval mode
        """
        # Remove background channel; fall back to zero masks if none given.
        if mask is not None:
            mask = mask[:, 1:, :, :]
            B, N, H, W = mask.shape
        else:
            B, _, H, W = image.shape
            N = 4
            mask = image.new_zeros(B, 4, H, W)

        features = self.base(image)
        res = features[-1]  # last-stage feature map (B, C, h, w)

        # Union of all part masks, resized to the feature-map resolution.
        global_mask = F.interpolate(torch.sum(mask, dim=1, keepdim=True), size=res.shape[2:], mode='nearest').squeeze()  # (B, H, W)

        # Global feature: masked average pooling over the whole union mask.
        global_feat = mask_adaptive_avg_pool2d(res, global_mask, 1).squeeze()  # (B, C)

        # One masked-pooled feature per part.
        local_feats = []
        local_mask = F.interpolate(mask, size=res.shape[2:], mode='nearest')  # (B, N, h, w)
        for i in range(N):
            local_feats.append(mask_adaptive_avg_pool2d(res, local_mask[:, i, :, :], 1).squeeze())
        local_feat = torch.stack(local_feats).permute(1, 2, 0)  # (B, C, N)
        local_bn_feat = self.local_bottleneck(local_feat)

        # Per-part weights from the attention module (feature + area fusion).
        part_weights = self.watcher(local_bn_feat, mask)

        feat = self.bottleneck(global_feat)
        # Attention-weighted sum of the per-part BN features -> (B, C).
        fused_local_bn_feat = (local_bn_feat * part_weights.unsqueeze(dim=1)).sum(dim=2)

        if self.training:
            cls_score = self.classifier(feat)
            local_cls_score = self.local_classifier(fused_local_bn_feat)
            # global feature for triplet loss
            return {"cls_score": cls_score,
                    "global_feat": global_feat,
                    "local_cls_score": local_cls_score,
                    "local_feat": local_feat,
                    "vis_score": part_weights}
        else:
            if self.neck_feat == 'after':
                # Test with feature after BN
                return {"global_feat": feat,
                        "local_feat": local_feat,
                        "vis_score": part_weights}
            else:
                # Test with feature before BN.
                # Fixed: this branch previously referenced the undefined names
                # ``local_feat_before`` and ``vis_score`` (NameError at runtime).
                return {"global_feat": global_feat,
                        "local_feat": local_feat,
                        "vis_score": part_weights}


class ParsingTripletLoss:
    """Triplet loss over part-based features using a visibility-aware distance.

    When ``margin`` is given, a hinge-style ``MarginRankingLoss`` is used;
    otherwise a ``SoftMarginLoss`` on the (an - ap) gap.
    """

    def __init__(self, margin=None):
        self.margin = margin
        if margin is None:
            self.ranking_loss = nn.SoftMarginLoss()
        else:
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)

    def __call__(self, local_feat, vis_score, target, normalize_feature=False):
        """
        :param torch.Tensor local_feat: (B, C, N) per-part features
        :param torch.Tensor vis_score: (B, N) part visibility weights
        :param torch.Tensor target: (B) identity labels
        :param bool normalize_feature: L2-normalise features first
        :return: (loss, dist_ap, dist_an)
        """
        _batch, _channels, _ = local_feat.shape
        if normalize_feature:
            local_feat = normalize(local_feat, 1)

        # Visibility-weighted pairwise distances between all samples.
        dist_mat = clck_dist(local_feat, local_feat,
                             vis_score, vis_score)
        dist_ap, dist_an = hard_example_mining(dist_mat, target)

        # Target +1: the negative distance should rank above the positive one.
        y = torch.ones_like(dist_an)

        if self.margin is None:
            loss = self.ranking_loss(dist_an - dist_ap, y)
        else:
            loss = self.ranking_loss(dist_an, dist_ap, y)

        return loss, dist_ap, dist_an


def build_model(cfg, num_classes):
    """Construct the parsing-based re-ID model from a config object.

    :param cfg: config with ``model.*`` and ``test.neck_feat`` fields
    :param num_classes: number of identity classes
    :return: an uninitialised-for-training ``ParsingReidModel``
    """
    return ParsingReidModel(num_classes, cfg.model.last_stride, cfg.model.pretrain_model, cfg.model.neck,
                            cfg.test.neck_feat, cfg.model.name, cfg.model.pretrain_choice)


if __name__ == '__main__':
    # Smoke test: trace the baseline model into a TensorBoard graph.
    from tensorboardX import SummaryWriter

    sample_batch = torch.rand(4, 3, 224, 224)
    net = Baseline(576, 1, '/home/mengdechao/.cache/torch/checkpoints/resnet50-19c8e357.pth', 'bnneck', 'after',
                   'resnet50', 'imagenet')
    net.train()
    with SummaryWriter(comment="baseline") as writer:
        writer.add_graph(net, [sample_batch, ])
