from vehicle_reid_pytorch.loss.triplet_loss import normalize, euclidean_dist, hard_example_mining
from vehicle_reid_pytorch.models import Baseline
import torch
from torch import nn
import torch.nn.functional as F
from functools import reduce
from math_tools import clck_dist
from pprint import pprint
from vehicle_reid_pytorch.models.gau import *
from vehicle_reid_pytorch.models.uafm import UAFM, UAFM_ChAtten

class ParsingReidModel(Baseline):
    """Parsing-based vehicle re-identification model.

    Extends :class:`Baseline` with a part-level ("local") branch: the fused
    backbone feature map is masked by per-part parsing masks, average-pooled
    per part, and classified by a shared 1x1 ``Conv1d`` head.  An attention
    head (``hrcn_momo``) predicts per-part visibility weights from the part
    features.
    """

    def __init__(self, num_classes, last_stride, model_path, neck, neck_feat,
                 model_name, pretrain_choice, num_local_branches=4):
        """
        :param int num_classes: number of identity classes.
        :param int last_stride: stride of the backbone's last stage.
        :param str model_path: path to the pretrained backbone weights.
        :param str neck: 'no' or 'bnneck' (BN neck before the classifier).
        :param str neck_feat: 'before' or 'after' — which feature to return at
            test time.
        :param str model_name: backbone architecture name.
        :param str pretrain_choice: pretraining source (e.g. 'imagenet').
        :param int num_local_branches: number of vehicle parts N.
        """
        super(ParsingReidModel, self).__init__(num_classes, last_stride, model_path, neck, neck_feat, model_name,
                                               pretrain_choice)

        # BN neck over the concatenated local features (one 2048-dim slot per part).
        self.local_bn_neck = nn.BatchNorm1d(2048 * num_local_branches)
        # Shared per-part identity classifier (1x1 conv along the part axis).
        self.local_classifier = nn.Conv1d(2048, num_classes, 1)

        # Fuses the last two backbone stages with channel attention.
        self.uafm = UAFM_ChAtten(2048, 1024, 2048)

        # Predicts per-part attention weights.  NOTE(review): ``HRCNHead``
        # comes from the star import of ``vehicle_reid_pytorch.models.gau``
        # — confirm.
        self.hrcn_momo = HRCNHead()

    def forward(self, image, mask, **kwargs):
        """Compute global and part-level features.

        :param torch.Tensor image: input batch, [B, 3, H, W].
        :param torch.Tensor mask: parsing masks [B, N+1, H, W]; channel 0 is
            background and is dropped (remaining channels presumably
            front/back, side, window, ... — TODO confirm against the parser).
            May be ``None``, in which case an all-zero 4-part mask is used.
        :return: dict of features; keys differ between train and eval mode.
        """
        # Drop the background channel; fall back to zero masks when absent.
        if mask is not None:
            mask = mask[:, 1:, :, :]
            B, N, H, W = mask.shape
        else:
            B, _, H, W = image.shape
            N = 4
            mask = image.new_zeros(B, 4, H, W)

        # Backbone yields multi-stage feature maps; fuse the last two stages.
        features = self.base(image)
        res_x, res_y = features[-2], features[-1]
        res = self.uafm(res_y, res_x)

        B, C, h, w = res.shape
        # Resize the masks down to the feature-map resolution.
        mask = F.interpolate(mask, res.shape[2:])

        # Global branch: GAP then flatten to (B, 2048).
        global_feat = self.gap(res)  # (B, 2048, 1, 1)
        global_feat = global_feat.view(
            global_feat.shape[0], -1)  # flatten to (B, 2048)

        # Local branch: mask the feature map per part -> (B, N, C, h, w),
        # then fold parts into the channel axis -> (B, N*C, h, w).
        local_feat_map = torch.mul(mask.unsqueeze(
            dim=2), res.unsqueeze(dim=1))
        local_feat_map = local_feat_map.reshape(B, -1, h, w)

        # Per-part visible-pixel count, Laplace-smoothed (+1) so the
        # area normalization below never divides by zero.
        vis_score = mask.sum(dim=[2, 3]) + 1

        # Average-pool each part and rescale by its visible area -> (B, C, N).
        local_feat_before = F.adaptive_avg_pool2d(local_feat_map, output_size=(1, 1)).view(B, N, C).permute(
            [0, 2, 1]) * (h * w / vis_score.unsqueeze(dim=1))

        # Attention head predicts per-part weights from the part features.
        momo_input = local_feat_before.permute(0, 2, 1)  # (B, N, C)
        ree_gcn = self.hrcn_momo(momo_input, vis_score)
        part_weights = F.softmax(ree_gcn, dim=1)

        if self.neck == 'no':
            feat = global_feat
            # Bugfix: this path previously left ``local_feat`` undefined,
            # raising NameError below; fall back to the pre-BN local features.
            local_feat = local_feat_before
        elif self.neck == 'bnneck':
            # Normalize features for the angular-softmax classifier.
            feat = self.bottleneck(global_feat)
            # BN over the concatenated parts, then back to (B, C, N).
            # (The BN step keeps the features from collapsing to zero.)
            local_feat = self.local_bn_neck(
                local_feat_before.contiguous().view(B, -1)).view(B, -1, N)

        if self.training:
            cls_score = self.classifier(feat)
            local_cls_score = self.local_classifier(local_feat)
            return {"cls_score": cls_score,
                    # global feature for the triplet loss
                    "global_feat": global_feat,
                    "local_cls_score": local_cls_score,
                    "local_feat": local_feat,
                    "vis_score": part_weights}
        else:
            cls_score = self.classifier(feat)  # kept for drawing heatmaps
            if self.neck_feat == 'after':
                # Test with features after BN.
                return {"cls_score": cls_score,
                        "global_feat": feat,
                        "local_feat": local_feat,
                        "vis_score": part_weights}
            else:
                # Test with features before BN.
                return {"global_feat": global_feat,
                        "local_feat": local_feat_before,
                        "vis_score": part_weights}


class ParsingTripletLoss:
    """Triplet loss over part-level (local) features.

    Pairwise distances come from ``clck_dist``, which weights each part by
    its visibility score.  With a ``margin`` the hinge form
    (:class:`nn.MarginRankingLoss`) is used; otherwise the soft-margin form
    (:class:`nn.SoftMarginLoss`).
    """

    def __init__(self, margin=None):
        """
        :param float margin: hinge margin; ``None`` selects SoftMarginLoss.
        """
        self.margin = margin
        if margin is not None:
            self.ranking_loss = nn.MarginRankingLoss(margin=margin)
        else:
            self.ranking_loss = nn.SoftMarginLoss()

    def __call__(self, local_feat, vis_score, target, normalize_feature=False):
        """Compute the visibility-weighted triplet loss.

        :param torch.Tensor local_feat: (B, C, N) part features.
        :param torch.Tensor vis_score: (B, N) per-part visibility scores.
        :param torch.Tensor target: (B,) identity labels.
        :param bool normalize_feature: L2-normalize features along dim 1 first.
        :return: tuple ``(loss, dist_ap, dist_an)``.
        """
        if normalize_feature:
            local_feat = normalize(local_feat, 1)

        # Visibility-weighted pairwise distance matrix, (B, B).
        dist_mat = clck_dist(local_feat, local_feat,
                             vis_score, vis_score)

        dist_ap, dist_an = hard_example_mining(dist_mat, target)
        # Ranking target: dist_an should exceed dist_ap.  Replaces the
        # legacy ``new().resize_as_().fill_(1)`` idiom; same dtype/device.
        y = torch.ones_like(dist_an)

        if self.margin is not None:
            loss = self.ranking_loss(dist_an, dist_ap, y)
        else:
            loss = self.ranking_loss(dist_an - dist_ap, y)

        return loss, dist_ap, dist_an


# def build_model(cfg, num_classes):
#     # if cfg.MODEL.NAME == 'resnet50':
#     #     model = Baseline(num_classes, cfg.MODEL.LAST_STRIDE, cfg.MODEL.PRETRAIN_PATH, cfg.MODEL.NECK, cfg.TEST.NECK_FEAT)
#     model = ParsingReidModel(num_classes, cfg.model.last_stride, cfg.model.pretrain_model, cfg.model.neck,
#                              cfg.test.neck_feat, cfg.model.name, cfg.model.pretrain_choice)
#     return model


if __name__ == '__main__':
    # Smoke test: trace the baseline model and export its graph to TensorBoard.
    from tensorboardX import SummaryWriter

    sample_batch = torch.rand(4, 3, 224, 224)
    net = Baseline(
        576, 1,
        '/home/mengdechao/.cache/torch/checkpoints/resnet50-19c8e357.pth',
        'bnneck', 'after', 'resnet50', 'imagenet')
    net.train()
    with SummaryWriter(comment="baseline") as writer:
        writer.add_graph(net, [sample_batch, ])
