import torch
from utils.km import run_kuhn_munkres

import pdb
import numpy as np
class MPLP(object):
    """Multi-label predictors over a feature memory bank for unsupervised
    person search / re-id training.

    Every ``predict_*`` method maps a batch of memory indices (``targets``)
    to a ``[len(targets), len(memory)]`` label matrix in which, by the
    convention used throughout this file, ``2`` marks the sample's own memory
    slot, ``1`` marks predicted positive neighbours and (in ``predict_CBGM``)
    ``-1`` marks hard negatives.

    NOTE(review): MPLP presumably stands for Memory-based Positive Label
    Prediction (cf. "Unsupervised Person Re-identification via Multi-label
    Classification") -- confirm against the project docs.
    """
    def __init__(self,t=0.6,cfg=None):
        """
        Args:
            t: cosine-similarity threshold used to select neighbour candidates.
            cfg: config node providing DEVICE, MEM0 and INPUT.DATASET.
                 NOTE(review): the default ``cfg=None`` would raise
                 AttributeError on the first line below -- a cfg is required.
        """
        self.device = cfg.DEVICE
        self.t = t                          # similarity threshold for candidate selection
        self.topk = 10                      # neighbourhood size used by predict_G
        self.mem0=cfg.MEM0                  # if True, first epoch uses single (self-only) labels
        self.dataset_name = cfg.INPUT.DATASET

    def predict_ml(self, memory, targets,epoch=None):
        """Predict multi-labels via thresholded kNN plus a mutual-nearest-
        neighbour check.

        For each target, candidates are memory entries with similarity above
        ``self.t``; a candidate survives only while the query remains inside
        the candidate's own top-k list (mutual nearest neighbours).

        NOTE(review): the default ``epoch=None`` makes ``epoch<1`` raise a
        TypeError on Python 3 -- callers apparently always pass an int.
        """
        if epoch<1 and self.mem0:
            # Warm-up: the multi-label contains only the sample itself.
            targets = torch.unsqueeze(targets, 1)
            multilabel = torch.zeros([len(targets),len(memory)]).to(self.device)
            multilabel.scatter_(1, targets, float(2))
            return multilabel

        mem_vec = memory[targets]
        mem_sim = mem_vec.mm(memory.t())
        m, n = mem_sim.size()
        mem_sim_sorted, index_sorted = torch.sort(mem_sim, dim=1, descending=True) # a_searched list
        multilabel = torch.zeros(mem_sim.size()).to(self.device)
        mask_num = torch.sum(mem_sim_sorted > self.t, dim=1)

        for i in range(m):
            topk = int(mask_num[i].item()) # a_searched: number of similarities above self.t
            if self.dataset_name =='CUHK-SYSU':
                topk = min(topk, 10) # cap at 10: in SSM each ID has only 2-3 boxes, so 10+ neighbours would surely be a wrong prediction
            topk_idx = index_sorted[i, :topk]
            vec = memory[topk_idx] #a_searched vec
            sim = vec.mm(memory.t()) # b_searched  shape = [topk,all_mem]
            _, idx_sorted = torch.sort(sim.detach().clone(), dim=1, descending=True) # sims of the b_searched vecs, descending
            step = 1
            for j in range(topk):   # for each b_search_list in b_searched, check whether its topk contains a (mutual nearest neighbour)
                pos = torch.nonzero(idx_sorted[j] == index_sorted[i, 0]).item() # rank of a inside this b_searched list
                if pos > topk:
                    break # a is not within b_searched's topk
                step = max(step, j) # entries 0:j of a_searched's topk are mutual nearest neighbours
            step = step + 1
            step = min(step, topk) # clamp by the a_searched count; mutual neighbours receive multi-labels
            if step <= 0:
                continue
            multilabel[i, index_sorted[i, 0:step]] = float(1) # mark the mutual-neighbour a_search hits with 1

        targets = torch.unsqueeze(targets, 1) # the sample itself also joins the multi-label
        multilabel.scatter_(1, targets, float(2)) # its own position is set to 2

        return multilabel

    def predict_try(self, memory, targets,epoch): # the threshold should adapt: as the model fits, outliers drift further and same-ID samples get closer; too large a t also rejects positives as outliers
        """Experimental variant of :meth:`predict_ml` with deduplicated
        targets and switchable candidate strategies (ss / mplp / un_oem).

        NOTE(review): with ``use_ss`` and ``use_mplp`` both hard-coded False
        below, ``step`` is never assigned before its use, so any non-outlier
        sample raises NameError -- confirm the intended flag configuration
        before calling this method.
        """
        # Since the total instance count is fixed while similarities evolve with
        # the model, the similarity threshold could be picked from a noise ratio.
        # Validation idea: recompute the distance matrix with a trained model and
        # take the threshold at the noise ratio of the top2 list.
        # The computation below overflows GPU memory:
        # memory=torch.tensor(memory,dtype=torch.float16) #memory.float16
        # dis=memory.mm(memory.t())
        # # zero the diagonal
        # diag = torch.diag(dis)
        # a_diag=torch.diag_embed(diag)
        # dis = dis - a_diag
        # top2_smi_list = torch.max(dis,0)[0]
        # top2_smi_list_sorted = torch.sort(top2_smi_list)[0]
        # noisy_partion_ratio=0.2
        # index_t = int(noisy_partion_ratio*len(top2_smi_list_sorted))
        # del dis,mem_fp16,a_diag,diag
        # self.t = top2_smi_list_sorted[index_t]/100
        # print(self.t)
        use_ss = False
        use_mplp = False
        use_un_oem=False
        if epoch<2 and use_un_oem:
            # Warm-up: the multi-label contains only the sample itself.
            targets = torch.unsqueeze(targets, 1)
            multilabel = torch.zeros([len(targets),len(memory)]).to(self.device)
            multilabel.scatter_(1, targets, float(1))
            return multilabel
        else:
            # MPLP branch.
            # targets contains duplicate single ids; convert to a set before
            # running the MPLP prediction.
            targets = targets.cpu().numpy()  # already shifted down by 1
            org_tar = targets
            targets = sorted(list(set(targets)))
            targets = torch.tensor(targets).to(self.device) # deduplicated targets

            mem_vec = memory[targets]
            mem_sim = mem_vec.mm(memory.t())
            m, n = mem_sim.size()  # m = batch rows, n = memory size
            mem_sim_sorted, index_sorted = torch.sort(mem_sim, dim=1, descending=True)  # a_searched list; first entry is the sample itself
            multilabel = torch.zeros(mem_sim.size()).to(self.device)
            mask_num = torch.sum(mem_sim_sorted > self.t, dim=1)

            for i in range(m):  # noise removal: a memory vec counts as noise when (1) its similarity is below the threshold or (2) it has no mutual nearest neighbour
                if int(mask_num[i].item()) == 1:  # only itself passes the threshold self.t = 0.6 / 0.7
                    # print('离群点噪声')  # outlier noise
                    continue
                else:
                    topk = int(mask_num[i].item())  # a_searched: count above self.t
                    # topk = max(topk, 10) # prw 10: the paper's max -- even if fewer than 10 pass self.t, force 10 positives
                    # topk = min(topk, 20)  # use 10 when smaller; applied on both ssm and prw
                    topk_idx = index_sorted[i, :topk]
                    vec = memory[topk_idx]  # a_searched vec
                    sim = vec.mm(memory.t())  # b_searched  shape = [topk,all_mem]

                    if use_mplp:
                        # Mutual-nearest-neighbour lookup: does a appear inside b-search?
                        _, idx_sorted = torch.sort(sim.detach().clone(), dim=1, descending=True)  # sims of the b_searched vecs, descending
                        step = 1
                        for j in range(topk):  # check whether each b_search_list's topk contains a (mutual NN)
                            pos = torch.nonzero(idx_sorted[j] == index_sorted[i, 0]).item()  # rank of a inside this b_searched list
                            if pos > topk:  # a absent from the topk -> this sample is not a positive
                                break  # not within b_searched's topk
                            step = max(step, j)  # entries 0:j of a_searched's topk are mutual NNs
                        if step == 1:  # no mutual neighbour exists -> negative sample
                            continue
                        step = step + 1
                        step = min(step, topk)  # clamp by the a_searched count; mutual neighbours get multi-labels
                        # print(i, ': org:{}, new:{}'.format(mask_num[i], step))

                    elif use_ss:
                        step=topk

                    # NOTE(review): if neither use_mplp nor use_ss is True (as
                    # hard-coded above), `step` is undefined here -> NameError.
                    multilabel[i, index_sorted[i, 0:step]] = float(1)  # mark the mutual-neighbour a_search hits with 1

            targets = torch.unsqueeze(targets, 1)  # the sample itself also joins the multi-label
            multilabel.scatter_(1, targets, float(2))  # own position is 2; outliers fall through to the OEM computation later

            # Restore one MPLP row per original (possibly duplicated) target.
            multilabel_set = {targets[i].item(): multilabel[i] for i in range(len(targets))}
            multilabel_all = torch.zeros(len(org_tar), len(memory)).to(self.device)
            for j, id in enumerate(org_tar):
                multilabel_all[j] = multilabel_set[id]

        return multilabel_all

    def predict_single_label(self, memory, targets):
        """Single-label fallback: mark only the sample's own slot (with 2)."""
        targets = torch.unsqueeze(targets, 1)
        multilabel = torch.zeros([len(targets),len(memory)]).to(self.device)
        multilabel.scatter_(1, targets, float(2))
        return multilabel

    def predict_ml_with_ssm_gt(self, memory, targets,epoch,gt_multilabel=None): # the threshold should adapt: as the model fits, outliers drift further and same-ID samples get closer; too large a t also rejects positives as outliers
        """:meth:`predict_ml` with an optional ground-truth override.

        When ``gt_multilabel`` is given, targets with a known id list (length
        > 1) take their labels straight from the ground truth; only the no-id
        targets go through the mutual-NN prediction. Without ground truth the
        plain prediction runs with dataset-specific top-k caps.
        """
        if epoch<1 and self.mem0:
            # Warm-up: the multi-label contains only the sample itself.
            targets = torch.unsqueeze(targets, 1)
            multilabel = torch.zeros([len(targets),len(memory)]).to(self.device)
            multilabel.scatter_(1, targets, float(2))
            return multilabel

        mem_vec = memory[targets]
        mem_sim = mem_vec.mm(memory.t())
        m, n = mem_sim.size()  # m = batch rows, n = memory size
        mem_sim_sorted, index_sorted = torch.sort(mem_sim, dim=1, descending=True)  # a_searched list; first entry is the sample itself
        multilabel = torch.zeros(mem_sim.size()).to(self.device)
        # self.t = self.t+0.2 * epoch/6
        mask_num = torch.sum(mem_sim_sorted > self.t, dim=1)

        for i in range(m):  # noise removal: a memory vec counts as noise when (1) similarity below threshold or (2) no mutual nearest neighbour
            if gt_multilabel is not None:
                if len(gt_multilabel[targets[i].item()]) == 1:  # the no-id targets
                    if int(mask_num[i].item()) == 1:  # only itself passes the threshold self.t = 0.6
                        # print('离群点噪声')  # outlier noise
                        continue
                    else:
                        topk = int(mask_num[i].item())  # a_searched: count above self.t
                        if self.dataset_name == 'CUHK-SYSU':
                            topk = min(topk, 10)
                        topk_idx = index_sorted[i, :topk]
                        vec = memory[topk_idx]  # a_searched vec
                        sim = vec.mm(memory.t())  # b_searched  shape = [topk,all_mem]

                        # Mutual-NN lookup: does a appear inside b-search?
                        _, idx_sorted = torch.sort(sim.detach().clone(), dim=1,descending=True)  # sims of the b_searched vecs, descending
                        step = 1
                        for j in range(topk):  # check whether each b_search_list's topk contains a (mutual NN)
                            pos = torch.nonzero(idx_sorted[j] == index_sorted[i, 0]).item()  # rank of a inside this b_searched list
                            if pos > topk:  # a absent from the topk -> this sample is not a positive
                                break  # not within b_searched's topk
                            step = max(step, j)  # entries 0:j of a_searched's topk are mutual NNs
                        if step == 1:  # no mutual neighbour exists -> negative sample
                            continue
                        step = step + 1
                        step = min(step, topk)  # clamp by the a_searched count; mutual neighbours get multi-labels
                        # print(i, ': org:{}, new:{}'.format(mask_num[i], step))
                        multilabel[i, index_sorted[i, 0:step]] = float(1)  # mark the mutual-neighbour a_search hits with 1
                else:
                    multilabel[i, gt_multilabel[targets[i].item()]] = float(1)
            else:
                if int(mask_num[i].item()) <= 1:  # only itself passes the threshold self.t = 0.6
                    # print('离群点噪声')  # outlier noise
                    continue
                else:
                    topk = int(mask_num[i].item())  # a_searched: count above self.t
                    if self.dataset_name == 'CUHK-SYSU':
                        # topk = min(topk, 10)
                        topk = min(topk, 5)
                    else:
                        topk = min(topk, 250)
                    topk_idx = index_sorted[i, :topk]
                    vec = memory[topk_idx]  # a_searched vec
                    sim = vec.mm(memory.t())  # b_searched  shape = [topk,all_mem]

                    # Mutual-NN lookup: does a appear inside b-search?
                    _, idx_sorted = torch.sort(sim.detach().clone(), dim=1, descending=True)  # sims of the b_searched vecs, descending
                    step = 1
                    for j in range(topk):  # check whether each b_search_list's topk contains a (mutual NN)
                        pos = torch.nonzero(idx_sorted[j] == index_sorted[i, 0]).item()  # rank of a inside this b_searched list
                        if pos > topk:  # a absent from the topk -> this sample is not a positive
                            break  # not within b_searched's topk
                        step = max(step, j)  # entries 0:j of a_searched's topk are mutual NNs
                    if step == 1:  # no mutual neighbour exists -> negative sample
                        continue
                    step = step + 1
                    step = min(step, topk)  # clamp by the a_searched count; mutual neighbours get multi-labels
                    # print(i, ': org:{}, new:{}'.format(mask_num[i], step))
                    multilabel[i, index_sorted[i, 0:step]] = float(1)  # mark the mutual-neighbour a_search hits with 1

        targets = torch.unsqueeze(targets, 1)  # the sample itself also joins the multi-label
        multilabel.scatter_(1, targets, float(2))  # own position is 2; outliers fall through to the OEM computation later

        return multilabel

    def predict_gtids_mul(self,memory, targets,epoch,gt_multilabel):
        """Build the multi-label matrix straight from ground-truth id lists:
        2 for a no-id target (list of length 1, i.e. only itself), 1 for every
        entry of a known id group. ``epoch`` and the similarity matrix are
        unused beyond sizing.
        """
        mem_vec = memory[targets]
        mem_sim = mem_vec.mm(memory.t())
        m, n = mem_sim.size()  # m = batch rows, n = memory size; mem_sim is only used for its shape here
        multilabel = torch.zeros(mem_sim.size()).to(self.device)
        for i in range(m):
            if len(gt_multilabel[targets[i].item()])==1: # the no-id targets
                multilabel[i, gt_multilabel[targets[i].item()]] = float(2)
            else:
                multilabel[i, gt_multilabel[targets[i].item()]] = float(1)

        return multilabel

    def predict_G(self, memory, targets):
        '''
        Paper: unsupervised person re-id via MLP and CLA based on graph-structural insight.
        Filters multi-labels via the distance between the similarity lists of a and b:
        the closer the two lists, the more likely a and b share an identity.
        Tried on PRW with no obvious gain -- improving over the ss method is already
        hard; multilabel quality ranked worst to best: single-Class, ss, MPLP, GT.
        '''
        mem_vec = memory[targets]
        # Condition - all features are normalised to |x|=1
        node_sim = mem_vec.mm(memory.t())  # similarity matrix
        all_sim = memory.mm(memory.t()).fill_diagonal_(0.0)  # matrix for neighbourhood similarity
        n_sim_batch = torch.cdist(all_sim[targets], all_sim, p=2.0) # the closer a_sim and b_sim, the likelier a,b share an ID; closest to itself
        node_sim[node_sim < self.t] = -1.0 # edges that are not connected
        m, n = node_sim.size()  # m is scale of batch, n is the number of images on memory.
        node_sim_sorted, index_sorted = torch.sort(node_sim, dim=1, descending=True)# a_sim= a_searched list

        n_sim_sorted, index_n_sim_sorted = torch.sort(n_sim_batch, dim=1, descending=False) # distances between a_sim_vec and every all_sim_vec
        nodeclass = torch.zeros(node_sim.size()).to(self.device)
        mask_num = torch.sum(node_sim_sorted != -1.0, dim=1)  # listing candidates using node similarity.

        _pos_list = []
        for i in range(m):
            topk = int(mask_num[i].item())
            topk = max(topk, self.topk)
            topk_idx = index_sorted[i, :topk]
            # print('node distance based:')
            # print(topk_idx)
            topk_idx_nbased = index_n_sim_sorted[i, :topk + 1]# top-k entries of all_sim whose vectors are closest to a_sim/b_sim
            # print('Neighborhood similairty based:')
            # print(topk_idx_nbased)
            if mask_num[i].item() == 1: # outlier: only itself exceeds the 0.6 threshold
                nodeclass[i, targets[i]] = float(1)
                _pos_list.append(targets[i])
            else:
                nodeclass[i, targets[i]] = float(1)
                for j in range(topk):
                    if topk_idx[j] in topk_idx_nbased: # the intersection of the two topk result sets becomes the multilabel
                        continue
                    else:
                        topk_idx[j] = -1
                tmp = topk_idx[topk_idx != -1]
                _pos_list.append(tmp)
                # print('[%d] similarity %d/%d (%.3f)'%(i,len(tmp),topk,len(tmp)/topk))
                nodeclass[i, topk_idx[topk_idx != -1]] = float(1) # drop the non-intersection (insufficiently similar) candidate positives
                nodeclass[i, targets[i]] = float(1)
        targets = torch.unsqueeze(targets, 1)
        nodeclass.scatter_(1, targets, float(1))
        return nodeclass

    def predict_CBGM(self,memory, targets,annotations,k1=30,k2=4,r = 0.01):
        '''
        CBGM: instead of predicting with every image's max-sim box directly, optimise
        the box in each image most similar to the query (via Kuhn-Munkres matching).
        return multilabel: positive label bigger than self.t,
        '''
        targets = targets.cpu().numpy()

        annos =annotations

        name_to_det_feat = {}  # boxes and feats of every image in the gallery G
        id_to_name_ids={} # for all pids: owning image name and that image's ids

        for anno in annos:
            name = anno["img_name"]
            pids= anno['pids']
            pids=[x-1 for x in pids]
            feats=memory[pids]
            pids = np.array(pids)
            name_to_det_feat[name] = [feats,pids]

            for id in pids:
                id_to_name_ids[id]={'img_name':name,'pids':pids}

        query_ids_set = sorted(list(set(targets)))
        multilabel_set ={} # id: torch.zeros((1,len(memory))).to(self.device)

        for i in range(len(query_ids_set)):
            query_sims_score=[]
            query_sims_id_list=[]

            query_sims_score_org = []
            query_sims_id_list_org = []

            query_id = query_ids_set[i]

            multilabel_set[query_id] = torch.zeros((1, len(memory))).to(self.device)
            multilabel_set[query_id]=multilabel_set[query_id].squeeze(dim=0)

            query_feat = memory[query_id]

            query_imname = id_to_name_ids[query_id]["img_name"]
            query_pids = id_to_name_ids[query_id]["pids"]
            query_feats= memory[query_pids]

            # Walk every gallery image and compute sims between the query and that image's G_boxes.
            name2sim = {}
            sims = []
            id_as_sims =[]
            imgs_cbgm = []

            for gallery_imname, [g_feats,g_pids] in name_to_det_feat.items():
                if query_imname==gallery_imname:
                    continue
                sim = query_feat.unsqueeze(dim=0).mm(g_feats.t()).squeeze(dim=0).cpu().numpy() # sims against all IDs within one image

                name2sim[gallery_imname] = sim
                sims.extend(list(sim))
                id_as_sims.extend(list(g_pids))
                imgs_cbgm.extend([gallery_imname] * len(sim))

                # Un-reranked positive nearest neighbour of the query.
                inds = np.argsort(sim)[::-1][0]  # [::-1] reverses to descending order; take the largest sim, hence [0]
                sim_most = sim[inds]
                g_pid_most = g_pids[inds]
                query_sims_score_org.append(sim_most)
                query_sims_id_list_org.append(g_pid_most)

            # NOTE(review): if show_org_pos were ever set False, topk_id_org
            # below would be referenced before assignment.
            show_org_pos=True
            if show_org_pos:
                query_sims_score_org = np.array(query_sims_score_org)
                index_search = np.argsort(query_sims_score_org)  # small -bigger
                index_search = index_search.astype(np.int32)
                query_sims_id_list_org = np.array(query_sims_id_list_org)
                query_sims_id_list_org = query_sims_id_list_org[index_search]
                query_sims_score_org=query_sims_score_org[index_search]
                lab_indx = query_sims_score_org > self.t

                topk_id_org = query_sims_id_list_org[lab_indx]
                topk_sims_org = query_sims_score_org[lab_indx]
                topk = sum(lab_indx)

            topk_id_org_mplp = []
            hard_neg=[]

            if len(topk_id_org) == 0:  # outlier
                topk_id_org_mplp = [query_id]

                sims = np.array(sims)
                id_as_sims = np.array(id_as_sims)

                new_sims_sorted_ind = np.argsort(np.array(sims))[::-1]
                new_sims_sorted_ind = new_sims_sorted_ind.astype(np.int32)
                new_ids = np.array(id_as_sims)
                new_ids_sorted_r_top = new_ids[new_sims_sorted_ind][:int(r * len(new_ids))]
                hard_neg.extend(list(new_ids_sorted_r_top))

            use_mplp=True
            if use_mplp and len(topk_id_org)!=0:
                # Mutual NN: if the query appears in each topk_id's own topk
                # neighbours it is a positive, otherwise it joins the hard negatives.
                b_feats = memory[topk_id_org]
                b_sims = b_feats.mm(memory.t())
                b_topk_id = torch.argsort(b_sims,dim=1,descending=True)
                b_topk_id = b_topk_id[:,:topk]

                for a_top_id,b_ids in zip(topk_id_org,b_topk_id):
                    if a_top_id in b_ids:
                        topk_id_org_mplp.append(a_top_id)
                    else:
                        hard_neg.append(a_top_id)

                sims=np.array(sims)
                id_as_sims=np.array(id_as_sims)

                # From the candidates above self.t excluding topk_id_org, take the top r (0.01) fraction of ids as hard negatives.
                new_sims=[]
                new_ids=[]
                for sim,id in zip(sims,id_as_sims):
                    if id not in topk_id_org:
                        new_sims.append(sim)
                        new_ids.append(id)
                    else:
                        continue
                new_sims_sorted_ind = np.argsort(np.array(new_sims))[::-1]
                new_sims_sorted_ind=new_sims_sorted_ind.astype(np.int32)
                new_ids=np.array(new_ids)
                new_ids_sorted_r_top = new_ids[new_sims_sorted_ind][:int(r * len(new_ids))]
                hard_neg.extend(list(new_ids_sorted_r_top))

            # print('pos num:{}, neg num: {}'.format(len(topk_id_org_mplp),len(hard_neg)))
            # print(topk_id_org_mplp)
            # print(hard_neg)

            use_cbgm=False
            if use_cbgm:
                # Recompute the sims.
                sims = np.array(sims)  # sims excluding the query's own image
                imgs_cbgm = np.array(imgs_cbgm)  # without the CBGM optimisation these pids would be the positives directly
                inds = np.argsort(sims)[-k1:]  # the K1 = 30 boxes with the largest sims
                imgs_cbgm = set(imgs_cbgm[inds])  # several of these K1 boxes may live in the same image,
                # hence the set to remove duplicates

                # Sims of the most-similar boxes within the top gallery images.
                for img in imgs_cbgm:
                    sim = name2sim[img]
                    feat_g, g_pids = name_to_det_feat[img]
                    qfeats = query_feats[:k2] if len(query_feats) > k2 else query_feats  # k2 = 4

                    graph = []  # fully connect every id in qf with every id in gf
                    for indx_i, pfeat in enumerate(qfeats):  #
                        for indx_j, gfeat in enumerate(feat_g):
                            graph.append((indx_i, indx_j, (pfeat * gfeat).sum()))
                    km_res, max_val = run_kuhn_munkres(graph)  # key step: returns the valid matched graph nodes and edges

                    for indx_i, indx_j, _ in km_res:
                        if indx_i == 0:
                            sim[indx_j] = max_val  # update sim[] to the optimised value; this also mutates name2sim
                            break  # debugging showed this only helps the few hard negatives where one image holds two similar boxes; no benefit for an early model

                for gallery_imname, sim in name2sim.items():
                    feat_g, g_pids = name_to_det_feat[gallery_imname]
                    inds = np.argsort(sim)[::-1][0]  # ranked boxes/ids of the sim-optimised top-30 images
                    sim_most = sim[inds]
                    g_pid_most = g_pids[inds]

                    query_sims_score.append(sim_most)
                    query_sims_id_list.append(g_pid_most)

                query_sims_score = np.array(query_sims_score)
                index_search = np.argsort(query_sims_score)  # small -bigger
                index_search = index_search.astype(np.int32)
                query_sims_id_list = np.array(query_sims_id_list)
                query_sims_id_list = query_sims_id_list[index_search]

                lab_indx = query_sims_score > self.t

                topk_id = query_sims_id_list[lab_indx]
                topk_sims = query_sims_score[lab_indx]

                # Compare topk_id against topk_id_org: did the optimisation help?
                print('o', topk_id_org)
                print('n', topk_id)
                print('o', topk_sims_org)
                print('n', topk_sims)

                # Mutual nearest neighbours could additionally be applied here.

            multilabel_set[query_id][query_id] = float(1)
            multilabel_set[query_id][topk_id_org_mplp]=float(1)

            multilabel_set[query_id][hard_neg] = float(-1)

        multilabel = torch.zeros(len(targets),len(memory)).to(self.device)

        for j,id in enumerate(targets):
            multilabel[j]=multilabel_set[id]
        return multilabel

    def predict_psknn_gpu(self, memory, targets, annotations, k1=30, k2=4, r=0.01):
        '''
        GPU per-image-kNN variant of CBGM: instead of using each image's max-sim box
        directly, take the most query-similar box per image and keep only the mutual
        nearest neighbours.
        return multilabel: postive label bigger than self.t,
        '''
        annos = annotations

        name_to_det_feat = {}  # per gallery image: its box pids
        id_to_name_ids = []  # flat list: for all pids, the owning image name

        for anno in annos:
            name = anno["img_name"]
            pids = anno['pids']
            pids = [x - 1 for x in pids]  # detection reserves 0 for background, so raw IDs start at 1 and are shifted to 0..N-1
            name_to_det_feat[name] = pids

            id_to_name_ids_temp=[name for pid in pids]
            id_to_name_ids.extend(id_to_name_ids_temp)

        targets = targets.cpu().numpy() # already shifted down by 1
        query_ids_set = sorted(list(set(targets)))
        multilabel_set = {}  # dict k:v = id: torch.zeros((1,len(memory))).to(self.device)
        sims_qids=memory[query_ids_set].mm(memory.t()) # simplest option: use the qid's kNN as positives; alternatives: mutual kNN, or per-image nearest neighbour

        for i in range(len(query_ids_set)):
            query_id = query_ids_set[i]

            multilabel_set[query_id] = torch.zeros((1, len(memory))).to(self.device).squeeze(dim=0)

            # NOTE(review): indexing the flat name list by pid assumes pids
            # enumerate 0..N-1 in annotation order -- TODO confirm against the loader.
            query_imname = id_to_name_ids[query_id]

            # Walk every gallery image and compute sims between the query and its G_boxes.
            query_sims_score_org = [] # length = number of gallery images
            query_sims_id_list_org = []

            for j,(gallery_imname, g_pids) in enumerate(name_to_det_feat.items()):
                if query_imname == gallery_imname: # within its own image the most similar box is itself, sim = 1
                    sim_most = float(1)
                    g_pid_most = query_id
                else:
                    sim = sims_qids[i][g_pids]  # sims between q and all boxes of one image

                    # Un-reranked positive nearest neighbour of the query.
                    ind_max = torch.argmax(sim)  # torch.argmax returns the index
                    sim_most = sim[ind_max]
                    g_pid_most = g_pids[ind_max]

                query_sims_score_org.append(sim_most)
                query_sims_id_list_org.append(g_pid_most)

            a_search_sims = torch.tensor(query_sims_score_org).to(self.device)
            a_search_pids = torch.tensor(query_sims_id_list_org).to(self.device)
            # Sort the a sims.
            a_sims_sorted,a_ind = torch.sort(a_search_sims, dim=0, descending=True)
            a_ind = list(a_ind.cpu().numpy())
            a_search_pids =a_search_pids[a_ind]
            topk = torch.sum(a_sims_sorted > self.t, dim=0) # may be 1: only itself is a mutual neighbour, i.e. an outlier -> single-class

            # Directly take the pids whose sims exceed t.
            pos_ids_org = a_search_pids[:topk] # includes the query itself

            pos_ids = []
            hard_neg = []

            # Mutual NN: if the query is in each topk_id's own topk neighbours it is pos, otherwise hard neg.
            b_feats = memory[pos_ids_org]
            b_sims = b_feats.mm(memory.t())

            b_topk_id = torch.argsort(b_sims, dim=1, descending=True)[:, :topk]

            for a_top_id, b_ids in zip(pos_ids_org, b_topk_id):
                if query_id in b_ids:
                    pos_ids.append(a_top_id.item())
                else:
                    hard_neg.append(a_top_id.item())
            # From the candidates above self.t excluding topk_id_org: the top r (0.01) fraction of ids.
            print(i,': org:{},new:{} cha_neg:{}'.format(len(pos_ids_org),len(pos_ids),len(pos_ids_org)-len(pos_ids)))

            # Assign multi-label 1 to the positives.
            multilabel_set[query_id][pos_ids] = float(1)
            # multilabel_set[query_id][hard_neg] = float(-1)

        multilabel = torch.zeros(len(targets), len(memory)).to(self.device)
        for j, id in enumerate(targets):
            multilabel[j] = multilabel_set[id]
        return multilabel

    def predict_soft_multilabel(self, memory, targets):  # the threshold should adapt: as the model fits, outliers drift further and same-ID samples get closer; too large a t also rejects positives as outliers
        """Soft multi-label variant: deduplicate targets, run the mutual-NN
        filter, then broadcast each unique id's row back to all duplicates.

        Without NMS many predicted boxes share one ID, but a single multilabel
        prediction per ID suffices, hence the set() over targets first.
        """
        targets = targets.cpu().numpy()  # already shifted down by 1
        org_tar = targets
        targets = sorted(list(set(targets)))
        targets = torch.tensor(targets).to(self.device)

        mem_vec = memory[targets]
        mem_sim = mem_vec.mm(memory.t())
        m, n = mem_sim.size()  # m = batch rows, n = memory size
        mem_sim_sorted, index_sorted = torch.sort(mem_sim, dim=1, descending=True)  # a_searched list; first entry is the sample itself
        multilabel = torch.zeros(mem_sim.size()).to(self.device)
        mask_num = torch.sum(mem_sim_sorted > self.t, dim=1)

        for i in range(m):  # noise removal: a memory vec counts as noise when (1) similarity below threshold or (2) no mutual nearest neighbour
            if int(mask_num[i].item()) == 1:  # only the self-similarity passes the threshold self.t = 0.6 / 0.7
                # print('离群点噪声')  # outlier noise; a hard single label is assigned for it later
                continue
            else:
                topk = int(mask_num[i].item())  # a_searched count above self.t; >= 2 here
                topk_idx = index_sorted[i, :topk]
                vec = memory[topk_idx]  # a_searched vec
                sim = vec.mm(memory.t())  # b_searched  shape = [topk,all_mem]
                # Mutual-NN lookup: does a appear inside b-search?
                _, idx_sorted = torch.sort(sim.detach().clone(), dim=1, descending=True)  # sims of the b_searched vecs, descending
                step = 1
                for j in range(topk):  # check whether each b_search_list's topk contains a (mutual NN)
                    pos = torch.nonzero(idx_sorted[j] == index_sorted[i, 0]).item()  # rank of a inside this b_searched list
                    if pos > topk:  # the query ranks beyond topk in b_search -> this b is not a positive
                        break  # not within b_searched's topk
                    step = max(step, j)  # entries 0:j of a_searched's topk are mutual NNs
                if step == 1:  # the search found nothing, step stays 1 -> treated like multil_num==1: a hard single label is assigned later
                    continue

                # Positives after the mutual-NN filter; step <= multil_num[i].
                step = step + 1 # make the slice cover every mutual neighbour
                # print(i, ': org:{}, new:{}'.format(topk, step))
                multilabel[i, index_sorted[i, 0:step]] = float(1)  # the true positive probability is 1, but after softening the lowest-sim label must still outrank it

        targets = torch.unsqueeze(targets, 1)  # add a dim for scatter_
        multilabel.scatter_(1, targets, float(2))  # own position is 2; outliers fall through to the OEM computation later

        multilabel_set = {targets[i].item(): multilabel[i] for i in range(len(targets))}

        multilabel_all = torch.zeros(len(org_tar), len(memory)).to(self.device)
        for j, id in enumerate(org_tar):
            multilabel_all[j] = multilabel_set[id]

        return multilabel_all

