

import numpy as np
import torch
import faiss

class GirRetrievalEvaluator():
    """Retrieve top-k database instance ids for predicted boxes via FAISS L2 search.

    On construction, loads pre-computed features from ``instance_path`` and
    builds an in-memory ``faiss.IndexFlatL2`` over the per-id "center"
    features. During evaluation, each predicted box feature is matched
    against that index and the sorted top-k nearest ids are written back
    onto the ``Instances`` object.
    """

    def __init__(self, instance_path):
        """Build the center FAISS index from a saved feature file.

        Args:
            instance_path: Path to a ``torch.save``-d file containing either
                (a) a single 2-D tensor of center features, in which case the
                id of each center is its row index, or (b) a dict with a
                ``"mean"`` entry mapping id -> 1-D feature tensor.
        """
        self._faiss_index = None          # full per-instance index, built lazily by _init_faiss
        self._recall_values = [1, 5, 10]  # recall@k values; max() fixes the search depth
        self._features_dim = 4096         # expected feature dim for the dict format
        self._predictions = []

        # map_location="cpu" so a checkpoint saved from GPU still loads on a
        # CPU-only machine; all tensors are moved to CPU/numpy below anyway.
        res = torch.load(instance_path, map_location="cpu")
        if isinstance(res, torch.Tensor):
            # Plain (num_centers, dim) tensor: ids are simply the row indices.
            center_feats = res.cpu().numpy()
            self._center_ids = np.arange(res.shape[0])
            self._center_faiss_index = faiss.IndexFlatL2(res.shape[-1])
        else:
            # Dict format: res["mean"] maps instance id -> 1-D feature tensor.
            center_data = res["mean"]
            self._center_ids = np.array(list(center_data.keys()))
            center_feats = np.stack(
                [v.cpu().numpy() for v in center_data.values()], axis=0
            )
            self._center_faiss_index = faiss.IndexFlatL2(self._features_dim)
        self._center_faiss_index.add(center_feats)

    def _init_faiss(self, all_ids, all_features):
        """Build the full per-instance database index.

        Args:
            all_ids: 1-D array of database ids, row-aligned with ``all_features``.
            all_features: 2-D (num_db, features_dim) float array.
        """
        # Store the ids so retrieval_ins_feat can translate FAISS row indices
        # back to database ids (the original version never assigned this,
        # leaving self._all_db_ids undefined).
        self._all_db_ids = all_ids
        self._faiss_index = faiss.IndexFlatL2(self._features_dim)
        self._faiss_index.add(all_features)

    def process_retrieval_ins_feat(self, instances, topk=10):
        """Attach top-k retrieved ids to ``instances``.

        Currently delegates to the center-index variant. (The original body
        also carried an unreachable per-instance-index path after the
        delegation; it has been removed as dead code.)
        """
        self.process_retrieval_ins_feat_by_center(instances, topk)

    def process_retrieval_ins_feat_by_center(self, instances, topk=10):
        """Search the center index for every predicted box and store the ids in-place.

        Reads ``instances.pred_boxes_ins_feature`` (N, dim) and
        ``instances.pred_boxes_ins_mask`` (N,) boolean validity flags; writes
        ``instances.pred_ins_ids`` of shape (N, max(recall_values)), with
        rows for invalid boxes filled with -1.

        Note: ``topk`` is accepted for interface symmetry but the search
        depth is fixed to ``max(self._recall_values)``, matching the
        original behavior.
        """
        query_feats = instances.pred_boxes_ins_feature.numpy()
        valid_mask = instances.pred_boxes_ins_mask.numpy()

        distances, predictions = self._center_faiss_index.search(
            query_feats, max(self._recall_values)
        )
        # Defensive per-row sort by distance (FAISS normally returns results
        # already sorted; kept for parity with the original pipeline).
        for q in range(len(query_feats)):
            order = np.argsort(distances[q])
            predictions[q] = predictions[q, order]

        pred_qids = self._center_ids[predictions]
        # Invalidate rows whose box was flagged as not a real instance.
        pred_qids[~valid_mask, :] = -1
        instances.pred_ins_ids = pred_qids

    # Given an Instances object, return the top-k ids for all predicted boxes.
    def retrieval_ins_feat(self, instances, topk=10):
        """Return the top-``topk`` database ids for every predicted box.

        Requires ``_init_faiss`` to have been called first so that
        ``self._faiss_index`` and ``self._all_db_ids`` exist.

        Returns:
            (N, topk) array of database ids, each row sorted by distance.
        """
        query_feats = instances.pred_boxes_ins_feature.numpy()

        distances, predictions = self._faiss_index.search(query_feats, topk)
        for q in range(len(query_feats)):
            order = np.argsort(distances[q])
            predictions[q] = predictions[q, order]

        return self._all_db_ids[predictions]

    def process(self, inputs, outputs):
        """Detectron2-style evaluator hook: retrieve ids for each output in-place.

        Args:
            inputs: iterable of model input dicts (unused beyond pairing).
            outputs: iterable of model output dicts; entries with an
                ``"instances"`` key get ``pred_ins_ids`` attached.
        """
        for input, output in zip(inputs, outputs):
            if "instances" not in output:
                continue
            instances = output["instances"].to('cpu')
            # BUG FIX: the original called the non-existent
            # self._retrieval_ins_feat; the intended method is
            # process_retrieval_ins_feat, which writes ids onto `instances`.
            self.process_retrieval_ins_feat(instances)
