
import numpy as np
import faiss
import logging
import h5py
from os.path import join
from pathlib import Path
from tqdm import tqdm
class LocalFeatureIndex:
    """In-memory index that maps local image features back to images.

    Wraps a faiss ``IndexFlatL2`` over the concatenated descriptor matrices
    of many images, and keeps bookkeeping tables so a flat faiss feature id
    can be translated to (a) the image it came from and (b) its position
    inside that image's own descriptor matrix.
    """

    def __init__(self, context=None) -> None:
        # Cumulative feature counts: entry i is the exclusive end offset of
        # image i's descriptors inside the flat faiss index.
        self._featureCountArr = []
        # Insertion position -> image id, and its inverse.
        self._index_to_imgid = []
        self._imgid_to_index_dict = {}
        # Flat faiss feature id -> owning image id.
        self._feat_to_img_dict = {}

        self.features_dim = 256

        # BUGFIX: these were previously assigned only when a context was
        # supplied, so context-less instances raised AttributeError in
        # build_from_featureset()/read()/write().
        self._feature_dir = None
        self._feature_set = None

        if context:
            args = context.args
            self.features_dim = args.fve.features_dim

            config = context.config
            features_dir = config["features"]["features_dir"]
            dataset_name = config["data"]["dataset_name"]
            datasets_folder = config["data"]["datasets_folder"]

            self._feature_dir = join(
                datasets_folder, dataset_name, "images", "test", features_dir
            )

            # hasattr() already returns a bool; the old "> 0" comparison
            # was redundant.
            if hasattr(context, 'feature_set'):
                self._feature_set = context.feature_set

        # faiss backing index; moved to GPU lazily in read() when enabled.
        self._faiss = faiss.IndexFlatL2(self.features_dim)
        self._gpu = True

    def build_from_featureset(self, feature_set):
        """Populate the faiss index and id tables from a feature set.

        Uses *feature_set* when given (also adopting its feature dir),
        otherwise falls back to the one captured from the context.
        Returns 0 on success, -1 when no feature set is available.
        """
        logging.debug("START BUILD FEATURES INDEX!")
        fset = None
        if feature_set is not None:
            fset = feature_set
            self._feature_dir = feature_set.get_feature_dir()
        elif self._feature_set is not None:
            fset = self._feature_set
        if fset is None:
            return -1

        featuredict = fset.get_features()
        for imgid, features in tqdm(featuredict.items(), total=len(featuredict)):
            desc = features["descriptors"]
            self._add_indices(imgid, len(desc))
            self._faiss.add(desc)

        logging.debug("FINISHED BUILD FEATURES INDEX!")
        return 0

    def set_feature_dir(self, fdir):
        """Override the directory used by read()/write()."""
        self._feature_dir = fdir

    def read(self, fdir=None):
        """Load a previously written index from disk.

        Reads the faiss index and the h5 id-mapping tables from the feature
        directory (optionally overridden by *fdir*). Returns 0 on success,
        -1 when either file is missing.
        """
        if fdir is not None:
            self._feature_dir = fdir

        logging.debug("START READ FEATURES INDEX FROM: " + self._feature_dir)

        feature_path = join(self._feature_dir, "features_index_def.h5")
        faiss_index_path = join(self._feature_dir, "features_faiss.index")

        if not (Path(faiss_index_path).exists() and Path(feature_path).exists()):
            return -1

        faiss_cpu = faiss.read_index(faiss_index_path)
        if self._gpu:
            # NOTE(review): faiss GPU failures usually surface as
            # RuntimeError rather than OSError — confirm which exception
            # type the deployment actually sees.
            try:
                res = faiss.StandardGpuResources()
                self._faiss = faiss.index_cpu_to_gpu(res, 0, faiss_cpu)
            except OSError:
                # Replaces a leftover debug print with a proper log record.
                logging.exception("failed to move faiss index to GPU")
                raise
        else:
            self._faiss = faiss_cpu

        with h5py.File(str(feature_path), 'r') as fd:
            for imgid_str, imdata in fd.items():
                if imgid_str == "imageid_index":
                    self._read_h5data(imdata)
                    break

        logging.debug("FINISH READ FEATURES INDEX !")
        return 0

    def write(self, fdir=None):
        """Persist the faiss index plus the id-mapping tables to disk."""
        if fdir is not None:
            self._feature_dir = fdir

        # faiss index file.
        faiss_index_path = join(self._feature_dir, "features_faiss.index")
        faiss.write_index(self._faiss, faiss_index_path)

        # h5 id-mapping tables (featureCountArr / index_to_imgid).
        dataset = {
            "feature_index": self._featureCountArr,
            "image_index": self._index_to_imgid,
        }

        groupstr = "imageid_index"
        feature_path = join(self._feature_dir, "features_index_def.h5")
        with h5py.File(str(feature_path), 'a') as fd:
            # Replace any previous snapshot so repeated writes stay consistent.
            if groupstr in fd:
                del fd[groupstr]
            grp = fd.create_group(groupstr)
            for k, v in dataset.items():
                grp.create_dataset(k, data=v)

    def search(self, features, topk_num=10):
        """Given query local features, return the top-k candidate images.

        Ranks images by how many of the query descriptors' nearest
        neighbours they own, and returns up to *topk_num* image ids.
        """
        descriptors = features["descriptors"]
        distances, predictions = self._faiss.search(descriptors, topk_num)
        predictions_ids = self._searchArr2d(predictions)
        return self._most_common(predictions_ids, topk_num)

    def _add_indices(self, imgid, featurecount):
        """Register *featurecount* new descriptors belonging to *imgid*."""
        # len() check (not truthiness) so a numpy array loaded by read()
        # does not trigger an ambiguous-truth-value error.
        index0 = self._featureCountArr[-1] if len(self._featureCountArr) > 0 else 0
        index1 = index0 + featurecount
        self._featureCountArr.append(index1)
        self._index_to_imgid.append(imgid)
        self._imgid_to_index_dict[imgid] = len(self._index_to_imgid) - 1
        for i in range(index0, index1):
            self._feat_to_img_dict[i] = imgid
        return None

    def _read_h5data(self, h5data):
        """Rebuild the in-memory mapping tables from an h5 group."""
        self._featureCountArr = h5data["feature_index"][()]
        self._index_to_imgid = h5data["image_index"][()]
        for imgid_index, imgid in enumerate(self._index_to_imgid):
            self._imgid_to_index_dict[imgid] = imgid_index
            index1 = self._featureCountArr[imgid_index]
            index0 = self._featureCountArr[imgid_index - 1] if imgid_index > 0 else 0
            for i in range(index0, index1):
                self._feat_to_img_dict[i] = imgid

    def _search_f2i(self, featureid):
        """Map a flat faiss feature id to its image id (no bounds check)."""
        return self._feat_to_img_dict[featureid]

    def _searchArr(self, featureids):
        """Vectorised _search_f2i over a 1-d sequence of feature ids."""
        return np.array([self._search_f2i(fid) for fid in featureids])

    def _searchArr2d(self, predictions):
        """Map a 2-d array of flat feature ids to image ids.

        NOTE(review): pre-existing constraints kept for compatibility —
        the uint16 cast silently wraps image ids >= 65536, and faiss pads
        missing neighbours with -1, which would KeyError in _search_f2i.
        """
        predictions_ids = np.array(predictions, copy=True)
        rows, cols = predictions.shape
        for i in range(rows):
            for j in range(cols):
                predictions_ids[i, j] = self._search_f2i(predictions[i, j])
        return predictions_ids.astype(np.uint16)

    def _most_common(self, predictions, topk):
        """Return up to *topk* distinct ids ordered by frequency (desc).

        Ties resolve in ascending id order, matching the previous
        stable-sort behaviour.
        """
        keys, counts = np.unique(predictions, return_counts=True)
        order = np.argsort(-counts, kind="stable")
        return keys[order][:topk]

    def search_features(self, features, topk_num):
        """Search the index with query descriptors, keeping per-feature detail.

        Returns a 4-tuple of (flat feature ids, owning image ids, feature
        index within the owning image, L2 distances), each shaped
        (n_queries, topk_num).
        """
        descriptors = features["descriptors"]
        distances, predictions = self._faiss.search(descriptors, topk_num)
        # Image each neighbouring feature belongs to.
        predictions_img = self._searchArr2d(predictions)

        # Convert each flat feature id into an offset inside its image by
        # subtracting the cumulative count of all preceding images.
        pred_fid_in_img = np.array(predictions, copy=True)
        rows, cols = predictions.shape
        for i in range(rows):
            for j in range(cols):
                all_id = predictions[i, j]
                img_index = self._imgid_to_index_dict[predictions_img[i, j]]
                if img_index == 0:
                    pred_fid_in_img[i, j] = all_id
                else:
                    pred_fid_in_img[i, j] = all_id - self._featureCountArr[img_index - 1]

        return predictions, predictions_img, pred_fid_in_img, distances