
import numpy as np
import faiss
import logging
import h5py
from os.path import join
from pathlib import Path
from tqdm import tqdm
class GlobalFeatureIndex:
    """FAISS-backed nearest-neighbour index over per-image global descriptors.

    Keeps side bookkeeping (cumulative feature counts and parallel image-id
    list) so FAISS result row indices can be mapped back to image ids.
    """

    def __init__(self, context=None) -> None:
        """Initialize an empty index; configure from ``context`` if given.

        ``context`` is expected to carry ``args`` (with ``features_dim``),
        ``config`` (features/data sections) and optionally ``feature_set``.
        """
        # Defaults for a bare, not-yet-configured index.
        self._feature_2_image = {}
        self._faiss = None
        self._gpu = False
        self._feature_dir = ''
        self._feature_dim = 4096

        if context:
            args = context.args
            # Cumulative feature counts, one entry per indexed image.
            self._featureCountArr = []
            # Image ids, parallel to the counts above.
            self._indexArr = []
            # Feature row index -> image id.
            self._feat_to_img_dict = {}

            config = context.config
            features_dir = config["features"]["features_dir"]
            dataset_name = config["data"]["dataset_name"]
            datasets_folder = config["data"]["datasets_folder"]

            self._feature_dir = join(
                datasets_folder, dataset_name, "images", "test", features_dir)

            # Optional pre-loaded feature set supplied by the context.
            self._feature_set = getattr(context, 'feature_set', None)

            # Keep the recorded dimensionality consistent with the index
            # actually built (was previously left at the 4096 default).
            self._feature_dim = args.features_dim
            self._faiss = faiss.IndexFlatL2(args.features_dim)
            self._gpu = True
    
    # Build the index from an in-memory feature set.
    def build_from_featureset(self, feature_set):
        """Rebuild the FAISS index from ``feature_set``'s global descriptors.

        Adds one descriptor row per image, in ``featuredict`` iteration order.
        NOTE(review): image-id bookkeeping (``_add_indices``) is not updated
        here — row->image mapping only works after ``read()``; confirm intent.

        Returns 0 on success, -1 if ``feature_set`` is None.
        """
        logging.debug("START BUILD FEATURES INDEX!")
        if feature_set is None:
            return -1
        self._feature_dir = feature_set.get_feature_dir()

        featuredict = feature_set.get_features()

        # Infer the descriptor dimensionality from the data instead of
        # hard-coding 4096; fall back to the configured default when empty.
        fdim = self._feature_dim
        for features in featuredict.values():
            fdim = int(features["global_descriptor"].reshape(1, -1).shape[1])
            break

        self._faiss = faiss.IndexFlatL2(fdim)
        if self._gpu:
            # Move the freshly built CPU index onto GPU 0.
            res = faiss.StandardGpuResources()
            self._faiss = faiss.index_cpu_to_gpu(res, 0, self._faiss)

        for imgid, features in tqdm(featuredict.items(), total=len(featuredict)):
            desc = features["global_descriptor"]
            self._faiss.add(desc.reshape(1, -1))

        logging.debug("FINISHED BUILD FEATURES INDEX!")
        return 0
    
    # Load the index from files on disk.
    def read(self, fdir=None):
        """Load the FAISS index and image-id bookkeeping from disk.

        Reads ``features_faiss.index`` and ``features_index_def.h5`` from
        ``self._feature_dir`` (overridden by ``fdir`` when given).

        Returns 0 on success, -1 if either expected file is missing.
        """
        if fdir is not None:
            self._feature_dir = fdir

        # Lazy %-formatting: message built only if DEBUG is enabled.
        logging.debug("START READ FEATURES INDEX FROM: %s", self._feature_dir)

        feature_path = join(self._feature_dir, "features_index_def.h5")
        faiss_index_path = join(self._feature_dir, "features_faiss.index")

        if not (Path(faiss_index_path).exists() and Path(feature_path).exists()):
            return -1

        faiss_cpu = faiss.read_index(faiss_index_path)
        if self._gpu:
            # Promote the deserialized CPU index onto GPU 0.
            res = faiss.StandardGpuResources()
            self._faiss = faiss.index_cpu_to_gpu(res, 0, faiss_cpu)
        else:
            self._faiss = faiss_cpu

        # Only the "imageid_index" group carries the row->image bookkeeping.
        with h5py.File(str(feature_path), 'r') as fd:
            for imgid_str, imdata in fd.items():
                if imgid_str == "imageid_index":
                    self._read_h5data(imdata)
                    break

        logging.debug("FINISH READ FEATURES INDEX !")
        return 0


    def write(self, fdir=None):
        """Persist the FAISS index and image-id bookkeeping to disk.

        Writes ``features_faiss.index`` and the ``imageid_index`` group of
        ``features_index_def.h5`` under ``self._feature_dir`` (overridden by
        ``fdir`` when given).
        """
        if fdir is not None:
            self._feature_dir = fdir

        # NOTE(review): if self._faiss is a GPU index, faiss.write_index may
        # require faiss.index_gpu_to_cpu first — confirm with callers.
        faiss_index_path = join(self._feature_dir, "features_faiss.index")
        faiss.write_index(self._faiss, faiss_index_path)

        # Image-id bookkeeping: cumulative feature counts + parallel image ids.
        dataset = {
            "feature_index": self._featureCountArr,
            "image_index": self._indexArr,
        }

        groupstr = "imageid_index"
        feature_path = join(self._feature_dir, "features_index_def.h5")
        with h5py.File(str(feature_path), 'a') as fd:
            # Replace any existing group so repeated writes stay consistent.
            if groupstr in fd:
                del fd[groupstr]
            grp = fd.create_group(groupstr)
            for k, v in dataset.items():
                grp.create_dataset(k, data=v)

    # Input a set of descriptors; return the matching images.
    def search(self, global_descriptors, topk_num=10):
        """Query the FAISS index; return ``(distances, predictions)`` for top-k."""
        return self._faiss.search(global_descriptors, topk_num)


    def _add_indices(self, imgid, featurecount):
        """Register ``featurecount`` consecutive feature rows as ``imgid``'s.

        Appends the new cumulative count and image id, and records the
        row -> image-id mapping for each newly added row.
        """
        start = 0
        if len(self._featureCountArr) > 0:
            start = self._featureCountArr[-1]
        end = start + featurecount
        self._featureCountArr.append(end)
        self._indexArr.append(imgid)
        for row in range(start, end):
            self._feat_to_img_dict[row] = imgid
        return None
    
    def _read_h5data(self,h5data):
        self._featureCountArr = h5data["feature_index"].__array__()
        self._indexArr = h5data["image_index"].__array__()
        for imgid_index in range(len(self._indexArr)):
            imgid = self._indexArr[imgid_index]
            index1 = self._featureCountArr[imgid_index]
            index0 =  self._featureCountArr[imgid_index-1] if imgid_index>0 else 0
            for i in range( index0, index1):
                self._feat_to_img_dict[i] = imgid


    # Too inefficient (linear scan per lookup); superseded by _search_f2i.
    # def search(self,featureid):
    #     for i in range(len(self._featureCountArr)):
    #         if featureid < self._featureCountArr[i]:
    #             return self._indexArr[i]
    #     return -1

    # Efficient O(1) implementation
    def _search_f2i(self, featureid):
        """O(1) lookup of the image id owning feature row ``featureid``.

        No bounds checking: unknown ids raise ``KeyError``.
        """
        return self._feat_to_img_dict[featureid]

    def _searchArr(self,featureids):
        res=[]
        
        for i in range(len(featureids)):
            id = self._search_f2i(featureids[i])
            res.append(id)
        return np.array(res)

    def _searchArr2d(self,predictions):
        
        predictions_ids = np.array(predictions,copy=True)
        size = predictions.shape
        for i in range(size[0]):
            for j in range(size[1]):
                id = predictions[i,j]
                res = self._search_f2i(id)
                predictions_ids[i,j] = res
        return predictions_ids.astype(np.uint16)

    def _most_common(self,predictions,topk):
        keys = np.unique(predictions)
        dict_k_v = dict()
        for k in keys:
            res = np.where(predictions == k)
            dict_k_v[k] = len(res[0])

        def get_sorted_list(d, reverse=False):
            return sorted(d.items(), key=lambda x:x[1], reverse=reverse)

        d_list = get_sorted_list(dict_k_v,True)
        vv = np.array([item[0] for item in d_list[0:topk]])
        return vv