"""
Author: 石沙
Date: 2020-12-06
LastEditTime: 2020-01-03
Description：本模块用于定义Hnsw检索器（HnswSelector）
"""

import faiss
import numpy as np
import sys
sys.path.append('..')
# sys.path.append(r'/home/user10000630/notespace/Chatbot_227_ef9a')
import configs.settings as conf
import os
from models.traditional import WordEmbeddingDoc
from site_packages.utils.job import DataOp


def transform_by_embedding(texts):
    """
    Document vectorizer: using the word2vec model saved by the embedding.py
    module, average the word vectors of each document to form a document vector.

    @param texts: list of token lists (already cleaned and segmented),
        e.g. [['营销', '文案', '生成'], ['今天', '天气', '不错', '啊']]
    @return: document vectors produced by WordEmbeddingDoc.transform
    """
    embedder = WordEmbeddingDoc(
        model_name='word2vec',
        method='mean',
        transformers=None,
        min_count=2,
        size=200,
        workers=4,
    )
    embedder.train()
    return embedder.transform(texts)


class HnswSelector:
    """
    HNSW (Hierarchical Navigable Small World) retriever built on faiss.

    Attributes:
        model_name: file name used to persist / load the faiss index
        training: True -> build a new index; False -> load a saved one
        index: the underlying faiss index
    """

    def __init__(self, dim=None, layer_num=100, gpu=False, neighbor_num=64, training=True):
        """
        @param dim: dimensionality of the input vectors (required when training=True)
        @param layer_num: value assigned to hnsw.efConstruction — the size of the
            dynamic candidate list during index construction, NOT a layer count
        @param gpu: if True, move the index onto GPU 0
        @param neighbor_num: number of neighbors per node (the HNSW "M" parameter)
        @param training: True to build a new index; False to load the saved one
        """
        self.model_name = 'hnsw.index'
        # BUG FIX: train() reads self.training, but it was never stored,
        # which made every train() call raise AttributeError.
        self.training = training
        if training:
            self.index = faiss.IndexHNSWFlat(dim, neighbor_num)
            # Set efConstruction while the index is still on the CPU; the
            # GPU wrapper does not necessarily expose the .hnsw attribute.
            self.index.hnsw.efConstruction = layer_num
            if gpu:
                gpu_engine = faiss.StandardGpuResources()
                # BUG FIX: index_cpu_to_gpu returns a NEW index; the original
                # code discarded the result, so gpu=True silently did nothing.
                # 0 means the GPU with device ordinal 0.
                self.index = faiss.index_cpu_to_gpu(gpu_engine, 0, self.index)
        else:
            self.index = faiss.read_index(os.path.join(conf.SAVED_MODEL_PATH, self.model_name))

    def train(self, docvecs=None):
        """
        Add document vectors to the index and persist it to disk.

        @param docvecs: float32 numpy array of shape (n_docs, dim)
        """
        if self.training:
            self.index.add(docvecs)
            faiss.write_index(self.index, os.path.join(conf.SAVED_MODEL_PATH, self.model_name))
        else:
            print("无需训练，使用加载模型")

    def evaluate(self, docvecs):
        """
        Self-retrieval sanity check: search each indexed vector against the
        index and report the miss rate and recall@1.

        @param docvecs: the same float32 array that was added via train()
        """
        sample_cnt = docvecs.shape[0]
        D, I = self.index.search(docvecs, 1)
        missing_rate = (I == -1).sum() / float(sample_cnt)
        # BUG FIX: I has shape (n, 1); comparing it to a 1-D arange broadcast
        # the comparison to an (n, n) matrix, inflating the recall count.
        # Flatten I so positions are compared element-wise.
        recall_at_1 = (I.ravel() == np.arange(sample_cnt)).sum() / float(sample_cnt)
        print(f'missing_rate:{missing_rate}, recall_at_1:{recall_at_1}')

    def search(self, docvecs, topk=5):
        """
        Retrieve the topk nearest neighbors for each query vector.

        @param docvecs: float32 query array of shape (n_queries, dim)
        @param topk: number of neighbors to return per query
        @return: (distances, indices) as returned by faiss index.search
        """
        return self.index.search(docvecs, topk)


if __name__ == '__main__':
    # Build document vectors from the dev split, index them, then run a
    # self-retrieval evaluation and a sample top-k search.
    dev_data = DataOp.load_data('dev')
    doc_vectors = transform_by_embedding(dev_data['customer_speech_list'].tolist())
    doc_vectors = doc_vectors.astype(np.float32)  # faiss requires float32 input
    selector = HnswSelector(
        dim=doc_vectors.shape[1],
        layer_num=100,
        gpu=True,
        neighbor_num=64,
        training=True,
    )
    selector.train(doc_vectors)
    selector.evaluate(doc_vectors)
    selector.search(doc_vectors[[0]])