# -*- coding:utf-8 -*-
# Copyright 2022 The Chinaunicom Software Team. All rights reserved.
# @FileName: annoy.py
# @Author  : Dyu
# @Time    : 2022/8/11


from annoy import AnnoyIndex
import random
import numpy as np
import pandas as pd
import json, re, os, codecs, time
from tqdm import tqdm
from src.intelligent_interaction.engine.sem_predictor import SimbertEmbeding
from src.intelligent_interaction.engine.sem_conf import *


class AnnoyRecall:
    """Annoy-based similarity recall over precomputed sentence embeddings.

    Wraps a Spotify Annoy index (``'angular'`` metric, i.e. cosine-style
    distance) together with the source Q&A table used to map recalled row
    indices back to their answers.
    """

    def __init__(self, f_dim, ann_path=False, source=False):
        """
        :param f_dim: dimensionality of the embedding vectors the index stores.
        :param ann_path: optional path of a prebuilt ``.ann`` index file;
            any falsy value falls back to the configured default path.
        :param source: optional path of the full Q&A CSV (must contain an
            ``answer`` column addressable by row index); any falsy value
            falls back to the configured default path.
        """
        self.f_dim = f_dim
        self.sim = AnnoyIndex(self.f_dim, 'angular')

        # Load the prebuilt index, preferring an explicitly supplied path.
        if ann_path:
            print('ann_path:', ann_path)
            self.sim.load(ann_path, prefault=False)
        else:
            self.ann_path = config['pro_id']['faq_stand']['ann_path']
            self.sim.load(self.ann_path, prefault=False)

        # Load the full source data used to resolve recalled ids to answers.
        if source:
            self.source_data = pd.read_csv(source, encoding='utf-8')
        else:
            self.source_data = pd.read_csv(config['pro_id']['faq_stand']['source_path'], encoding='utf-8')

    def train_annoy_index(self, all_id, npy_data, ann_path, n_trees=150):
        """Build and persist an Annoy index from precomputed vectors.

        :param all_id: ids aligned one-to-one with ``npy_data`` (used only
            for the length sanity check; items are stored by position).
        :param npy_data: array/sequence of embedding vectors, one per id.
        :param ann_path: file path the built index is saved to.
        :param n_trees: forest size; more trees -> higher precision,
            slower build (default 150, the previously hard-coded value).
        :raises ValueError: if ids and vectors differ in length.
        """
        # Explicit raise instead of `assert`: asserts vanish under `python -O`.
        if len(all_id) != len(npy_data):
            raise ValueError(
                "id/vector length mismatch: {} ids vs {} vectors".format(
                    len(all_id), len(npy_data)))

        index = AnnoyIndex(self.f_dim, 'angular')  # angular ~ cosine distance
        for pos, vec in enumerate(npy_data):
            index.add_item(pos, vec)
        index.build(n_trees)
        index.save(ann_path, prefault=False)
        print("Save ANN Finish!")

    def index_recall(self, index, n_sims, search_k):
        """Recall the nearest neighbours of an item already in the index.

        :param index: position of the item inside the Annoy index.
        :param n_sims: number of neighbours to return.
        :param search_k: search-time accuracy/speed trade-off (-1 = default).
        :return: ``(ids, distances)`` tuple as returned by Annoy.
        """
        start_time = time.time()
        result = self.sim.get_nns_by_item(i=index, n=n_sims, search_k=search_k, include_distances=True)
        end_time = time.time()
        print("Annoy Time:", end_time - start_time)
        return result

    def vecter_recall_predict(self, vector, n_sims, search_k, confidence=0.5):
        """Vector recall with deduplication, confidence filtering and answer lookup.

        :param vector: query embedding of dimension ``f_dim``.
        :param n_sims: number of candidates to recall.
        :param search_k: search-time accuracy/speed trade-off (-1 = default).
        :param confidence: minimum confidence (1 - angular distance) a
            candidate must exceed to be kept. Defaults to 0.5 so callers
            that predate this parameter keep working.
        :return: list of ``(answer, confidence)`` pairs, best first.
        """
        result = self.sim.get_nns_by_vector(vector, n_sims, search_k, include_distances=True)
        qids, distances = result[0], result[1]
        # Convert angular distance to a confidence-like score (higher = closer).
        cfd = [1 - dis for dis in distances]
        qids, filter_cfd = self.reorder_and_dedup(qids, cfd, confidence)
        # Row index in the source CSV doubles as the question id.
        answers = [self.source_data.at[qid, 'answer'] for qid in qids]
        return list(zip(answers, filter_cfd))

    def reorder_and_dedup(self, qids, distances, confidence):
        """Drop duplicate ids and entries at or below the confidence threshold.

        Order of first occurrence is preserved.

        :param qids: candidate question ids.
        :param distances: confidence scores aligned with ``qids``.
        :param confidence: strict lower bound a score must exceed.
        :return: ``(filtered_qids, filtered_scores)``.
        """
        new_qids, new_distances = [], []
        for qid, score in zip(qids, distances):
            if qid not in new_qids and score > confidence:
                new_qids.append(qid)
                new_distances.append(score)
        return new_qids, new_distances

    def make_ann_model(self, df_data, ann_path, npy_path=None):
        """Train and persist an ANN model from the full Q&A data.

        :param df_data: full dataset; must contain a ``qid`` column.
        :param ann_path: path to save the built ``.ann`` index to.
        :param npy_path: path of the precomputed embedding ``.npy`` file.
            Required in practice: on-the-fly embedding is not implemented.
        :raises ValueError: if ``npy_path`` is None (the original code fell
            through to an unbound ``vectors`` variable -> ``NameError``).
        """
        print("Annoy Train Begin>>>")
        start = time.time()

        if npy_path is None:
            # Bug fix: previously this path crashed later with NameError
            # because `vectors` was never assigned.
            raise ValueError(
                "npy_path is required: precomputed embeddings must be "
                "supplied (inline embedding is not implemented)")

        qid = df_data['qid'].tolist()
        vectors = np.load(npy_path)
        self.train_annoy_index(qid, vectors, ann_path)
        print("Training set size: ", len(qid))
        print("Online Training Annoy finish ! All Time is {} s".format(round(time.time() - start, 5)))


def _test_ann(data_df):
    """Smoke test: embed one query and run vector recall against a prebuilt index.

    :param data_df: currently unused; kept for call-site compatibility.

    NOTE: requires the SimBERT model, the ``.ann`` index file and the
    source CSV to exist on disk.
    """
    simbert = SimbertEmbeding()
    vec = simbert.single_embedding('到底什么是犯罪的嫌疑人呢？')
    annoy = AnnoyRecall(f_dim=768, ann_path='ann_model/沃灵通_qa.ann', source="data/沃灵通_qa_data.csv")
    # Bug fix: `confidence` is a required parameter of vecter_recall_predict;
    # the original call omitted it and raised TypeError on every run.
    predict = annoy.vecter_recall_predict(vector=vec, n_sims=10, search_k=-1, confidence=0.5)
    print(predict)


if __name__ == '__main__':
    # The triple-quoted string below is disabled example code for building
    # the index offline (kept verbatim as a no-op expression statement).
    """
    data_df = pd.read_csv(r"data/沃灵通_qa_data.csv", encoding='utf-8')
    annoy = AnnoyRecall(f_dim=768)
    annoy.make_ann_model(data_df, r'沃灵通_qa.ann', r'沃灵通_qa_vecs.npy')
    """

    # Disabled ad-hoc recall smoke test; requires local model/data files.
    # data_df = pd.read_csv(r"data/沃灵通_qa_data.csv", encoding='utf-8')
    # _test_ann(data_df)
    pass
