# -*- coding:utf-8 -*-
# @Time : 2022-8-16
# @Author : suny570 dyu
# @Site   :
# @File : semantic_search.py
# @Software: PyCharm

import os
import jieba
import pandas as pd

from src.db_es.es_config import ES_Config
from src.db_es.ElasticSearchClass import ElasticSearchClass

from src.highlight_words.label_words import LabelWords
from src.intelligent_interaction.engine.model_inference import BasePredictor
from src.intelligent_interaction.engine.semantic_search_hits import SimilaritySearchHits
from src.utils_local.rouge_score import compute_metrics

# Module-level FAQ predictor, constructed once at import time with the
# 'encyclopedia' model and shared by all SemanticSearch methods below.
faq_instance = BasePredictor('encyclopedia')

class SemanticSearch(object):
    """Semantic FAQ search over an Elasticsearch backend.

    Combines the module-level FAQ predictor (``faq_instance``) with plain
    Elasticsearch field matching, highlights query keywords in the returned
    text fields, and keeps per-record hit counters (stored in ``exter_1``)
    up to date.
    """

    def __init__(self, server_handle, model=None):
        """
        :param server_handle: ES wrapper exposing
            ``search_used_field_match_item`` and ``search_top_records``.
        :param model: optional model handle (currently unused).
        """
        self.server_handle = server_handle
        self.model = model

    # ------------------------------------------------------------------
    # internal helpers shared by the search strategies
    # ------------------------------------------------------------------
    def _merge_answer_hits(self, index_name, type_name, answers, faq_jsons, ids_set):
        """For each non-empty FAQ answer, fetch the full ES record that
        contains it and append it to ``faq_jsons``, deduplicated by id."""
        for ans in answers:
            if not ans:
                continue
            tmp_jsons = self.server_handle.search_used_field_match_item(
                index_name, type_name, "answer", ans, 1)
            for item in tmp_jsons:
                # skip None / empty records and already-collected ids
                if item and item["id"] not in ids_set:
                    faq_jsons.append(item)
                    ids_set.add(item["id"])

    def _backfill_question_hits(self, index_name, type_name, query_content,
                                hit_times, faq_jsons, ids_set):
        """Top up ``faq_jsons`` with plain question matches when the FAQ
        model produced fewer than ``hit_times`` records.

        ``hit_times is None`` means "no fixed quota" and skips the backfill
        (previously ``len(...) < None`` raised TypeError on Python 3).
        """
        if hit_times is None or len(faq_jsons) >= hit_times:
            return
        tmp_jsons = self.server_handle.search_used_field_match_item(
            index_name, type_name, "question", query_content, hit_times)
        for hits in tmp_jsons:
            if hits["id"] not in ids_set:
                faq_jsons.append(hits)
                ids_set.add(hits["id"])

    @staticmethod
    def _build_detection_json(qa, passed):
        """Project an ES record onto the data-detection result format.

        :param qa: raw ES record dict.
        :param passed: True when the similarity score exceeded the threshold.
        """
        return {
            'pass': 1 if passed else 0,
            'status': qa['status'],
            'user_id': qa['user_id'],
            'content_type': qa['content_type'],
            'question': qa['question'],
            'answer': qa['answer'],
            'create_time': qa['create_time'],
            # NOTE(review): hyphenated key kept for backward compatibility
            # with existing consumers; other methods use 'update_time'.
            'update-time': qa['update_time'],
        }

    # ------------------------------------------------------------------
    # public search strategies
    # ------------------------------------------------------------------
    def standard_search_inter(self, index_name, type_name, query_json, pro_id, hit_times=None):
        """
        Question search strategy:

        1. run the FAQ model on the query and keep its candidate answers;
        2. look each answer up in ES to recover the full record;
        3. if that yields fewer than ``hit_times`` records, backfill with a
           plain question match; with no FAQ result at all, fall back to the
           question match directly.

        Every returned record's hit counter is bumped via
        ``SimilaritySearchHits``.

        :param index_name: ES index to query.
        :param type_name: ES document type.
        :param query_json: dict carrying the user query under ``'query'``.
        :param pro_id: project id forwarded to the FAQ predictor.
        :param hit_times: max number of records to return (None = no limit).
        :return: list of result dicts with highlighted text fields.
        """
        query_content = query_json['query']
        # Keyword highlighter built from the segmented query.
        label_words = LabelWords(jieba.lcut(query_content))

        faq_results = faq_instance.predict(query_content, pro_id)
        faq_jsons = []  # collected FAQ records
        if not faq_results:
            # No FAQ hit: plain question match against the standard library.
            faq_jsons = self.server_handle.search_used_field_match_item(
                index_name, type_name, "question", query_content, hit_times)
        else:
            # Each result is (answer, confidence); confidence thresholding
            # is deliberately left to the backend consumer.
            faq_answers = [res[0] for res in faq_results]
            ids_set = set()
            self._merge_answer_hits(index_name, type_name, faq_answers,
                                    faq_jsons, ids_set)
            self._backfill_question_hits(index_name, type_name, query_content,
                                         hit_times, faq_jsons, ids_set)

        ret_document = []
        rank_prefix = 'rank_number_'
        hits_id_arr = []   # ids of the returned records
        hit_freq_arr = []  # their current hit counters (stored in exter_1)
        for record_num, hits in enumerate(faq_jsons, start=1):
            if hit_times is not None and record_num > hit_times:
                break
            item_map = {
                'data_id': hits["id"],
                'user_id': hits["user_id"],
                'title': label_words.label_words_content(hits["title"]),
                'content_type': hits["content_type"],
                'content': label_words.label_words_content(hits["answer"]),
                'standard_question': label_words.label_words_content(hits["question"]),
                'content_link': '',
                'content_ana_type': '',
                'status': hits['status'],
                'create_time': hits['create_time'],
                'update_time': hits["update_time"],
                'hit_freq': hits['exter_1'],
                'rank_num': rank_prefix + str(record_num),
            }
            ret_document.append(item_map)
            hits_id_arr.append(item_map['data_id'])
            hit_freq_arr.append(item_map['hit_freq'])

        # Every successful search updates the hit frequency counters.
        hit_handle = SimilaritySearchHits(self.server_handle)
        hit_handle.update_search_hits(index_name, type_name, hits_id_arr, hit_freq_arr)

        return ret_document

    def standard_search_inter_epri(self, index_name, type_name, query_json, pro_id, hit_times=None):
        """Demo variant of :meth:`standard_search_inter`.

        Same retrieval strategy, but returns the raw record fields
        (including the ``exter_*`` / ``remarks_*`` extension columns),
        prints debug traces, and does not update hit counters.

        :param index_name: ES index to query.
        :param type_name: ES document type.
        :param query_json: dict carrying the user query under ``'query'``.
        :param pro_id: project id forwarded to the FAQ predictor.
        :param hit_times: max number of records to return (None = no limit).
        :return: list of result dicts with highlighted text fields.
        """
        query_content = query_json['query']
        label_words = LabelWords(jieba.lcut(query_content))

        faq_results = faq_instance.predict(query_content, pro_id)
        print("faq contents {}".format(str(faq_results)))
        faq_jsons = []
        if not faq_results:
            # No FAQ hit: plain question match against the standard library.
            faq_jsons = self.server_handle.search_used_field_match_item(
                index_name, type_name, "question", query_content, hit_times)
        else:
            faq_answers = [res[0] for res in faq_results]
            faq_confidences = [res[1] for res in faq_results]
            for ans, conf in zip(faq_answers, faq_confidences):
                print("query {} \t answer {} \t confidences {}".format(query_content, ans, conf))
            ids_set = set()
            self._merge_answer_hits(index_name, type_name, faq_answers,
                                    faq_jsons, ids_set)
            self._backfill_question_hits(index_name, type_name, query_content,
                                         hit_times, faq_jsons, ids_set)

        ret_document = []
        rank_prefix = 'rank_number_'
        for record_num, hits in enumerate(faq_jsons, start=1):
            if hit_times is not None and record_num > hit_times:
                break
            item_map = {
                'rank_num': rank_prefix + str(record_num),
                'id': hits["id"],
                'user_id': hits["user_id"],
                'title': label_words.label_words_content(hits["title"]),
                'content_type': hits["content_type"],
                'question': label_words.label_words_content(hits["question"]),
                'exter_1': hits['exter_1'],
                'exter_2': hits['exter_2'],
                'answer': label_words.label_words_content(hits["answer"]),
                'exter_3': hits['exter_3'],
                'exter_4': hits['exter_4'],
                'exter_5': hits['exter_5'],
                'exter_6': hits['exter_6'],
                'remarks_1': hits['remarks_1'],
                'remarks_2': hits['remarks_2'],
                'status': hits['status'],
                'create_time': hits['create_time'],
                'update_time': hits["update_time"],
            }
            ret_document.append(item_map)
        return ret_document

    def data_detection(self, index_name, type_name, query_json, num=4, title_rate=0.25, content_rate=0.25):
        """Duplicate-content detection.

        Picks the ``num // 2`` stored questions most similar (ROUGE score)
        to the submitted title and the remaining ``num - num // 2`` stored
        answers most similar to the submitted content; each candidate is
        flagged ``pass = 1`` when its score exceeds the matching threshold.

        :param index_name: ES index to query.
        :param type_name: ES document type.
        :param query_json: e.g. ``{'title': '西游记孙悟空', 'content': '...'}``
        :param num: total number of candidates returned.
        :param title_rate: ROUGE threshold for the title half.
        :param content_rate: ROUGE threshold for the content half.
        :return: list of candidate dicts (content half carries ``rank_num``).
        """
        result_json = []
        half = int(num / 2)

        # --- title vs stored questions --------------------------------
        question = query_json['title']
        tmp_jsons = self.server_handle.search_used_field_match_item(
            index_name, type_name, "question", question, 2 * half)
        rouge_score_title = [compute_metrics(question, qa['question'])
                             for qa in tmp_jsons]
        # Indices of the top-scoring candidates, best first.
        max_score_index = list(pd.Series(rouge_score_title)
                               .sort_values(ascending=False).index[:half])
        for i in max_score_index:
            result_json.append(self._build_detection_json(
                tmp_jsons[i], rouge_score_title[i] > title_rate))

        # --- content vs stored answers --------------------------------
        rest = num - half
        answer = query_json['content']
        tmp_jsons = self.server_handle.search_used_field_match_item(
            index_name, type_name, "answer", answer, 2 * rest)
        rouge_score_content = [compute_metrics(answer, qa['answer'])
                               for qa in tmp_jsons]
        max_score_index = list(pd.Series(rouge_score_content)
                               .sort_values(ascending=False).index[:rest])
        rank_prefix = 'rank_number_'
        for count_num, i in enumerate(max_score_index, start=1):
            temp_json = self._build_detection_json(
                tmp_jsons[i], rouge_score_content[i] > content_rate)
            temp_json["rank_num"] = rank_prefix + str(count_num)
            result_json.append(temp_json)

        return result_json

    def standard_search_inter_strategy(self, index_name, type_name, query_json, hit_times=None):
        """Earliest test strategy: plain question match, no FAQ model.

        :param index_name: ES index to query.
        :param type_name: ES document type.
        :param query_json: dict carrying the user query under ``'query'``.
        :param hit_times: max number of records requested from ES.
        :return: list of result dicts with highlighted text fields.
        """
        query_content = query_json['query']
        # Question -> answer lookup.
        jsons = self.server_handle.search_used_field_match_item(
            index_name, type_name, "question", query_content, hit_times)
        # Keyword highlighter built from the segmented query.
        label_words = LabelWords(jieba.lcut(query_content))

        ret_document = []
        rank_prefix = 'rank_number_'
        for record_num, hits in enumerate(jsons, start=1):
            item_map = {
                'data_id': hits["id"],
                'user_id': hits["user_id"],
                'title': label_words.label_words_content(hits["title"]),
                'content_type': hits["content_type"],
                'content': label_words.label_words_content(hits["answer"]),
                'standard_question': label_words.label_words_content(hits["question"]),
                'content_link': '',
                'content_ana_type': '',
                'status': hits['status'],
                'create_time': hits['create_time'],
                'update_time': hits["update_time"],
                'rank_num': rank_prefix + str(record_num),
            }
            ret_document.append(item_map)
        return ret_document

    def hottop_search_inter(self, history_data):
        """Hot-topic tracking backed by ``hot_top.txt``.

        Plan A (simple FIFO): push the newest searched question to the
        front and keep only the ten most recent distinct entries.
        Each line of the file stores ``<data_id>|<question>``.

        :param history_data: search result list; entry 0 is recorded.
        """
        _id = history_data[0]['data_id']
        question = history_data[0]['standard_question']
        insert_hot = "{}|{}\n".format(str(_id), question)
        hot_path = os.getcwd() + '/src/hot_top.txt'
        with open(hot_path, 'r', encoding='utf-8') as f:
            hot_list = f.readlines()

        # split('|', 1) keeps questions that themselves contain '|' intact,
        # and blank/malformed lines no longer raise IndexError.
        known_questions = []
        for line in hot_list:
            parts = line.strip().split('|', 1)
            if len(parts) == 2:
                known_questions.append(parts[1])

        if question not in known_questions:
            hot_list.insert(0, insert_hot)
            with open(hot_path, 'w', encoding='utf-8') as fw:
                fw.writelines(hot_list[:10])

    def hottop_search_es_inter(self, index_name, type_name, hot_hits_times=10):
        """Hot questions straight from ES: top ``hot_hits_times`` records
        ordered by the hit counter stored in ``exter_1``.

        Two tables are tracked, but only the standard table is queried for
        now; returns minimal records (id, question, rank label).

        :param index_name: ES index to query.
        :param type_name: ES document type.
        :param hot_hits_times: number of hot records to return.
        :return: list of ``{'id', 'standard_question', 'rank_num'}`` dicts.
        """
        ret_json = self.server_handle.search_top_records(
            index_name, type_name, 'exter_1', 0, hot_hits_times)
        ret_document = []
        rank_prefix = 'rank_number_'
        for record_num, hits in enumerate(ret_json, start=1):
            ret_document.append({
                'id': hits["id"],
                'standard_question': hits["question"],
                'rank_num': rank_prefix + str(record_num),
            })
        return ret_document

if __name__ == '__main__':
    # ES connection (test environment credentials).
    es_config = ES_Config()
    test_env = es_config.env_names['test']
    es_handle = ElasticSearchClass(test_env['HOST'], test_env['PORT'],
                                   test_env['USER'], test_env['PASSWORD'])

    data_dec = SemanticSearch(es_handle)

    # Sample payload for data_detection (it expects 'title' / 'content'):
    # detection_query = {'title': '西游记孙悟空',
    #                    'content': '第二十七话：悟空的反击'}
    # a = data_dec.data_detection(es_config.index_names['stand']['index'],
    #                             es_config.type_names['type'],
    #                             detection_query, 4, 0.25, 0.000001)
    # print(a)

    # standard_search_inter_strategy reads query_json['query']; the old code
    # passed the title/content payload and raised KeyError('query').
    search_query = {'query': '西游记孙悟空'}
    a = data_dec.standard_search_inter_strategy(
        es_config.index_names['stand']['index'], es_config.type_names['type'],
        search_query, 10)
    print(a)
