# -*- coding:utf-8 -*-
# @Time : 2022-2-14 15:51 
# @Author : suny570
# @Site   : 
# @File : similarity_search.py 
# @Software: PyCharm

import os
import jieba

from src.highlight_words.label_words import LabelWords
from src.intelligent_interaction.engine.semantic_search_hits import SimilaritySearchHits
from src.db_es.ElasticSearchClass import ElasticSearchClass
from src.db_es.es_config import ES_Config


class SimilaritySearch(object):
    """Similarity search over an Elasticsearch index.

    Fetches documents whose ``title``/``question`` matches the query title,
    highlights the query's keywords in the returned texts, records hit
    frequencies, and filters the query document out of its own results.
    """

    def __init__(self, server_handle, model=None):
        """
        :param server_handle: ES access wrapper; must provide
            ``search_used_field_match_item`` and ``search_used_id``.
        :param model: optional semantic model (currently unused).
        """
        self.server_handle = server_handle
        self.model = model

    def simi_search_core(self, index_name, type_name, field_, query_json, hit_times=None):
        """Run the title-based similarity match and build result records.

        :param index_name: ES index name.
        :param type_name: ES document type.
        :param field_: ``'title'`` for article-style documents; any other
            value (e.g. ``'question'``) selects the Q&A field layout.
        :param query_json: dict with at least ``'title'`` and ``'data_id'``.
        :param hit_times: number of similar hits wanted; one extra is
            requested so the query document itself can be dropped.
            Treated as 0 when ``None`` (the original crashed on ``None``).
        :return: tuple ``(documents, hit_ids, hit_frequencies)``.
        """
        query_content = query_json['title']
        # Request one extra hit: the query document may be among the
        # results and is filtered out just below.
        fetch_count = (hit_times or 0) + 1
        jsons = self.server_handle.search_used_field_match_item(
            index_name, type_name, field_, query_content, fetch_count)

        # Keep every hit except the exact query document
        # (excluded only when BOTH title and id match).
        filter_json = [hit for hit in jsons
                       if hit["title"] != query_json['title']
                       or hit['id'] != query_json['data_id']]

        # Tokenize the query and use the tokens to highlight matching
        # words inside the returned title/content texts.
        seg_result = jieba.lcut(query_content)
        label_words = LabelWords(seg_result)

        rank_prefix = 'rank_number_'
        ret_document = []
        hits_id_arr = []
        hit_freq_arr = []
        for rank, hits in enumerate(filter_json, start=1):
            item_map = {
                'data_id': hits["id"],
                'user_id': hits["user_id"],
                'content_type': hits["content_type"],
            }
            if field_ == 'title':
                item_map['title'] = label_words.label_words_content(hits["title"])
                tmp_content = label_words.label_words_content(hits["content"])
                item_map['content_ana_type'] = hits["content_ana_type"]
            else:
                # Q&A layout: the question acts as title, the answer as content.
                item_map['title'] = label_words.label_words_content(hits["question"])
                tmp_content = label_words.label_words_content(hits["answer"])
                item_map['content_ana_type'] = ''
            item_map['content'] = tmp_content + '...'
            item_map['content_link'] = ''
            item_map['status'] = hits['status']
            item_map['create_time'] = hits['create_time']
            item_map['update_time'] = hits["update_time"]
            item_map['exter_1'] = hits["exter_1"]
            item_map['rank_num'] = rank_prefix + str(rank)
            hits_id_arr.append(item_map['data_id'])
            hit_freq_arr.append(item_map['exter_1'])
            ret_document.append(item_map)

        return ret_document, hits_id_arr, hit_freq_arr

    def simi_search_inter(self, index_name, type_name, query_json, field_, hit_times=None):
        """Similar-content search entry point.

        Truncates over-long contents, updates the hit counters, and drops
        the first result whose (highlight-stripped) title equals the query
        title, i.e. the query document itself.

        :param index_name: ES index name.
        :param type_name: ES document type.
        :param query_json: dict with at least ``'title'`` and ``'data_id'``.
        :param field_: field layout selector, see :meth:`simi_search_core`.
        :param hit_times: number of similar hits wanted.
        :return: list of result documents.
        """
        ret_document, hits_id_arr, hit_freq_arr = self.simi_search_core(
            index_name, type_name, field_, query_json, hit_times)

        # Truncate long contents to keep the response payload small.
        trim_ret_doc = []
        for item_map in ret_document:
            tmp_content = item_map['content']
            if len(tmp_content) > 300:
                item_map['content'] = tmp_content[0:296] + '...'
            trim_ret_doc.append(item_map)

        # Persist hit frequencies so later rankings can use them.
        hit_handle = SimilaritySearchHits(self.server_handle)
        hit_handle.update_search_hits(index_name, type_name, hits_id_arr, hit_freq_arr)

        # Delete the first result whose title (with <em> highlight markup
        # stripped) is identical to the query title.
        for idx, ret_init in enumerate(trim_ret_doc):
            if ret_init['title'].replace('<em>', '').replace('</em>', '') == query_json['title']:
                del trim_ret_doc[idx]
                break

        return trim_ret_doc

    def simi_search_list_click(self, index_name, type_name, query_json, field_, hit_times=None):
        """Handle a click on a similarity-search result.

        Loads the clicked document's full details and a fresh list of
        similar documents (the clicked document itself excluded).

        :param index_name: ES index name.
        :param type_name: ES document type.
        :param query_json: dict with at least ``'title'`` and ``'data_id'``.
        :param field_: field layout selector, see :meth:`simi_search_core`.
        :param hit_times: number of similar hits wanted.
        :return: ``(detail_documents, similar_documents)``; both empty when
            the query title is empty or the document is not found.
        """
        if len(query_json['title']) < 1:
            return [], []

        data_id = query_json['data_id']
        jsons = self.server_handle.search_used_id(index_name, type_name, data_id)
        if jsons is None:
            return [], []

        # Detail record for the clicked document.
        item_map = {
            'data_id': jsons["id"],
            'user_id': jsons["user_id"],
        }
        if field_ == 'title':
            item_map['title'] = jsons["title"]
            item_map['content'] = jsons["content"]
            item_map['content_ana_type'] = jsons["content_ana_type"]
        else:
            # Q&A layout: question/answer take the title/content slots.
            item_map['title'] = jsons["question"]
            item_map['content'] = jsons["answer"]
            item_map['content_ana_type'] = ''
        item_map['content_type'] = jsons["content_type"]
        item_map['content_link'] = ''
        item_map['status'] = jsons['status']
        item_map['create_time'] = jsons['create_time']
        item_map['update_time'] = jsons["update_time"]
        ret_document = [item_map]

        # Similar results for the clicked document.
        ret_document_init, hits_id_arr, hit_freq_arr = self.simi_search_core(
            index_name, type_name, field_, query_json, hit_times)
        hit_handle = SimilaritySearchHits(self.server_handle)
        hit_handle.update_search_hits(index_name, type_name, hits_id_arr, hit_freq_arr)

        # Remove the first similar result whose id equals the clicked
        # document's id, so the document does not list itself.
        for idx, ret_init in enumerate(ret_document_init):
            if ret_init['data_id'] == item_map['data_id']:
                del ret_document_init[idx]
                break

        return ret_document, ret_document_init

if __name__ == '__main__':
    # Ad-hoc manual check against the test environment.
    config = ES_Config()
    test_env = config.env_names['test']
    conn = ElasticSearchClass(test_env['HOST'], test_env['PORT'],
                              test_env['USER'], test_env['PASSWORD'])
    searcher = SimilaritySearch(conn)
    data = {'query': '性质类别：政法类', 'title': '请问中国人民公安大学是什么类型的学校？', "data_id": 8456, 'pro_id': '0013'}

    # Similarity search against the Q&A index of province 0013.
    result = searcher.simi_search_inter(config.index_names['province']['0013'],
                                        config.type_names['type'], data, 'question', 10)
    print(result)
