# -*- coding: utf-8 -*-
# @Author  : suny570
# @Time    : 2022-04-04 22:12


import os
import sys
import time

from src.beans.field_beans import FieldsBean
from src.utils_local.fileutil import read_file_get_data_standards
from src.utils_local.fileutil import read_file_get_data_news
# Resolve the project root (two directories above this file) and put it on
# sys.path so the ``src`` package resolves when the script is run directly.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
rootPath = os.path.split(rootPath)[0]
# Only the parent of the package directory needs to be on the path.
# NOTE(review): the ``src.*`` imports above execute *before* this append, so
# presumably the script is normally launched from a cwd where they already
# resolve — verify before relying on this path fix.
sys.path.append(rootPath)

from src.db_es.ElasticSearchClass import ElasticSearchClass
import src.db_es.es_config as Config

# Bulk insert of large data batches; paused — no environment to exercise it.
# def export_excel(client ,file_path, index_name, document_type):
#     """Dumping data in bulk to elasticsearch in chunk of 5000."""
#     data = read_file_get_data(file_path, index_name, document_type)
#     print(len(data))
#     for i in range(0, len(data), 5000):
#         try:
#             client.create_index_bulk(data[i: i + 5000])
#         except Exception as e:
#             print(str(e))

def insert_local_data_into_es(es_client, index_name, doc_type, file_):
    """
    Read standard-question records from a local file and store them in ES.

    Records whose ``id`` already exists in the index are skipped; new records
    are inserted with sequential document ids continuing from the current
    document count.

    :param es_client: connected ElasticSearch client wrapper
    :param index_name: target ES index name
    :param doc_type: target ES document type
    :param file_: full path of the data file (plain text, not a real Excel file)
    :return: None
    """
    meta_bean = FieldsBean()
    fields_arr = meta_bean.get_standards_fields_list()
    # Renamed from ``list`` to avoid shadowing the builtin.
    records = read_file_get_data_standards(file_, index_name, doc_type, fields_arr)
    # IDs already present in ES, used to filter duplicates.
    existing_ids = load_release_date_dict(es_client, index_name)
    # Continue numbering after the documents already stored.
    idx = es_client.count(index_name) + 1
    for item in records:
        if item['_source']['id'] not in existing_ids:
            # New record: insert it under the next sequential document id.
            res = es_client.create_index_used_type_id_body(index_name, doc_type, idx, item["_source"])
            if res is not None:
                print("successed line is \t" + str(idx))
            idx += 1

def insert_local_news_data_into_es(es_client, index_name, doc_type, file_):
    """
    Read news records from a local file and store them in ES.

    Records whose ``id`` already exists in the index are skipped; new records
    are inserted with sequential document ids continuing from the current
    document count.

    :param es_client: connected ElasticSearch client wrapper
    :param index_name: target ES index name
    :param doc_type: target ES document type
    :param file_: full path of the data file (plain text, not a real Excel file)
    :return: None
    """
    meta_bean = FieldsBean()
    # News uses its own field list (see FieldsBean.get_news_fields_list).
    fields_arr = meta_bean.get_news_fields_list()
    # Renamed from ``list`` to avoid shadowing the builtin.
    records = read_file_get_data_news(file_, index_name, doc_type, fields_arr)
    # IDs already present in ES, used to filter duplicates.
    existing_ids = load_release_date_dict(es_client, index_name)
    # Continue numbering after the documents already stored.
    idx = es_client.count(index_name) + 1
    for item in records:
        if item['_source']['id'] not in existing_ids:
            # New record: insert it under the next sequential document id.
            res = es_client.create_index_used_type_id_body(index_name, doc_type, idx, item["_source"])
            if res is not None:
                print("successed line is \t" + str(idx))
            idx += 1

# Load historical IDs, used to filter out duplicate records
def load_release_date_dict(es_handle, index_name):
    """
    Collect the ``id`` of every document currently stored in *index_name*.

    Despite the historical name, this returns document *ids*, not release
    dates (name kept so existing callers keep working).

    :param es_handle: ES client wrapper exposing ``search(index, size)``
    :param index_name: index to scan (at most 500000 docs per query)
    :return: set of existing document ids; empty set when the search failed
    """
    ids = set()
    all_response = es_handle.search(index_name, 500000)
    # ``search`` returns None on failure; treat that as "no existing ids".
    if all_response is not None:
        for hit in all_response["hits"]["hits"]:
            ids.add(hit['_source']['id'])
        print("ES DB Contains release_date numbers \t" + str(len(ids)))
    return ids


if __name__ == '__main__':

    # ES connection — older targets kept (commented) for reference.
    #es_handle = ElasticSearchClass(Config.ONLINE_ES_HOST, Config.ONLINE_ES_PORT, "", "")

    # es_handle = ElasticSearchClass(Config.TEST_ONLINE_ES_HOST, Config.TEST_ONLINE_ES_PORT
    #                                      ,Config.TEST_ONLINE_ES_USER, Config.TEST_ONLINE_ES_PASSWORD)
    # NOTE(review): this connects to the PRODUCTION cluster — double-check
    # before running experimental queries from this script.
    es_handle = ElasticSearchClass(Config.PRODUCT_ONLINE_ES_HOST, Config.PRODUCT_ONLINE_ES_PORT
                                         ,Config.PRODUCT_ONLINE_ES_USER, Config.PRODUCT_ONLINE_ES_PASSWORD)
    # Data insertion (disabled)

    # insert_local_news_data_into_es(es_handle, Config.ONLINE_ES_INDEX_NEWS, Config.ONLINE_ES_TYPE_NEWS, r'F:\AI_work\ES_based on search\corpus_class\all.txt')

    # Total document counts
    # print(str(es_handle.count(Config.ONLINE_ES_INDEX)))
    # print("新闻数据总数",str(es_handle.count(Config.ONLINE_ES_INDEX_NEWS)))

    # standard_data_file = r'E:\软研院项目\2021\知识图谱\FAQ_data.txt'
    # insert_local_data_into_es(es_handle, Config.ONLINE_ES_INDEX, Config.ONLINE_ES_TYPE, standard_data_file)

    # Total document count
    #print(str(es_handle.count(Config.ONLINE_ES_INDEX)))

    # Search smoke test
    # res_list = es_handle.search(Config.ONLINE_ES_INDEX_NEWS, 500)['hits']['hits']
    # for item in res_list:
    #    print("搜索得到的数据为", str(item))

    # Standard-question search test
    # ret_field_list = es_handle.search_used_field_and_item(Config.ONLINE_ES_INDEX, Config.ONLINE_ES_TYPE, "question", "珠穆朗玛峰",100)
    # if ret_field_list is not None:
    #    for fid in ret_field_list:
    #         print(str(fid))
    # else:
    #     print("no search result return")

    # jsons = es_handle.search_used_field_and_item_body(Config.ONLINE_ES_INDEX, Config.ONLINE_ES_TYPE,"question", "珠穆朗玛峰有多高", 100)
    # Match-query on the "question" field, at most 2 hits.
    # NOTE(review): the loops below index hits["answer"] / hits["id"] directly,
    # so search_used_field_match_item presumably returns flattened _source
    # dicts rather than raw ES hit envelopes — confirm in its implementation.
    jsons = es_handle.search_used_field_match_item(Config.ONLINE_ES_INDEX, Config.ONLINE_ES_TYPE, "question", "清炖子鸡需要多少克子鸡", 2)
    # jsons_ans = es_handle.search_used_field_match_item(Config.ONLINE_ES_INDEX, Config.ONLINE_ES_TYPE, "answer",
    #                                                "月亮山主峰海拔1490.3米，相对高差1100余米，山体雄伟高大，沟谷切割深长。", 1)
    ret_document = []

    # jsons_ans.extend(jsons)

    print(jsons)
    # print(jsons_ans)
    ### Collect just the "answer" field of every hit.
    for hits in jsons:
        ret_document.append(hits["answer"])
    print(str(ret_document))

    # Rebuild the results as full records, each tagged with a rank label
    # ("rank_number_1", "rank_number_2", ...).
    ret_document = []
    ret_profix = 'rank_number_'
    record_num = 1
    # Extract the fields of each hit and save them into an item map.
    for hits in jsons:
        item_map = {}
        item_map['data_id'] = hits["id"]
        item_map['user_id'] = hits["user_id"]
        item_map['title'] = hits["title"]
        item_map['content_type'] = hits["content_type"]
        item_map['content'] = hits["answer"]
        item_map['standard_question'] = hits["question"]
        item_map['content_link'] = ''        # not present in the source hit
        item_map['content_ana_type'] = ''    # not present in the source hit
        item_map['status'] = hits['status']
        item_map['create_time'] = hits['create_time']
        item_map['update_time'] = hits["update_time"]
        item_map['rank_num'] = ret_profix + str(record_num)
        ret_document.append(item_map)
        # ret_document[ret_profix+str(record_num)] = item_map
        record_num += 1
    print(ret_document)
