# -*- coding: utf-8 -*-
# @Author  : suny570
# @Time    : 2022-04-04 22:12

import os
import sys
import time
import pandas as pd

from src.beans.field_beans import FieldsBean
from src.db_es.build_mappings import build_standard_index, build_news_index
from src.utils_local.fileutil import read_file_get_data_standards
from src.utils_local.fileutil import read_file_get_data_news
# Make the project root importable: this file lives two directory levels
# below it, so walk up twice from this file's directory.
# NOTE(review): the `from src...` imports above (L10-13) execute *before*
# this append — they only resolve if the interpreter was already launched
# from the project root. Consider moving this block above those imports.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.dirname(os.path.dirname(curPath))
sys.path.append(rootPath)

from src.db_es.ElasticSearchClass import ElasticSearchClass
from src.db_es.es_config import ES_Config

# 大批量增加数据，没有实践环境，暂停
# def export_excel(client ,file_path, index_name, document_type):
#     """Dumping data in bulk to elasticsearch in chunk of 5000."""
#     data = read_file_get_data(file_path, index_name, document_type)
#     print(len(data))
#     for i in range(0, len(data), 5000):
#         try:
#             client.create_index_bulk(data[i: i + 5000])
#         except Exception as e:
#             print(str(e))

def insert_local_data_into_es(es_client, index_name, doc_type, file_, file_flag):
    """Read standard Q&A records from a local file and index them into ES.

    Document ids continue numbering from the count of documents already in
    the index, so repeated runs append rather than overwrite.

    :param es_client: connected ElasticSearchClass instance
    :param index_name: target ES index name
    :param doc_type: ES document type
    :param file_: full path of the source data file (csv/txt, not a real Excel file)
    :param file_flag: parser selector, 'csv_file' or 'txt_file'
    :return: None; prints the id of each successfully indexed record
    """
    meta_bean = FieldsBean()
    fields_arr = meta_bean.get_standards_fields_list()
    # `records` instead of `list` — don't shadow the builtin.
    records = read_file_get_data_standards(file_, index_name, doc_type, fields_arr, file_flag)
    # Start numbering after the documents already stored in the index.
    idx = es_client.count(index_name, doc_type) + 1
    for item in records:
        res = es_client.create_index_used_type_id_body(index_name, doc_type, idx, item["_source"])
        if res is not None:
            print("successed line is \t" + str(idx))
        idx += 1

def insert_local_news_data_into_es(es_client, index_name, doc_type, file_):
    """Read news records from a local file and index them into ES.

    Document ids continue numbering from the count of documents already in
    the index, so repeated runs append rather than overwrite.

    :param es_client: connected ElasticSearchClass instance
    :param index_name: target ES index name
    :param doc_type: ES document type
    :param file_: full path of the source data file (not a real Excel file)
    :return: None; prints the id of each successfully indexed record
    """
    meta_bean = FieldsBean()
    # News uses its own field list, unlike the standards importer above.
    fields_arr = meta_bean.get_news_fields_list()
    # `records` instead of `list` — don't shadow the builtin.
    records = read_file_get_data_news(file_, index_name, doc_type, fields_arr)
    # Start numbering after the documents already stored in the index.
    idx = es_client.count(index_name, doc_type) + 1
    for item in records:
        res = es_client.create_index_used_type_id_body(index_name, doc_type, idx, item["_source"])
        if res is not None:
            print("successed line is \t" + str(idx))
        idx += 1

# Load historical ids from ES, used to filter out duplicate records on import.
def load_release_date_dict(es_handle, index_name):
    """Return the set of `_source.id` values currently stored in *index_name*.

    :param es_handle: ES client exposing `search(index_name, size)`
    :param index_name: index to scan (up to 50000 documents)
    :return: set of ids; empty set when the search returns None
    """
    dates_set = set()
    all_response = es_handle.search(index_name, 50000)
    # Positive guard instead of `if ... is None: pass / else:`.
    if all_response is not None:
        for hits in all_response["hits"]["hits"]:
            dates_set.add(hits['_source']['id'])
        print("ES DB Contains release_date numbers \t" + str(len(dates_set)))
    return dates_set


def es_search_test():
    """Run a sample question search against the 'stand' index and print results.

    Prints the raw hits, then just the answers, then one full record dict per
    hit tagged with its 1-based rank.

    NOTE(review): relies on the module-global `es_handle` created in the
    __main__ block — calling this before that connection exists raises
    NameError.
    """
    es_config = ES_Config()
    jsons = es_handle.search_used_field_and_item_body(
        es_config.index_names['stand']['index'], es_config.type_names['type'],
        "question", "珠穆朗玛峰有多高", 100)
    print(jsons)

    # First pass: collect only the answer field from each hit.
    ret_document = [hits["answer"] for hits in jsons]
    print(str(ret_document))

    # Second pass: build a full result record per hit, tagged with its rank.
    ret_document = []
    rank_prefix = 'rank_number_'  # was misspelled `ret_profix`
    for record_num, hits in enumerate(jsons, start=1):
        item_map = {
            'data_id': hits["id"],
            'user_id': hits["user_id"],
            'title': hits["title"],
            'content_type': hits["content_type"],
            'content': hits["answer"],
            'standard_question': hits["question"],
            'content_link': '',
            'content_ana_type': '',
            'status': hits['status'],
            'create_time': hits['create_time'],
            'update_time': hits["update_time"],
            'rank_num': rank_prefix + str(record_num),
        }
        ret_document.append(item_map)
    print(ret_document)
if __name__ == '__main__':
    # ES connection setup.
    es_config = ES_Config()
    # Test environment credentials/endpoint.
    es_handle = ElasticSearchClass(es_config.env_names['test']['HOST'], es_config.env_names['test']['PORT'],
                                          es_config.env_names['test']['USER'], es_config.env_names['test']['PASSWORD'])

    ## Production environment (disabled).
    # es_handle = ElasticSearchClass(es_config.env_names['product']['HOST'], es_config.env_names['product']['PORT'],
    #                                       es_config.env_names['product']['USER'], es_config.env_names['product']['PASSWORD'])

    ## Insert data (disabled).
    # begin_time = time.time()
    # data_path = r'F:\AI_work\ES_based on search\corpus_class\S2_data.csv'
    # # Supports importing from a csv file: it must have exactly two columns,
    # # ['question'] and ['answer']. For a txt file, each line is one record with
    # # question and answer separated by \t. insert_local_data_into_es takes an
    # # extra argument: file_flag='csv_file' or file_flag='txt_file'.
    # file_flag = 'csv_file' ## remember to change this when switching datasets
    # insert_local_data_into_es(es_handle, es_config.index_names['province']['0000'], es_config.type_names['type'],
    #                           data_path, file_flag)
    # end_time = time.time()
    # times = end_time - begin_time
    # print(times)

    # # Delete a single document (disabled).
    # es_handle.delete_doc_used_id(es_config.index_names['stand']['index'], es_config.type_names['type'], 100019207)
    ### Delete records by id (disabled).
    # df_id = [100019207,100019206,100019205,100019204]
    # ii = 9995
    # for i in range(0, 2):
    #     df_id.append(ii)
    #     ii += 1
    #
    # for id_no in df_id:
    #     try:
    #         es_handle.delete_doc_used_id(es_config.index_names['province']['0000'], es_config.type_names['type'], id_no)
    #         print('id为:', id_no, '的记录删除成功')
    #     except:
    #         print('找不到id为:', id_no, '的记录')
    # es_handle.delete_doc_used_id(es_config.index_names['province']['0036'], es_config.type_names['type'], 100000084)
    # es_handle.delete_doc_used_id(es_config.index_names['stand']['index'], es_config.type_names['type'], 1)
    # es_handle.delete_index(es_config.index_names['news']['index'])

    # Total document count (disabled).
    # print("标准数据总数",str(es_handle.count(es_config.index_names['stand']['index'], es_config.type_names['type'])))


    # # Generic search test (disabled).
    # res_list = es_handle.search(es_config.index_names['province']['0000'], 10)['hits']['hits']
    # for item in res_list:
    #    print("搜索得到的数据为", str(item))

    # Standard-question search test (active): match "question" field in the
    # province '0013' index and print up to 10 hits.
    ret_field_list = es_handle.search_used_field_match_item(es_config.index_names['province']['0013'], es_config.type_names['type'],
                      "question", "公司合同", 10)

    print(len(ret_field_list))
    print(ret_field_list)

    # NOTE(review): in the disabled block below the `else` is attached to the
    # `for` loop, so "no search result return" would print after every
    # non-empty iteration too — fix before re-enabling.
    # ret_field_list = es_handle.search_used_field_match_item(es_config.index_names['stand']['index'], es_config.type_names['type'],
    #                                                         "question", "《先主传》出自哪里？", 5)
    # if ret_field_list is not None:
    #     for fid in ret_field_list:
    #         print(str(fid))
    #     else:
    #         print("no search result return")

    # Lookup / count helpers (disabled).
    # ret_json = es_handle.search_used_id(es_config.index_names['stand']['index'], es_config.type_names['type'], 23)
    # print(ret_json)
    # count = es_handle.count(es_config.index_names['stand']['index'], es_config.type_names['type'])
    # print(count)

    # Field-update helpers (disabled).
    # print(es_handle.update_used_id_field_item(es_config.index_names['stand']['index'], es_config.type_names['type'], 'exter_1', 5))
    # print(es_handle.update_used_id_field_item(es_config.index_names['stand']['index'], es_config.type_names['type'], 'exter_1', 1))

    # Top-records query (disabled).
    # ret_json = es_handle.search_top_records(es_config.index_names['stand']['index'], es_config.type_names['type'], 'exter_1', 0, 10)
    # print(ret_json)
    # es = ElasticSearchClient.get_as_server() # create ES connection



