# -*- coding: utf-8 -*-
"""
# @Author       :   howtime
# @CreateTime   :   2021/9/18
# @File         :   es_utils.py
# @Desc         :   ...
# @Version      :   v0.0
# @Update       :   2021/9/18
"""
import json
import re
import jsonlines
import requests
from pathlib import Path
from tqdm import tqdm, trange
from elasticsearch import Elasticsearch, helpers


class EsUtils:
    """Thin convenience wrapper around an Elasticsearch client.

    Provides bulk upload from local JSON/jsonlines files, full-index
    download to jsonlines, and a few common fuzzy/exact query helpers.
    """

    def __init__(self, host, port=9200):
        """
        :param host: Elasticsearch server host (IP or hostname).
        :param port: Elasticsearch HTTP port, default 9200.
        """
        self.host = host
        # Store the port so other methods (e.g. download_all_data) can
        # build URLs instead of hard-coding 9200.
        self.port = port
        self.ES = Elasticsearch(f'http://{host}:{port}')

    def batch_upload(self, data_path='', batch_size=10000):
        """
        Bulk-upload a local file into ES. The installed `elasticsearch`
        package version must be below 7.10.0.

        :param data_path: path of the data to upload. Either a `.jsonlines`
                          file (one JSON object per line) or a JSON file
                          shaped like {"RECORDS": [{...}, {...}]}, e.g.:
                          [{'field1': 'example1', 'field2': [1, 2, 3], 'field3': 99},
                           {'field1': 'example2', 'field2': 99, 'field3': ['up', 'down']}]
        :param batch_size: number of documents per bulk request, default 10000.

        The index name defaults to the file's stem (filename without suffix).
        """
        path = Path(data_path)
        if path.suffix == '.jsonlines':
            contents = list(jsonlines.open(data_path))
        else:
            # Use a context manager so the file handle is always closed.
            with open(data_path, 'r', encoding='utf-8') as fp:
                contents = json.load(fp)['RECORDS']

        index = path.stem
        for num in trange(0, len(contents), batch_size):
            actions = [
                {"_index": index, "_type": index, "_source": content}
                for content in contents[num:num + batch_size]
            ]
            helpers.bulk(self.ES, actions)

    def download_all_data(self, index, save_dir):
        """
        Download every document of an ES index and save it as jsonlines.

        :param index: name of the index to dump.
        :param save_dir: directory where "<index>.jsonlines" is written.

        Indexes with more than 10000 docs are fetched via the scroll API,
        since a plain search cannot page past `max_result_window`.
        """
        # Use self.port instead of a hard-coded 9200 so non-default
        # ports keep working.
        total = json.loads(
            requests.get(f'http://{self.host}:{self.port}/{index}/_count').text
        )['count']
        if total > 10000:
            content = []
            body = {"query": {"bool": {"must": [{"match_all": {}}]}}, "from": 0, "size": 10000}
            res = self.ES.search(body=body, index=index, scroll='10s')
            content += res['hits']['hits']
            sid = res['_scroll_id']
            # The initial search already returned the first 10000 hits;
            # keep scrolling until all remaining pages are collected.
            for _ in range(10000, total, 10000):
                content += self.ES.scroll(scroll_id=sid, scroll='10s')['hits']['hits']
        else:
            body = {"query": {"bool": {"must": [{"match_all": {}}]}}, "from": 0, "size": total}
            content = self.ES.search(body=body, index=index)['hits']['hits']

        # Context manager guarantees the output file is flushed and closed.
        with jsonlines.open(f"{save_dir}/{index}.jsonlines", 'w') as writer:
            for hit in tqdm(content):
                writer.write(hit['_source'])

    def fuzzy_total_index(self, text: str, db_name: str, rt_num=10):
        """
        Fuzzy-match a string against ALL fields of an index.

        :param text: string to match; '-' characters are stripped first
                     because they are query_string operators.
        :param db_name: name of the index to search.
        :param rt_num: maximum number of hits to return, default 10.
        :return: list of matching hit dicts.
        """
        text = re.sub('[-]', '', text)
        query = {"query": {"query_string": {"query": text}}, "size": rt_num, "from": 0, "sort": []}
        return self.ES.search(body=query, index=db_name)['hits']['hits']

    def fuzzy_single_index(self, text: str, index: str, db_name: str, rt_num=10):
        """
        Fuzzy-match a string against a single field of an index.

        :param text: string to match.
        :param index: field name to match against.
        :param db_name: name of the index to search.
        :param rt_num: maximum number of hits to return, default 10.
        :return: list of matching hit dicts.
        """
        query = {'query': {'bool': {'must': [{'match': {index: text}}]}}, 'size': rt_num}
        return self.ES.search(body=query, index=db_name)['hits']['hits']

    def acc_single_index(self, text: str, index: str, db_name: str):
        """
        Exact-match a string against a single field of an index
        (term query on the field's `.keyword` sub-field).

        :param text: string to match exactly.
        :param index: field name to match against.
        :param db_name: name of the index to search.
        :return: list of matching hit dicts.
        """
        query = {'query': {'term': {f"{index}.keyword": text}}}
        return self.ES.search(body=query, index=db_name)['hits']['hits']


def _main():
    """Example usage: dump one ES index to a local jsonlines file."""
    client = EsUtils('192.168.8.240')
    client.download_all_data('药品归一化库', '/home/kv/workspace/归一化库/药品')


if __name__ == '__main__':
    _main()
