import time
import multiprocessing

from typing import Dict

from .u_exception import ErrorLogger, exception_capture_decorator
from .common_func.print_logger import PrintLogger as Logger
# One-shot flag so the missing-dependency warning is emitted only once.
import_error_warned = False
try:
    from elasticsearch import Elasticsearch, TransportError, NotFoundError
except ModuleNotFoundError:
    # The elasticsearch package is optional: warn at import time instead of failing,
    # so modules that never touch UElasticSearch can still import this file.
    if not import_error_warned:
        Logger.warning("UElasticSearch is not valid, because require module:[elasticsearch] is not installed. "
                       "If you want use UElasticSearch, please install [elasticsearch] first")
        import_error_warned = True


# Default page sizes for scroll-based bulk reads (documents per scroll page).
MAX_SCROLL_SIZE = 10000
MIN_SCROLL_SIZE = 100


class UElasticSearch(object):
    """
    Elasticsearch management wrapper.

    Main features:
        (1) connect to an Elasticsearch server
        (2) fetch index information
        (3) exact (term) queries on a single field, with pagination support
        (4) custom queries
    """

    def __init__(self, conn_config: dict, logger):
        """
        :param conn_config: connection settings with keys host/port/user/password
                            (defaults: localhost / 9200 / elastic / elastic)
        :param logger: logger object exposing debug/info/warning/error
        """
        self._logger = logger
        self._logger.debug(f"Create a UES object with config: {conn_config}")
        self._host = conn_config.get("host", "localhost")
        self._port = conn_config.get("port", "9200")
        self._user = conn_config.get("user", "elastic")
        self._password = conn_config.get("password", "elastic")
        self._es_client = Elasticsearch(
            hosts=f"http://{self._host}:{self._port}",
            http_auth=(self._user, self._password)
        )
        # NOTE(review): this logs the bound method object, not the cluster info;
        # calling .info() here would add a network round-trip (and a possible
        # exception) to the constructor, so it is intentionally left unchanged.
        self._logger.debug(f"es client: {self._es_client.info}")
        # Register this process's logger so the exception-capture decorator can
        # route errors to the right logger in multi-process setups.
        cur_process = multiprocessing.current_process()
        logger_name = cur_process.name
        ErrorLogger.add_logger(logger_name, logger)
        # Timestamp of the last client re-creation; refreshed by flush_client()
        # to guard against server-side connection resets.
        self.last_flush = time.time()
        self.max_scroll_size = MAX_SCROLL_SIZE

    def flush_client(self):
        """Re-create the ES client when the current connection no longer answers ping."""
        if not self._es_client.ping():
            self._es_client = Elasticsearch(
                hosts=f"http://{self._host}:{self._port}",
                http_auth=(self._user, self._password)
            )
            self.last_flush = time.time()

    @exception_capture_decorator(_return=dict())
    def get_index_info(self) -> dict:
        """
        Fetch information about all indices.
        :return: mapping of index name -> index settings/mappings (empty dict on error)
        """
        self.flush_client()
        return self._es_client.indices.get(index='*')

    @exception_capture_decorator(_return=(dict(), None))
    def search_by_field_must_term(self,
                                  search_index: str,
                                  field: str,
                                  value: str,
                                  page_size: int = 1000,
                                  sort_field: str = "_id",
                                  order: str = "desc",
                                  search_after: list = None) -> tuple:
        """
        Exact (term) search on one field; the condition must match.
        :param search_index: name of the index to query
        :param field: field name to match
        :param value: field value to match
        :param page_size: number of documents per page
        :param sort_field: field the results are sorted by
        :param order: sort order, "desc" or "asc"
        :param search_after: pagination cursor; None on the first call, then the
                             search_after value returned by the previous call
                             (a list of sort values — the old ``str`` hint was wrong)
        :return: (result - raw ES response, search_after - cursor for the next page)
        """
        self.flush_client()
        search_query = {
            "query": {
                "term": {
                    field: {
                        "value": value
                    }
                }
            },
            "size": page_size,
            "track_total_hits": True
        }
        if sort_field:
            search_query["sort"] = {
                sort_field: order
            }

        if search_after:
            search_query['search_after'] = search_after

        result = self._es_client.search(index=search_index, body=search_query)
        # The cursor for the next page is the sort value of the last hit.
        if result['hits']['hits']:
            search_after = result['hits']['hits'][-1].get('sort', None)
        else:
            search_after = None

        return result, search_after

    @exception_capture_decorator(_return=dict())
    def search(self, search_index: str, search_query: dict, scroll=None) -> dict:
        """
        Run a caller-supplied query.
        :param search_index: index name
        :param search_query: full ES query body
        :param scroll: optional scroll keep-alive (e.g. "2m"); omitted when falsy
        :return: raw ES response (empty dict on error)
        """
        self.flush_client()
        if scroll:
            return self._es_client.search(index=search_index, body=search_query, scroll=scroll)
        else:
            return self._es_client.search(index=search_index, body=search_query)

    def accurate_query(self, search_index: str, size: int = 1000, **kwargs) -> list:
        """
        Exact term query: every key/value pair in kwargs must match exactly.
        :return: list of matching _source documents
        """
        query = {"query": {"term": kwargs}, "size": size}
        return self.get_doc(search_index=search_index, query=query, size=size)

    @exception_capture_decorator(_return=list())
    def get_doc(self, search_index: str, query: Dict, size: int = 1000) -> list:
        """
        Run a query and return only the _source documents.
        :param size: page size; overwrites any 'size' already present in query
        :return: list of _source documents (empty on error)
        """
        self.flush_client()
        query['size'] = size
        res = self._es_client.search(index=search_index, body=query)
        return self._get_doc_source(res)

    @exception_capture_decorator(_return=0)
    def count(self, search_index: str, query: Dict) -> int:
        """
        Count documents matching a query.
        :return: number of matches (0 on error, via the decorator)
        """
        # Consistency fix: every other query path refreshes the client first.
        self.flush_client()
        _count = self._es_client.count(index=search_index, body=query)
        return int(_count['count'])

    @exception_capture_decorator(_return=list())
    def get_from_size(self, search_index: str, query: Dict) -> list:
        """
        Fetch all matches with from/size pagination (500 docs per page).
        Falls back to get_large_doc (scroll API) when from+size exceeds the
        server-side result-window limit.
        :return: list of _source documents
        """
        res_list = list()
        self.flush_client()
        count = self._es_client.count(index=search_index, body=query)
        count = int(count['count'])
        self._logger.debug(f"Total {count} res")
        try:
            _from = -1
            remain = count
            while remain:
                _from += 1
                # Take a full page of 500 while enough documents remain, else the tail.
                _index = int(remain / 500)
                if _index:
                    this_size = 500
                    remain = remain - 500
                else:
                    this_size = remain
                    remain = 0
                _start = _from * 500
                query["size"] = this_size
                query["from"] = _start
                self._logger.debug(f"query from {_start} res, size:{this_size}")
                res = self._es_client.search(index=search_index, body=query)
                res_list = res_list + self._get_doc_source(res)
            self._logger.debug(f"total {count} res, got {len(res_list)} res")
        except Exception as e:
            self._logger.warning(f"query by size error, maybe data count:{count} is big then serv limit, "
                                 f"try by func:get_large_doc, error line: {e.__traceback__.tb_lineno}, error msg:{e}")
            # Strip the pagination keys before handing over to the scroll path.
            if "size" in query:
                query.pop("size")
            if "from" in query:
                query.pop("from")
            return self.get_large_doc(search_index=search_index, query=query)
        return res_list

    @exception_capture_decorator(_return=list())
    def get_large_doc(self, search_index: str, query: Dict) -> list:
        """
        Fetch an arbitrarily large result set via the scroll API.
        On a "too many scroll contexts" error the page size is reduced to the
        server limit parsed from the error message; on other search errors it
        retries once with MIN_SCROLL_SIZE.
        :return: list of _source documents (possibly empty)
        """
        self.flush_client()
        scroll_size = query.get("size", self.max_scroll_size)
        query['size'] = scroll_size
        res = dict()
        try:
            res = self._es_client.search(index=search_index, body=query, scroll='2m')
            self._logger.debug(f"[ES:get_large_doc]res: {res}")
        except TransportError as err:
            if int(err.status_code) == 500 and 'Trying to create too many scroll contexts' in str(err):
                # Parse the server-side limit out of the error text, e.g.
                # "... Must be less than or equal to: [500]. This limit ..."
                str_err = str(err)
                right_pos = str_err.find("]. This limit")
                left_str = "Must be less than or equal to: ["
                left_pos = str_err.find(left_str)
                self.max_scroll_size = int(str_err[left_pos + len(left_str):right_pos])
                self._logger.warning(f"ES search size:[{scroll_size}] too large, reduce it to max_open_scroll_context:{self.max_scroll_size} "
                                     f"of serv and retry, warning msg: {err}")
                self.flush_client()
                query['size'] = self.max_scroll_size
                try:
                    res = self._es_client.search(index=search_index, body=query, scroll='2m')
                    self._logger.info(f"Es retry with size={self.max_scroll_size}, OK!")
                except Exception as e:
                    self._logger.warning(
                        f"Still Failed, ES search reduce it to max_open_scroll_context:{self.max_scroll_size} "
                        f"of serv and retry is still failed, err msg: {e}, error line:{e.__traceback__.tb_lineno}")
                    res = dict()
        except Exception as e:
            self._logger.warning(f"ES search size:[{scroll_size}] too large, reduce it and retry, warning msg: {e}, "
                                 f"error line:{e.__traceback__.tb_lineno}")
            self.flush_client()
            query['size'] = MIN_SCROLL_SIZE
            try:
                res = self._es_client.search(index=search_index, body=query, scroll='2m')
                self._logger.info(f"Es retry with size={MIN_SCROLL_SIZE}, OK!")
            except Exception as e:
                self._logger.warning(
                    f"Still Failed, ES search reduce it to {MIN_SCROLL_SIZE} "
                    f"and retry is still failed, err msg: {e}, error line:{e.__traceback__.tb_lineno}")
                res = dict()

        data_list = list()
        scroll_index = 0
        last_scroll_id = None
        # BUGFIX: use .get() so a fully failed search (res == {}) yields an empty
        # list instead of raising KeyError into the capture decorator.
        while res.get('hits', {}).get('hits'):
            scroll_index += 1
            this_scroll_list = self._get_doc_source(res)
            self._logger.debug(f"[ES:get_large_doc]scroll index:{scroll_index}, get {len(this_scroll_list)} data")
            data_list = data_list + this_scroll_list
            scroll_id = res.get('_scroll_id')
            if not scroll_id:
                # BUGFIX: last_scroll_id is None on the first page; only clear a real id.
                if last_scroll_id:
                    self._es_client.clear_scroll(scroll_id=last_scroll_id)
                break
            res = self._es_client.scroll(scroll_id=scroll_id, scroll='2m')
            last_scroll_id = scroll_id
        else:
            # Normal termination (an empty page came back): release the scroll
            # context instead of leaving it to expire server-side after 2 minutes.
            if last_scroll_id:
                try:
                    self._es_client.clear_scroll(scroll_id=last_scroll_id)
                except Exception:
                    pass  # best-effort cleanup only; all data is already collected

        self._logger.info(f"[ES:get_large_doc]search index: {search_index} done! got {len(data_list)} data finally!"
                          f" query body:{query}")
        return data_list

    @staticmethod
    def _get_doc_source(query_res) -> list:
        """
        Extract the _source documents from a raw search response.
        :param query_res: raw ES response dict
        :return: list of _source dicts (empty when the response has no hits)
        """
        data_list = []
        hits = query_res.get("hits")
        # Robustness fix: a degraded/empty response has no "hits" section at all.
        if not hits:
            return data_list

        # 'total' may be a dict ({"value": n, ...}) or a plain int depending on ES version.
        total = hits.get('total')
        if isinstance(total, dict):
            total_value = total.get('value')
        else:
            total_value = total
        if total_value and total_value > 0:
            for data in hits.get('hits'):
                data_list.append(data.get('_source'))
        return data_list

    @exception_capture_decorator(_return=dict())
    def insert(self, index: str, doc_type: str, body: dict):
        """
        Insert one document.
        :param index: target index name
        :param doc_type: document type (deprecated in ES 7+, kept for compatibility)
        :param body: document payload
        :return: raw ES index response (empty dict on error)
        """
        self.flush_client()
        res = self._es_client.index(index=index, doc_type=doc_type, body=body)
        self._logger.debug(f"ES insert, doc={body}, res:{res}")
        return res

    def paginated_search(self,
                         search_index: str,
                         search_condition: dict = None,
                         page_size: int = 1000,
                         sort_dict: dict = None,
                         search_after: list = None) -> tuple:
        """
        Paginated search with a caller-supplied query condition.
        :param search_index: name of the index to query
        :param search_condition: the "query" part of the ES body; defaults to a
                                 match-all bool query when omitted
        :param sort_dict: sort specification, e.g. {"key1": "desc", "key2": "asc"}
        :param page_size: number of documents per page
        :param search_after: pagination cursor; None on the first call, then the
                             search_after value returned by the previous call
        :return: (result, search_after); on failure ``result`` is the string
                 "NotFoundError" (missing index) or "ESError" (any other error)
        """
        if not search_condition:
            search_condition = {
                "bool": {
                    "must": [],
                    "must_not": [],
                    "should": []
                }
            }
        try:
            # Consistency fix: refresh the client like the other query paths do.
            self.flush_client()
            search_query = {
                "query": search_condition,
                "size": page_size,
                "track_total_hits": True
            }
            if sort_dict:
                search_query["sort"] = sort_dict

            if search_after:
                search_query['search_after'] = search_after

            result = self._es_client.search(index=search_index, body=search_query)
            if result['hits']['hits']:
                search_after = result['hits']['hits'][-1].get('sort', None)
            else:
                search_after = None
            return result, search_after
        except NotFoundError:  # the queried index does not exist
            return "NotFoundError", None
        except Exception as e:
            self._logger.error(f"search after exception, search condition: {search_condition}, error info: {e}")
            return "ESError", None

    def insert_document(self, index_name, doc_id=None, document=None):
        """
        Insert a document into an Elasticsearch index.

        :param index_name (str): target index name
        :param doc_id (str, optional): document id; ES auto-generates one when None
        :param document (dict): document payload, must be a dict

        :return: dict with a success flag and related payload
                 success: {"success": True, "data": es_response}
                 failure: {"success": False, "error": error_message}
        """
        # Validate the payload before touching the network.
        if document is None or not isinstance(document, dict):
            error_msg = "doc not a dict"
            self._logger.error(error_msg)
            return {"success": False, "error": error_msg}

        try:
            self.flush_client()

            params = {}

            self._logger.debug(f"insert doc to {index_name}，doc_id: {doc_id if doc_id else 'autogen'}")

            if doc_id:
                result = self._es_client.index(
                    index=index_name,
                    id=doc_id,
                    body=document,
                    params=params
                )
            else:
                result = self._es_client.index(
                    index=index_name,
                    body=document,
                    params=params
                )

            self._logger.debug(f"insert doc to {index_name}，result: {result}")
            return {"success": True, "data": result}

        except Exception as e:
            error_msg = f"insert ERROR! index: {index_name} err: {str(e)}"
            self._logger.error(error_msg)
            return {"success": False, "error": error_msg}

    @exception_capture_decorator(_return=False)
    def is_index_exist(self, index_name):
        """
        Check whether an index exists.
        :param index_name: index name
        :return: True if the index exists, False otherwise (or on error)
        """
        self.flush_client()
        return self._es_client.indices.exists(index=index_name)

    @exception_capture_decorator(_return=None)
    def create_es_index(self, index_name, target_fields):
        """
        Create an ES index, building indexes only for the given target fields.
        :param index_name: index name
        :param target_fields: field definitions to index, e.g.:
            {
                "recode_time": {"type": "date", "format": "yyyy-MM-dd HH:mm:ss"},
                "url": {"type": "text"},       # full-text field
                "api_id": {"type": "integer"}  # numeric id field
            }
        """
        self.flush_client()
        # No-op when the index already exists.
        if self._es_client.indices.exists(index=index_name):
            self._logger.debug(f"index {index_name} already exists")
            return

        # Mapping template: any field NOT listed in target_fields is stored but
        # excluded from indexing via the dynamic template below.
        mapping = {
            "mappings": {
                "dynamic_templates": [
                    {
                        "disabled_index_fields": {  # automatically disable indexing for undeclared fields
                            "match": "*",
                            "match_mapping_type": "*",
                            "mapping": {"index": False}
                        }
                    }
                ],
                "properties": {
                    # Only the target fields get explicit index types.
                    **target_fields
                }
            }
        }

        # Create the index.
        self._es_client.indices.create(index=index_name, body=mapping)


if __name__ == "__main__":
    # Minimal stdout logger used only for this manual smoke test.
    class PrintLogger:
        debug = print
        info = print
        warning = print
        error = print

    conn_conf = {
        "host": "localhost",
        "port": "9200",
        "user": "elastic",
        "password": "elastic",
    }
    es_handle = UElasticSearch(conn_conf, PrintLogger)

    # Raw term search through the generic search() entry point.
    print(es_handle.search(search_index="jg_log", search_query={"query": {"term": {"gid": "d4c3f5d0-5840-49c8-a9d4-3556bea3a4af"}}}))

    # Same lookup via the convenience wrapper; print a few fields of the first hit.
    term_filter = {"gid": "d4c3f5d0-5840-49c8-a9d4-3556bea3a4af"}
    docs = es_handle.accurate_query("jg_log", **term_filter)
    if docs:
        first_doc = docs[0]
        for field_name in ("source_app", "source_app_type", "details", "logtime"):
            print(first_doc[field_name])

    # Bulk scroll read over the last ~100 minutes of audit-session records.
    range_query = {
        "query": {
            "bool": {
                "must": [
                    {"range": {"operation_time": {"gte": int(time.time()*1000 - 6000000), "lte": int(time.time()*1000)}}},
                    {"term": {"source_app": "apiAudit"}},
                    {"term": {"source_app_type": "api_audit_sess"}},
                    {"exists": {"field": "api_path"}},
                    {"exists": {"field": "sub_ip"}},
                    {"exists": {"field": "server_address"}},
                    {"exists": {"field": "full_url"}},
                ]
            }
        }
    }
    print(es_handle.get_large_doc(search_index="jg_log", query=range_query))
