"""不涉及到业务逻辑的底层数据处理函数模块"""
import pandas as pd

from air_web.config.config import config
from air_web.data_platform.ruleng_pandas import AnalyticsPandas
from air_web.data_platform.sql_process import MySQLHelper
from air_web.web_flask.common.constants import SCROLL_SIZE
from ruleng.misc_utils import unify_rules


class HBAnalyticsPandas(AnalyticsPandas):
    """Extension of AnalyticsPandas adding from/size paged queries and
    scroll-record ``_source`` extraction helpers."""

    def slice_query_records(
        self,
        time_spec,
        start_index=0,
        scroll_size=SCROLL_SIZE,
        sort_field=None,
        sort_order="desc",
        _source=False,
        id_field=False,
    ):
        """Run a from/size paged search and return the hit sources.

        :param time_spec: [ts, te] time range installed as the time rule
        :param start_index: offset of the first record (``from_``)
        :param scroll_size: page size (``size``)
        :param sort_field: field to sort on; no sort clause when falsy
        :param sort_order: "asc" or "desc" (used only with sort_field)
        :param _source: optional list of fields to keep in ``_source``
        :param id_field: when truthy, copy the ES ``_id`` into each source
        :return: list of ``_source`` dicts
        """
        request = {}
        self.time_spec = time_spec
        self.add_time_spec_rule(time_spec)
        self.update_auto_timed_associate_table(time_spec)
        self.rule_gen_query_req(request)
        # Paging size is controlled by the size= argument of search_new,
        # so drop any size the rule generator produced.
        request.pop("size", None)
        if sort_field:
            request["sort"] = [{sort_field: {"order": sort_order}}]
        if _source:
            request["_source"] = _source
        rr = self.search_new(
            request, _source=False, from_=start_index, size=scroll_size
        )
        res = []
        for hit in rr[0]["hits"]["hits"]:
            if id_field:
                hit["_source"]["_id"] = hit["_id"]
            res.append(hit["_source"])
        return res

    def get_recent_scroll_records_sources(self, one_match_res, **kwargs):
        """Yield the ``_source`` of every scrolled record.

        :param one_match_res: match result forwarded to
            ``get_recent_scroll_records``
        :param kwargs: forwarded as-is except ``id_field`` — when truthy,
            the ES ``_id`` is copied into each yielded source
        """
        # BUGFIX: pop with a default — the old unconditional
        # kwargs.pop("id_field") raised KeyError when the caller
        # did not supply id_field.
        id_field = kwargs.pop("id_field", False)
        for doc in self.get_recent_scroll_records(one_match_res, **kwargs):
            if id_field:
                doc["_source"]["_id"] = doc["_id"]
            yield doc["_source"]


class EsBaseDal(HBAnalyticsPandas):
    """
    ES data-access base class.

    Inherits from AnalyticsPandas and ultimately the ruleng
    ``Analytics`` class, which performs the actual ES access.
    """

    # Update operations are rarely used, so the update helper is not
    # initialised by default.
    ep = None

    def __init__(self, es_host=None, **kwargs):
        """
        Initialise ruleng.

        :param es_host: ES host; falls back to ``config["ES_HOST"]``.
            BUGFIX: resolved at call time instead of being baked in as
            an import-time default argument, so configuration loaded
            after module import is still honoured.
        """
        self.es_host = es_host if es_host is not None else config["ES_HOST"]
        self.req = EsBaseDal.get_defatul_req()
        super().__init__(host=self.es_host)

    def add_time_spec_rule(self, time_spec0, offset="0d"):
        """
        Install *time_spec0* as the time-range ("ts") rule.

        Replaces the first existing ("query", "ts") entry of
        ``self.rule`` in place, or prepends a new rule when none exists.

        :param time_spec0: [ts, te] time range (copied, so the caller's
            list is never shared with the rule)
        :param offset: unused; kept for interface compatibility
        """
        time_spec = time_spec0[:]
        self._match_context.time_range = time_spec
        new_rule = (self.doc_time_field, "query", "ts", [time_spec])
        for idx, item in enumerate(self.rule):
            if "query" == item[1] and "ts" == item[2]:
                self.rule[idx] = new_rule
                return
        self.rule.insert(0, new_rule)

    @staticmethod
    def get_defatul_req():
        """
        Return the default request parameters.

        NOTE: the method name keeps its historical typo ("defatul") so
        existing callers are not broken.

        :return: dict with the scroll flag, a very wide default time
            range and the default document time field
        """
        return {
            "scroll": True,
            "ts": "2000-01-01",
            "te": "2100-01-01",
            "doc_time_field": "CREATE_DATE",
        }

    def init_rule(self, rules, index_name, doc_time_field=None, time_out=100):
        """
        Initialise the ruleng parent-class state for one query.

        :param rules: raw rules, normalised through ``unify_rules``
        :param index_name: target index name
        :param doc_time_field: document time field; defaults to
            ``self.req["doc_time_field"]``
        :param time_out: query timeout
        """
        self.search_index = index_name  # target index of the ruleng parent
        self.rule = unify_rules(rules)  # rules of the ruleng parent
        # This project does not use ruleng's redis layer, so switch it
        # off (the old comment claimed "FALSE" while setting True).
        self.redis_off = True
        self.doc_time_field = (
            doc_time_field if doc_time_field else self.req["doc_time_field"]
        )
        self.time_out = time_out

    def if_indices(self, index=None):
        """
        Check whether *index* exists in ES.

        :param index: index name; any falsy value yields False
        :return: bool
        """
        return self.elasticsearch.indices.exists(index) if index else False

    def get_group_vector(
        self, rules, index_name, ts=None, te=None, doc_time_field=None
    ):
        """
        Run the rules as a vector query over [ts, te].

        :param rules: ES query rules
        :param index_name: target index
        :param ts: start time; defaults to ``self.req["ts"]``
        :param te: end time; defaults to ``self.req["te"]``
        :param doc_time_field: document time field override
        :return: ``vector_rule`` result, or [] when the index is missing
        """
        if not self.if_indices(index_name):
            return []
        self.init_rule(
            rules=rules,
            index_name=index_name,
            doc_time_field=(
                doc_time_field
                if doc_time_field
                else self.req["doc_time_field"]
            ),
        )
        time_range = [
            ts if ts else self.req["ts"],
            te if te else self.req["te"],
        ]
        return self.vector_rule(time_range)

    def get_same(self, rules, index_name, re_len=True, te=None):
        """
        Return the deduplicated keys produced by a ruleng "same" rule
        (only one "same" is supported).

        :param rules: ES query rules
        :param index_name: index name
        :param re_len: when truthy return the count, otherwise the keys
        :param te: end time; defaults to ``self.req["te"]``
        :return: int when ``re_len`` is truthy, else a list
            (docstring fixed — the old one claimed int unconditionally)
        """
        if not self.if_indices(index_name):
            return 0 if re_len else []
        self.init_rule(rules=rules, index_name=index_name)
        res = self.vector_rule(
            [self.req["ts"], te if te else self.req["te"]]
        )
        return len(res) if re_len else res

    def ruleng_query_count(
        self, rules, index_name, te=None, doc_time_field=None
    ):
        """
        Count records matching the rules.

        :param rules: ES query rules
        :param index_name: target index
        :param te: end time; defaults to ``self.req["te"]``
        :param doc_time_field: document time field override
        :return: int (0 when the index is missing or nothing matched)
        """
        if not self.if_indices(index_name):
            return 0
        self.init_rule(
            rules=rules, index_name=index_name, doc_time_field=doc_time_field
        )
        res = self.vector_rule(
            [self.req["ts"], te if te else self.req["te"]]
        )
        # res[0][1] is taken as the match count — presumably the shape
        # of ruleng's vector_rule result; verify against ruleng.
        return res[0][1] if res else 0

    def ruleng_query(
        self,
        rules,
        index_name,
        start_index=0,
        scroll=False,
        scroll_size=SCROLL_SIZE,
        sort_field=None,
        doc_time_field=None,
        ts=None,
        te=None,
        id_field=False,
        source=True,
        time_out=100,
    ):
        """
        Query ES records.

        :param rules: match rules
        :param index_name: index name
        :param start_index: page start offset
        :param scroll: whether to page with from/size
        :param scroll_size: page size
        :param sort_field: sort field; defaults to the doc time field
        :param doc_time_field: document time field in the index
        :param ts: start time
        :param te: end time
        :param id_field: whether to include the ``_id`` field
        :param source: list of fields to keep, or True for all
        :param time_out: query timeout
        :return: list of sources when ``scroll`` is truthy; otherwise a
            generator of sources, or [] when nothing matched / no index
        """
        if not self.if_indices(index_name):
            return []
        # Configure the ruleng parent-class state.
        self.init_rule(
            rules=rules,
            index_name=index_name,
            doc_time_field=doc_time_field,
            time_out=time_out,
        )
        sort_field = sort_field if sort_field else self.req["doc_time_field"]
        time_range = [
            ts if ts else self.req["ts"],
            te if te else self.req["te"],
        ]
        if scroll:
            return self.slice_query_records(
                time_spec=time_range,
                start_index=start_index,
                scroll_size=scroll_size,
                sort_field=sort_field,
                id_field=id_field,
            )
        res = self.vector_rule(time_range)
        if not res:
            return []
        return self.get_recent_scroll_records_sources(
            one_match_res=res[0], id_field=id_field, source=source
        )

    def ruleng_query_scroll(
        self,
        rules,
        index_name,
        start_index=0,
        scroll_size=SCROLL_SIZE,
        sort_field=None,
        doc_time_field=None,
        te=None,
        id_field=False,
    ):
        """
        Deep-pagination query for offsets beyond the 10000-record
        from/size window; yields record sources one by one.

        :param rules: match rules
        :param index_name: index name
        :param start_index: number of leading records to skip
        :param scroll_size: page size; 0 means "yield everything"
        :param sort_field: sort field; defaults to the doc time field
        :param doc_time_field: document time field in the index
        :param te: end time; defaults to ``self.req["te"]``
        :param id_field: whether to include the ``_id`` field
        """
        if not self.if_indices(index_name):
            return []
        # NOTE(review): this permanently overrides the instance default
        # time field when doc_time_field is supplied (pre-existing
        # behaviour, kept for compatibility).
        self.req["doc_time_field"] = (
            doc_time_field if doc_time_field else self.req["doc_time_field"]
        )
        self.init_rule(
            rules, index_name, doc_time_field=self.req["doc_time_field"]
        )
        sort_field = sort_field if sort_field else self.req["doc_time_field"]
        res = self.vector_rule(
            [self.req["ts"], te if te else self.req["te"]]
        )
        if not res:
            return []
        count = 0
        end_index = start_index + scroll_size
        for r in self.get_recent_scroll_records(res[0], sort_field):
            count += 1
            # Yield records inside the (start_index, end_index] window;
            # scroll_size == 0 disables the upper bound.
            if count > start_index and (count <= end_index or scroll_size == 0):
                if id_field:
                    r["_source"]["_id"] = r["_id"]
                yield r["_source"]
            elif count > end_index:
                # Past the window — stop scrolling early. (The old
                # dead `scroll = True` local has been removed.)
                break

    def query_dataframe_scroll(
        self,
        rules,
        index_name,
        start_index=0,
        scroll_size=SCROLL_SIZE,
        sort_field=None,
        doc_time_field=None,
        te=None,
        id_field=False,
    ):
        """
        Deep-pagination query (>10000 records) returning a DataFrame.

        :param rules: match rules
        :param index_name: index name
        :param start_index: number of leading records to skip
        :param scroll_size: page size; 0 means "no limit"
        :param sort_field: sort field
        :param doc_time_field: document time field
        :param te: end time
        :param id_field: whether to include an ``_id`` column
        :return: DataFrame with the time column cast to datetime64 when
            present and NaNs replaced by ""
        """
        rows = []
        if self.if_indices(index_name):
            rows = list(
                self.ruleng_query_scroll(
                    rules,
                    index_name,
                    start_index=start_index,
                    scroll_size=scroll_size,
                    sort_field=sort_field,
                    doc_time_field=doc_time_field,
                    te=te,
                    id_field=id_field,
                )
            )
        if not rows:
            return pd.DataFrame()
        doc_time = (
            doc_time_field if doc_time_field else self.req["doc_time_field"]
        )
        res = pd.DataFrame(rows)
        # BUGFIX: only cast when the column exists (consistent with
        # query_dataframe) — previously a missing time column raised
        # KeyError from astype.
        if doc_time in res:
            res = res.astype({doc_time: "datetime64[ns]"})
        return res.fillna("")

    def query_dataframe(
        self,
        rules,
        index_name,
        start_index=0,
        scroll=False,
        scroll_size=SCROLL_SIZE,
        sort_field=None,
        doc_time_field=None,
        ts=None,
        te=None,
        id_field=False,
        source=True,
    ):
        """
        Query ES and return the result as a DataFrame.

        :param rules: query rules
        :param index_name: index name
        :param start_index: page start offset
        :param scroll: whether to page
        :param scroll_size: page size
        :param sort_field: sort field
        :param doc_time_field: document time field
        :param ts: start time
        :param te: end time
        :param id_field: whether to include an ``_id`` column (off by
            default)
        :param source: list of fields to keep, or True for all
        :return: DataFrame; NaNs replaced by "", time column cast to
            datetime64 when present
        """
        rows = []
        if self.if_indices(index_name):
            rows = list(
                self.ruleng_query(
                    rules,
                    index_name,
                    start_index=start_index,
                    scroll_size=scroll_size,
                    scroll=scroll,
                    sort_field=sort_field,
                    doc_time_field=doc_time_field,
                    ts=ts,
                    te=te,
                    id_field=id_field,
                    source=source,
                )
            )
        if not rows:
            return pd.DataFrame()
        doc_time = (
            doc_time_field if doc_time_field else self.req["doc_time_field"]
        )
        # fillna before the cast, matching the original ordering here.
        res_df = pd.DataFrame(rows).fillna("")
        if doc_time in res_df:
            res_df = res_df.astype({doc_time: "datetime64[ns]"})
        return res_df


class SQLBaseDal(MySQLHelper):
    """MySQL data-access base class built on MySQLHelper.

    Connection settings come from the global ``config``; only the
    database name may be overridden by the caller via ``mysql_db_name``.
    """

    def __init__(self, **kwargs):
        # Caller-supplied database name wins; otherwise use the
        # configured default.
        db_name = kwargs.get("mysql_db_name") or config["MYSQL_DB_NAME"]
        fixed_keys = (
            "MYSQL_HOST",
            "MYSQL_PORT",
            "MYSQL_USER",
            "MYSQL_PASSWORD",
            "MYSQL_ENCODING",
        )
        overrides = {key: config[key] for key in fixed_keys}
        overrides["MYSQL_DB_NAME"] = db_name
        # Connection settings take precedence over anything in kwargs.
        super().__init__(**{**kwargs, **overrides})
