import ctypes
import time
import traceback
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch, helpers
from elasticsearch.exceptions import ConnectionTimeout, NotFoundError

from air_web.config.config import config
from air_web.data_platform import init_db
from air_web.web_flask.dal.base_dal import EsBaseDal

class AggCommon:
    """Base class for aggregation post-processing steps."""

    # Column used to pick the daily "maximum" row: total power by default,
    # air-conditioning power when "is_total_max" is disabled in config.
    MAX_FIELD = (
        "p_total_sum" if config.get("is_total_max", True) else "p_kt_sum"
    )

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelt=None,
        rewrite=None,
    ):
        """Initialize shared DB/ES handles and the step configuration.

        Args:
            logger: application logger used by all subclasses.
            task: task dict; must contain "task_id".
            start_time: window start, "YYYY-mm-dd HH:MM:SS" string.
            end_time: window end, same format.
            step: step identifier, used in log messages.
            step_dict: step config with "source_table", "save_table",
                "dimensions", "indicators", "rename", "flag_field".
            timedelt: optional "<date> <time>" string; only the time part
                is kept.
            rewrite: truthy when existing rows are deleted and rewritten.
        """
        self.logger = logger
        self.task = task
        self.task_id = task["task_id"]
        self.sql_engine = init_db()
        self.sql_engine_cons = init_db(config["MYSQL_CONS_DB_NAME"])
        # Use the elasticsearch-py client directly. Loading the compiled
        # extension module through ctypes.CDLL cannot expose Python-level
        # classes such as a client, so the previous CDLL approach was broken.
        self.es = Elasticsearch(config["ES_HOST"])
        self.es_dal = EsBaseDal(config["ES_HOST"])

        self.step = step
        self.start_time = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
        if self.start_time.date() == datetime.now().date():
            # When the computed day is today, extend end_time to "now" plus
            # the configured prediction horizon (the stored results contain
            # predicted points for the whole day).
            self.end_time = datetime.now() + timedelta(
                minutes=config["dws_predict_minutes"]
            )
        else:
            self.end_time = datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")
        # Baseline comparisons always use the raw end_time, even for today.
        self.baseline_end_time = datetime.strptime(
            end_time, "%Y-%m-%d %H:%M:%S"
        )

        # Keep only the time-of-day portion of "<date> <time>".
        self.timedelta = timedelt.split(" ")[1] if timedelt else timedelt
        self.rewrite = rewrite

        self.source_table = step_dict.get("source_table")
        self.save_table = step_dict.get("save_table")
        self.dimensions = step_dict.get("dimensions")
        self.indicators = step_dict.get("indicators")
        self.rename = step_dict.get("rename")
        self.flag_field = step_dict.get("flag_field")

        # Aggregation result, filled by agg_data() in subclasses.
        self.res_df = pd.DataFrame()

    def do_bulk(
        self,
        actions,
        parallel=False,
        chunk_size=5000,
        thread=4,
        retry=5,
        raise_on_error=False,
        request_timeout=60,
    ):
        """Bulk-index *actions* into ES, printing a success/failure summary.

        Args:
            actions: iterable of bulk action dicts.
            parallel: use parallel_bulk (threaded) instead of streaming_bulk.
            chunk_size: docs per bulk request.
            thread: thread count for the parallel path.
            retry: max retries per chunk (streaming path only).
            raise_on_error: propagate per-document bulk errors.
            request_timeout: per-request timeout in seconds.
        """
        success = 0
        failed = 0
        try:
            # The bulk helpers live in elasticsearch.helpers; calling them
            # through a ctypes.CDLL handle (as before) can never work.
            if parallel:
                gen = helpers.parallel_bulk(
                    self.es,
                    actions,
                    thread_count=thread,
                    chunk_size=chunk_size,
                    raise_on_error=raise_on_error,
                    request_timeout=request_timeout,
                )
            else:
                gen = helpers.streaming_bulk(
                    self.es,
                    actions,
                    max_retries=retry,
                    raise_on_error=raise_on_error,
                    chunk_size=chunk_size,
                    request_timeout=request_timeout,
                )
            for ok, info in gen:
                if ok:
                    success += 1
                else:
                    failed += 1
                    print(info)
        except Exception:
            # Best-effort import: log the traceback, never crash the step.
            print("Caught do_bulk exception")
            traceback.print_exc()
        print(f"import es:{success}/{success + failed}")

    def truncate_table(self):
        """Truncate the step's target table before rewriting it."""
        target = self.save_table
        self.sql_engine.truncate_table(target)

    def save_data_to_table(self, no_update_col_list=None):
        """Persist self.res_df into self.save_table via the SQL engine.

        Logs an error and returns early when the result frame is empty.

        Args:
            no_update_col_list: columns excluded from the update on conflict.
        """
        if self.res_df.empty:
            self.logger.error(
                "结果数据为空,task_id:{},step:{},star_time:{} ...".format(
                    self.task_id, self.step, self.start_time
                )
            )
            return
        t1 = time.time()
        # NaN is not a valid SQL value; store NULL instead.
        self.res_df.replace({np.nan: None}, inplace=True)
        self.sql_engine.update_df_by_id(self.res_df, self.save_table,
                                        no_update_col_list=no_update_col_list)
        t2 = time.time()
        # Both branches now go through the logger; the original used a bare
        # print() for the non-rewrite case, inconsistent with the rest of
        # the class. Message strings are unchanged.
        msg = (
            "删除重写mysql数据,time:{},task_id:{},step:{},star_time:{},数据条数:{}"
            if self.rewrite
            else "写入mysql,time:{},task_id:{},step:{},star_time:{},数据条数:{}"
        )
        self.logger.info(
            msg.format(
                t2 - t1,
                self.task_id,
                self.step,
                self.start_time,
                len(self.res_df),
            )
        )

    def rename_agg_indicators(self):
        """Rename aggregated indicator columns of res_df in place, using the
        step config's "rename" mapping."""
        self.res_df.rename(columns=self.rename, inplace=True)

    def rename_add_dimensions(self, agg_conf):
        """Rename newly added dimension columns of res_df in place.

        Args:
            agg_conf: per-dimension config; entries provide a "rename" map.
        """
        for dim in (d for d in self.dimensions if d in agg_conf):
            self.res_df.rename(columns=agg_conf[dim]["rename"], inplace=True)

    def agg_data(self):
        """Aggregate base_df by the configured dimensions/indicators.

        Special case: when aggregating by "highest_type_id" without
        "type_code_sort", only high-voltage rows (type_code_sort == 1)
        are included ("whole industry" equals "high voltage").
        """
        source = self.base_df
        if ("highest_type_id" in self.dimensions
                and "type_code_sort" not in self.dimensions):
            source = source.loc[source["type_code_sort"] == 1]
        self.res_df = (
            source.groupby(self.dimensions)
            .agg(self.indicators)
            .reset_index()
        )

    def add_agg_dimensions(self, agg_conf):
        """Append extra grouping columns from agg_conf to self.dimensions.

        Args:
            agg_conf: per-dimension config; entries provide a "columns" list.
        """
        extra = [
            column
            for dim in self.dimensions
            if dim in agg_conf
            for column in agg_conf[dim]["columns"]
        ]
        self.dimensions.extend(extra)

    def add_max_time_flag(self):
        """Mark each day's maximum-load row per (org_no, type_id) in res_df."""
        # Rows holding the day's maximum of MAX_FIELD.
        group_field = ["org_no", "type_id"]
        day_max_df = self.get_day_max(group_field)
        day_max_df["is_day_max"] = 1

        # Attach the flag to the aggregated result; unmatched rows get NaN
        # here and 0 below.
        self.res_df = self.res_df.merge(
            day_max_df[["org_no", "type_id", "data_time", "is_day_max"]],
            on=["org_no", "type_id", "data_time"],
            how="left",
        )
        # Assign back instead of chained Series.fillna(inplace=True): under
        # pandas 2.x copy-on-write the chained form no longer updates the
        # parent frame.
        self.res_df["is_day_max"] = self.res_df["is_day_max"].fillna(0)

    def get_day_max(self, group_field):
        """Return the row with the largest MAX_FIELD per group and day.

        Args:
            group_field: list of grouping columns. Not mutated (the original
                appended "data_date" to the caller's list as a side effect).

        Returns:
            DataFrame with an added "data_date" column, one row per
            (group_field..., data_date) holding the maximum of MAX_FIELD.
        """
        # Copy before extending so the caller's list is left untouched.
        group_cols = list(group_field) + ["data_date"]
        day_max_df = self.res_df.sort_values(self.MAX_FIELD, ascending=False)
        day_max_df["data_date"] = pd.to_datetime(
            day_max_df["data_time"]
        ).dt.date
        # Sorted descending, so the first row kept per group is the maximum.
        day_max_df.drop_duplicates(group_cols, inplace=True)
        return day_max_df

    def get_baseline_date(self):
        """Choose a baseline day from last month by temperature similarity.

        Collects each last-month day's max temperature from ES, then picks
        the coolest day at least as hot as today; when no such day exists,
        the hottest cooler day is used instead.

        Returns:
            Baseline date string ("YYYY-mm-dd"), or None when weather data
            is unavailable for today or for the whole of last month.
        """
        current_date = self.start_time.strftime("%Y-%m-%d")
        this_month_start = datetime.strptime(current_date, "%Y-%m-%d").replace(
            day=1
        )
        last_month_end = this_month_start - timedelta(days=1)
        last_month_start = last_month_end.replace(day=1)
        aclr_weather_hour = config.get(
            "ACLR_WEATHER_HOUR", "aclr_weather_hour"
        )

        date_list = pd.date_range(
            start=last_month_start, end=last_month_end, freq="1d"
        ).strftime("%Y-%m-%d")
        tmp_list = []
        for date in date_list:
            rules = [
                ("datetime", "query", "=", date),
                ("tmp", "stat", "max", ">", "0"),
            ]
            res = self.es_dal.get_group_vector(
                rules, aclr_weather_hour, doc_time_field="datetime"
            )
            if len(res) > 0:
                tmp_list.append({"date": date, "tmp": round(res[0][1], 2)})
            else:
                # Route through the logger for consistency with the rest of
                # the class (was a bare print).
                self.logger.warning(f"缺少天气数据:{date}")
        base_tmp_df = pd.DataFrame(tmp_list)
        if base_tmp_df.empty:
            # Without any candidate day, the "tmp" column does not exist and
            # the comparisons below would raise a KeyError.
            self.logger.warning(
                "天气数据未查询到,无法判断基线日,data_time:{}".format(current_date)
            )
            return None

        rules = [
            ("datetime", "query", "=", current_date),
            ("tmp", "stat", "max", ">", "0"),
        ]
        res = self.es_dal.get_group_vector(
            rules, aclr_weather_hour, doc_time_field="datetime"
        )
        if len(res) == 0:
            self.logger.warning("天气数据未查询到,无法判断基线日,data_time:{}".format(current_date))
            return None
        today_tmp = round(res[0][1], 2)

        # Coolest last-month day that is at least as hot as today.
        greater_df = base_tmp_df[base_tmp_df["tmp"] >= today_tmp].sort_values(
            "tmp"
        )
        if not greater_df.empty:
            baseline_day = greater_df.iloc[0]["date"]
        else:
            # Fall back to the hottest cooler day.
            smaller_df = base_tmp_df[
                base_tmp_df["tmp"] < today_tmp
            ].sort_values("tmp", ascending=False)
            baseline_day = smaller_df.iloc[0]["date"]
        self.logger.info(
            "今日:{}, 温度：{}，基线日:{}".format(current_date, today_tmp, baseline_day)
        )
        return baseline_day

    def get_temperature_data(self):
        """Return per-region max/min temperature within [start_time, end_time).

        Returns:
            DataFrame with columns region_code, max_tmp, min_tmp; empty
            when no weather documents were found (a warning is logged).
        """
        index_name = config.get("ACLR_WEATHER_HOUR", "aclr_weather_hour")
        rules = [
            ("datetime", "query", ">=", self.start_time),
            ("datetime", "query", "<", self.end_time),
        ]
        weather_df = self.es_dal.query_dataframe(
            rules,
            index_name,
            doc_time_field="datetime",
            source=["tmp", "region_code"],
        )
        if weather_df.empty:
            self.logger.warning("天气数据未查询到,data_time:{}".format(self.start_time))
            return weather_df
        grouped = weather_df.groupby("region_code")["tmp"].agg(["max", "min"])
        result = grouped.reset_index().rename(
            columns={"max": "max_tmp", "min": "min_tmp"}
        )
        return result

    def proc_agg_data(self, agg_res):
        """Flatten ES terms-aggregation buckets into a DataFrame.

        Bucket keys are "|"-joined values of self.agg_columns; every other
        bucket entry (except "key"/"doc_count") is a metric whose "value"
        becomes a column.

        Args:
            agg_res: list of ES aggregation buckets.

        Returns:
            DataFrame with one row per bucket.
        """
        rows = []
        for bucket in agg_res:
            parts = bucket["key"].split("|")
            # type_id is stored as a string in the key but used as an int.
            row = {
                col: int(parts[i]) if col == "type_id" else parts[i]
                for i, col in enumerate(self.agg_columns)
            }
            for name, payload in bucket.items():
                if name not in ("key", "doc_count"):
                    row[name] = payload["value"]
            rows.append(row)
        return pd.DataFrame(rows)

    def search_es(self, index_name, query, on7, add_log=""):
        """Run an ES search with retries and return the group_by buckets.

        Retries up to 10 times on ConnectionTimeout; a missing index is
        logged once and treated as an empty result.

        Args:
            index_name: index (or pattern) to query.
            query: ES request body; must define an aggs "group_by" terms agg.
            on7: org identifier, used only in log messages.
            add_log: extra text appended to log lines.

        Returns:
            List of aggregation buckets (empty when nothing was found).
        """
        t1 = time.time()
        result = None
        for i in range(10):
            try:
                result = self.es.search(
                    index=index_name, body=query, request_timeout=300
                )
                break
            except NotFoundError:
                # No point retrying a missing index.
                self.logger.warning(
                    "索引不存在,task_id:{},step:{},star_time:{},index:{},org:{} ...".format(
                        self.task_id,
                        self.step,
                        self.start_time,
                        index_name,
                        on7,
                    )
                )
                break
            except ConnectionTimeout:
                self.logger.warning(
                    "search失败次数:{},task_id:{},step:{},star_time:{},index:{},org:{},{} ...".format(
                        i + 1,
                        self.task_id,
                        self.step,
                        self.start_time,
                        index_name,
                        on7,
                        add_log,
                    )
                )
        t2 = time.time()
        self.logger.info(
            "search_time:{},task_id:{},step:{},star_time:{},index:{},org:{},{} ...".format(
                t2 - t1,
                self.task_id,
                self.step,
                self.start_time,
                index_name,
                on7,
                add_log,
            )
        )
        agg_res = (
            result["aggregations"]["group_by"]["buckets"] if result else []
        )
        if len(agg_res) == 0:
            # The original format string had no placeholder for add_log, so
            # the argument was silently dropped; a "{}" slot is added.
            self.logger.warning(
                "数据为空，跳过task_id:{},step:{},star_time:{},index:{},org:{},{}...".format(
                    self.task_id,
                    self.step,
                    self.start_time,
                    index_name,
                    on7,
                    add_log,
                )
            )
        return agg_res

    def get_baseline_idx_name(self):
        """Build the baseline index name for start_time's day.

        Substitutes the literal "date" token in the configured pattern with
        the day formatted as "YYYY.MM.DD".
        """
        pattern = config.get("BASELINE_IDX", "aclr_res_baseline-date")
        day_token = self.start_time.strftime("%Y.%m.%d")
        return pattern.replace("date", day_token)

    @staticmethod
    def get_power_96_idx_name(on5, on7):
        """Build the power-load index name for the given org identifiers.

        The "cal01" token becomes "1" when only calculated consumers are
        wanted, otherwise the "*" wildcard.
        """
        cal_token = "1" if config["c_cons_filter_is_cal"] else "*"
        index_name = config["POWERLOAD"]
        for token, value in (
            ("on5", str(on5)),
            ("on7", str(on7)),
            ("cal01", cal_token),
        ):
            index_name = index_name.replace(token, value)
        return index_name

    def get_es_query(
        self,
        on7,
        group_source,
        aggs_dict,
        start_time=None,
        end_time=None,
        add_query=None,
        must_not_query=None,
    ):
        """Assemble an ES aggregation query over [start_time, end_time).

        Args:
            on7: org identifier; codes starting with "13" get an extra
                p_total range filter.
            group_source: painless script computing the terms-agg key.
            aggs_dict: sub-aggregations nested under "group_by".
            start_time: window start; defaults to self.start_time.
            end_time: window end; defaults to self.end_time.
            add_query: extra clauses appended to the bool "must" list.
            must_not_query: dict merged into the bool query (e.g. must_not).

        Returns:
            ES request body dict with size 0 and a "group_by" terms agg.
        """
        if not start_time:
            start_time = self.start_time
        if not end_time:
            end_time = self.end_time

        must_clauses = [
            {
                "range": {
                    "data_time": {
                        "gte": start_time,
                        "lt": end_time,
                    }
                }
            },
        ]
        if add_query:
            must_clauses.extend(add_query)
        # Orgs whose code starts with "13" get a p_total sanity filter.
        if str(on7)[:2] == "13":
            must_clauses.append(
                {"range": {"p_total": {"gte": 0, "lt": 8000000}}}
            )

        bool_query = {"must": must_clauses}
        if must_not_query:
            bool_query.update(must_not_query)

        return {
            "size": 0,
            "query": {"bool": bool_query},
            "aggs": {
                "group_by": {
                    "terms": {
                        "size": 10000,
                        "script": {
                            "source": group_source,
                            "lang": "painless",
                        },
                    },
                    "aggs": aggs_dict,
                }
            },
        }

    def get_aggs_dict(self):
        """Build the ES sub-aggregation dict for every indicator/agg pair.

        Keys are "<field>_<agg_type>"; "count" maps to ES "value_count".
        """
        return {
            f"{field}_{agg_type}": {
                # ES has no plain "count" metric; use value_count instead.
                ("value_count" if agg_type == "count" else agg_type): {
                    "field": field
                }
            }
            for agg_type in self.agg_types
            for field in self.indicators
        }

    def get_group_source(self):
        """Build the painless script concatenating the grouping columns.

        Produces "doc['a'].value+'|'+doc['b'].value+..." so the terms-agg
        key is the "|"-joined values of self.agg_columns.
        """
        fields = (f"doc['{col}'].value" for col in self.agg_columns)
        return "+'|'+".join(fields)
