"""基础业务逻辑模块"""
import logging
import traceback
from datetime import datetime, timedelta

import numpy as np
import pandas as pd
from kafka import KafkaProducer

from air_web.config.config import config
from air_web.dw.data_mapping import DataType
from air_web.web_flask.bll.model_map import ModelMap

# from web_flask.common.constants import AREA_MAX_TIME, SOURCE_TYPE
from air_web.web_flask.common.data_mapping import (
    AlertLevel,
    AlertType,
    ArrangMode,
    EquipType,
    InvertType,
    ProcStatus,
)
from air_web.web_flask.dal.base_dal import EsBaseDal, SQLBaseDal

logger = logging.getLogger()


class BaseBll(object):
    """Base business-logic (BLL) class shared by the web layer.

    Wires up optional data-access backends (Elasticsearch, MySQL, the
    consumer-DB MySQL instance, Kafka) on demand, and provides common
    query/post-processing helpers used by concrete BLL subclasses.
    """

    # Class-level defaults; instance attributes shadow these once
    # __init_dal creates the real DAL objects.
    es_dal = None
    sql_dal = None
    # Which backend the ana_query* helpers should hit ("es" vs. SQL).
    data_source = config["SOURCE_TYPE"]
    # Name of the table recording the time of the daily maximum total load.
    AREA_MAX_TIME = config["AREA_MAX_TIME"]

    # DataFrame column name -> mapping object whose .MAP dict translates
    # coded values into their Chinese display text
    # (consumed by convert_code_to_chinese).
    CONVERT_CODE_TO_CHINESE = {
        "EQUIP_TYPE": EquipType,
        "ALERT_LEVEL": AlertLevel,
        "ALERT_TYPE": AlertType,
        "PROC_STATUS": ProcStatus,
        "ARRANG_MODE": ArrangMode,
        "INVERT_TYPE": InvertType,
    }

    def __repr__(self):
        return "{}".format(self.__class__.__name__)

    def __init__(
        self,
        init_es_dal=False,
        init_mysql_dal=False,
        init_cons_mysql_dal=False,
        init_kafka_producer=False,
        **kwargs,
    ):
        """Create the BLL, initialising only the requested backends.

        :param init_es_dal: bool, create an Elasticsearch DAL
        :param init_mysql_dal: bool, create the main MySQL DAL
        :param init_cons_mysql_dal: bool, create the consumer-DB MySQL DAL
        :param init_kafka_producer: bool, create a Kafka producer
        :param kwargs: forwarded verbatim to the DAL constructors
        """
        self.kwargs = kwargs
        self.__init_dal(
            init_es_dal=init_es_dal,
            init_mysql_dal=init_mysql_dal,
            init_cons_mysql_dal=init_cons_mysql_dal,
            init_kafka_producer=init_kafka_producer,
        )
        self.data = dict()
        self.rules = []
        self.logger = logger

    def __init_dal(
        self,
        init_es_dal=False,
        init_mysql_dal=False,
        init_cons_mysql_dal=False,
        init_kafka_producer=False,
    ):
        """Lazily construct only the backends that were asked for."""
        if init_es_dal:
            self.es_dal = EsBaseDal(**self.kwargs)
        if init_mysql_dal:
            self.sql_dal = SQLBaseDal(**self.kwargs)
        if init_cons_mysql_dal:
            # Separate DAL pointed at the consumer database.
            self.cons_sql_dal = SQLBaseDal(
                mysql_db_name=config["MYSQL_CONS_DB_NAME"]
            )
        if init_kafka_producer:
            self.kafka_producer = KafkaProducer(
                bootstrap_servers=','.join(config['KAFKA_HOST'])
            )

    def ping_es(self):
        """Return True if Elasticsearch answers a ping, False otherwise.

        Best effort: any failure is logged (with traceback) and reported
        as False rather than propagated.
        """
        try:
            return self.es_dal.elasticsearch.ping()
        except Exception:
            # Log through the module logger instead of printing the
            # traceback to stderr, so the failure reaches the log files.
            self.logger.exception("Elasticsearch ping failed")
            return False

    def get_max_time(self, start_date, end_date, org_no):
        """Look up the time at which the total load peaked.

        :param start_date: str YYYY-MM-DD (inclusive)
        :param end_date: str YYYY-MM-DD (inclusive)
        :param org_no: str organisation number
        :return: str "YYYY-MM-DD hh:mm:ss", or the sentinel "9999-01-01"
                 when no matching row exists
        """
        # NOTE(review): values are interpolated straight into the SQL
        # text; this is safe only while start_date/end_date/org_no come
        # from trusted internal callers — parameterise if that changes.
        if start_date == end_date:
            # Single-day lookup: the table holds one row per day/org.
            sql = """select max_time from {area_max_time} 
                     where compute_date='{start_date}' 
                       and org_no='{org_no}'""".format(
                area_max_time=self.AREA_MAX_TIME,
                start_date=start_date,
                org_no=org_no,
            )
        else:
            # Range lookup: pick the day with the highest total load.
            sql = """select max_time
                     from {area_max_time} 
                     where compute_date >= '{start_date}'
                       and compute_date <= '{end_date}'
                       and org_no='{org_no}'
                     order by max_p_total desc
                     limit 1
                  """.format(
                area_max_time=self.AREA_MAX_TIME,
                start_date=start_date,
                end_date=end_date,
                org_no=org_no,
            )

        res_df = self.sql_dal.sql_pandas.query(sql)
        if len(res_df) == 0:
            self.logger.info("未查到总负荷最大的时间")
            # Far-future sentinel: callers treat it as "no data".
            return "9999-01-01"
        max_time = res_df["max_time"].tolist()[0]
        self.logger.info("总负荷最大的时间:{}".format(max_time))

        return max_time

    def get_powerload_index(self, cons_no):
        """Build the ES powerload index name for one consumer.

        Expands the configured index template by substituting the
        consumer's org-number prefix (on5/on7 placeholders) and the
        is_cal filter flag.

        :param cons_no: consumer number used as the ModelMap lookup key
        :return: str index name/pattern
        """
        powerload_idx = config["POWERLOAD"]
        on7 = str(ModelMap.get("c_cons_all", "on7", db_key=cons_no))
        # "*" disables the calculated-only filter in the index pattern.
        is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
        powerload_idx = (
            powerload_idx.replace("on5", on7[0:5])
            .replace("on7", on7)
            .replace("cal01", is_cal)
        )
        self.logger.info("要查询的powerload索引:{}".format(powerload_idx))
        return powerload_idx

    def convert_code_to_chinese(self, res_df):
        """Translate coded columns into their Chinese display values.

        :param res_df: pd.DataFrame to translate in place
        :return: pd.DataFrame with known code columns mapped via
                 CONVERT_CODE_TO_CHINESE
        """
        if len(res_df) == 0:
            return res_df
        for column_str, type_obj in self.CONVERT_CODE_TO_CHINESE.items():
            if column_str not in res_df.columns.values:
                continue
            # Plain assignment instead of chained inplace=True replace:
            # identical result, but avoids pandas' chained-assignment
            # warning / deprecated inplace path.
            res_df[column_str] = res_df[column_str].replace(type_obj.MAP)
        return res_df

    @staticmethod
    def get_district_field(district):
        """Map a district id to the field name to query by.

        The trailing letter of the id encodes its kind:
        "S" -> stage, "L" -> line, "A" -> area.

        :param district: str district id
        :return: str field name, or None for an unknown suffix
        """
        if district[-1] == "S":
            distr_field = "STAGE_ID"
        elif district[-1] == "L":
            distr_field = "LINE_ID"
        elif district[-1] == "A":
            distr_field = "AREA_ID"
        else:
            distr_field = None
        return distr_field

    @staticmethod
    def get_index_suffix(start_date, end_date):
        """Compute the narrowest ES index suffix covering a date range.

        Daily indexes are named with dotted dates, so e.g. a same-month
        range collapses to "YYYY.MM*".

        :param start_date: str YYYY-mm-dd
        :param end_date: str YYYY-mm-dd
        :return: str suffix/glob pattern
        """
        if start_date == end_date:
            index_suffix = start_date.replace("-", ".")
        elif start_date[:7] == end_date[:7]:
            index_suffix = start_date[:7].replace("-", ".") + "*"
        elif start_date[:4] == end_date[:4]:
            index_suffix = start_date[:4].replace("-", ".") + "*"
        else:
            index_suffix = "*"
        return index_suffix

    def get_baseline_date(self, current_date):
        """Find last month's day whose max temperature best matches today.

        Prefers the coolest day that is at least as hot as today; falls
        back to the hottest cooler day.

        :param current_date: str YYYY-mm-dd
        :return: str YYYY-mm-dd baseline day, or None when weather data
                 is missing
        """
        this_month_start = datetime.strptime(current_date, "%Y-%m-%d").replace(
            day=1
        )
        last_month_end = this_month_start - timedelta(days=1)
        last_month_start = last_month_end.replace(day=1)
        aclr_weather_hour = config.get(
            "ACLR_WEATHER_HOUR", "aclr_weather_hour"
        )

        # Daily maximum temperature for the whole previous month.
        sql = f"""select date(datetime) as date,max(tmp) as tmp
                  from {aclr_weather_hour}
                  where datetime>='{last_month_start}' and datetime < '{this_month_start}'
                  group by date(datetime)
               """
        base_tmp_df = self.sql_dal.sql_pandas.query(sql)
        if base_tmp_df.empty:
            # Log instead of print, consistent with the rest of the class.
            self.logger.warning(f"缺少天气数据:{last_month_start}到{last_month_end}")
            return None

        # Today's maximum temperature, from whichever backend is active.
        if self.data_source == "es":
            rules = [
                ("datetime", "query", "=", current_date),
                ("tmp", "stat", "max", ">", "0"),
            ]
            res = self.es_dal.get_group_vector(
                rules, aclr_weather_hour, doc_time_field="datetime"
            )
            today_tmp = round(res[0][1], 2)
        else:
            current_date_end = datetime.strptime(current_date, "%Y-%m-%d") + timedelta(days=1)
            sql = f"""select max(tmp) as max_tmp 
                      from {aclr_weather_hour} 
                      where datetime >='{current_date}' and datetime < '{current_date_end}'
                   """
            res = self.sql_dal.sql_pandas.query(sql)
            today_tmp = res["max_tmp"].tolist()[0]
        # pd.isna also covers the NaN that pandas yields when the SQL
        # max() aggregates zero rows — a bare "is None" check missed it
        # and later crashed on smaller_df.iloc[0].
        if today_tmp is None or pd.isna(today_tmp):
            self.logger.warning(f"缺少天气数据:{current_date}")
            return None

        # Coolest day at least as hot as today ...
        greater_df = base_tmp_df[base_tmp_df["tmp"] >= today_tmp].sort_values(
            "tmp"
        )
        if not greater_df.empty:
            baseline_day = greater_df.iloc[0]["date"].strftime('%Y-%m-%d')
        else:
            # ... otherwise the hottest day cooler than today.
            smaller_df = base_tmp_df[
                base_tmp_df["tmp"] < today_tmp
            ].sort_values("tmp", ascending=False)
            baseline_day = smaller_df.iloc[0]["date"].strftime('%Y-%m-%d')
        self.logger.info(
            "今日:{}, 温度：{}，基线日:{}".format(current_date, today_tmp, baseline_day)
        )
        return baseline_day

    def get_predict_rate(self, res_df):
        """Compute per-day and average prediction accuracy.

        Deviation rate = (pred - actual) / pred * 100; if pred and actual
        are both 0 the deviation is 0, and if only pred is 0 the actual
        value is used as the denominator. Daily accuracy =
        100% - |deviation|, floored at 0%. Overall accuracy is the mean
        of daily accuracies; days without an accuracy are skipped.

        :param res_df: pd.DataFrame with p_total_true/p_total_pre and
                       p_kt_true/p_kt_pre columns
        :return: (total_rate, kt_rate, res_df) — rates rounded to 2
                 decimals or None when undefined
        """
        if res_df.empty:
            return None, None, res_df
        for p in ["p_total", "p_kt"]:
            p_true = f"{p}_true"
            p_pre = f"{p}_pre"
            sub_p_rate = f"sub_{p}_rate"
            pre_p_rate = f"pre_{p}_rate"

            res_df = res_df.astype({p_true: "float64", p_pre: "float64"})
            # Normal case: prediction non-zero -> |pred - actual| / pred.
            condition = (res_df[p_true].notnull()) & (res_df[p_pre] != 0)
            if condition.any():
                res_df.loc[condition, sub_p_rate] = (
                    abs(res_df[p_true] - res_df[p_pre]) / res_df[p_pre] * 100
                )
            # Both zero -> zero deviation by definition.
            res_df.loc[
                (res_df[p_true] == 0) & (res_df[p_pre] == 0), sub_p_rate
            ] = 0
            # Only the prediction is zero -> divide by the actual instead.
            condition = (
                (res_df[p_true].notnull())
                & (res_df[p_true] != 0)
                & (res_df[p_pre] == 0)
                & (res_df[p_pre].notnull())
            )
            if condition.any():
                res_df.loc[condition, sub_p_rate] = (
                    abs(res_df[p_true] - res_df[p_pre]) / res_df[p_true] * 100
                )

            # Accuracy is clamped into [0, 100] via the deviation clip.
            res_df[pre_p_rate] = 100 - res_df[sub_p_rate].clip(
                lower=0, upper=100
            )
            res_df[pre_p_rate] = res_df[pre_p_rate].round(2)

        res_df = res_df.drop(["sub_p_total_rate", "sub_p_kt_rate"], axis=1)

        # mean() skips NaN rows, i.e. days without an accuracy value.
        total_rate = res_df["pre_p_total_rate"].mean()
        kt_rate = res_df["pre_p_kt_rate"].mean()
        total_rate = round(total_rate, 2) if not np.isnan(total_rate) else None
        kt_rate = round(kt_rate, 2) if not np.isnan(kt_rate) else None

        return total_rate, kt_rate, res_df

    def ana_query(
        self,
        table_name,
        rules,
        sort_field=None,
        doc_time_field="data_time",
        is_cons=False,
    ):
        """Run a rules query against the configured backend.

        :param table_name: str table / index name
        :param rules: rules understood by the underlying DAL
        :param sort_field: optional sequence of sort fields; ES uses only
                           the first one
        :param doc_time_field: str time field for the ES query
        :param is_cons: kept for interface compatibility (unused here)
        :return: pd.DataFrame of results
        """
        if self.data_source == "es":
            res = self.es_dal.query_dataframe(
                rules,
                table_name,
                doc_time_field=doc_time_field,
                # Guard the subscript: sort_field defaults to None, and
                # the unconditional sort_field[0] raised TypeError.
                sort_field=sort_field[0] if sort_field else None,
            )
        else:
            res = self.sql_dal.sql_pandas.query_by_rules(
                table_name, rules, sort_field=sort_field
            )
        return res

    def ana_query_all(
        self,
        start_date,
        end_date,
        table_name,
        rules,
        data_source,
        doc_time_field,
        is_cons,
        is_one_day,
    ):
        """Query load data, reshaping consumer-DB rows when needed.

        The consumer DB stores one row per data_type with 15-minute
        readings in wide columns p_1..p_96; for a single day these are
        unpivoted into a (data_time, p_total, p_kt) frame. Multi-day
        consumer queries return daily maxima instead.

        :param start_date: datetime, range start (single-day case)
        :param end_date: datetime, range end (exclusive)
        :param table_name: str table / index name
        :param rules: rules understood by the underlying DAL
        :param data_source: "es" or a SQL flavour
        :param doc_time_field: str time field for the ES query
        :param is_cons: bool, data comes from the consumer DB
        :param is_one_day: bool, the range covers a single day
        :return: pd.DataFrame of results
        """
        if data_source == "es":
            res = self.es_dal.query_dataframe(
                rules,
                table_name,
                doc_time_field=doc_time_field,
                sort_field=None,
            )
        elif is_cons:
            if is_one_day:
                res_df = self.cons_sql_dal.sql_pandas.query_by_rules(
                    table_name, rules
                )
                if res_df.empty:
                    return res_df
                # Number of 15-minute slots in [start_date, end_date).
                end_idx = int(
                    (end_date - start_date).total_seconds() // 60 / 15
                )
                p_field_list = [f"p_{str(i+1)}" for i in range(end_idx)]
                res_df = res_df[p_field_list + ["data_type"]]
                time_list = pd.date_range(
                    start_date, periods=end_idx, freq="15min"
                )
                # p_N column -> timestamp lookup used by the merge below.
                time_df = pd.DataFrame({'data_time': time_list, 'p_field': p_field_list})

                def get_data_df(data_type, p_col):
                    # Unpivot one data_type's wide p_1..p_N row into a
                    # long (data_time, value) frame.
                    df = (
                        res_df.loc[res_df["data_type"] == data_type]
                        .set_index("data_type")
                        .stack()
                        .reset_index()
                    )
                    df.columns = ["data_type", "p_field", p_col]
                    # Left merge keeps every slot, leaving NaN for
                    # readings the row did not contain.
                    df = pd.merge(time_df, df, on="p_field", how='left')
                    df = df[["data_time", p_col]]
                    return df

                total_df = get_data_df(DataType.P_TOTAL, "p_total")
                kt_df = get_data_df(DataType.P_KT, "p_kt")
                res = pd.merge(total_df, kt_df, on="data_time")
            else:
                res_df = self.sql_dal.sql_pandas.query_by_rules(
                    table_name, rules
                )
                if res_df.empty:
                    return res_df
                # Which pair of daily-maximum columns to expose depends
                # on whether "max" means max total load or max AC load.
                p_total_field = (
                    "max_p_total"
                    if config["is_total_max"]
                    else "max_p_kt_total"
                )
                p_kt_field = (
                    "max_p_total_kt" if config["is_total_max"] else "max_p_kt"
                )
                res = res_df[["data_date", p_total_field, p_kt_field]]
                res = res.rename(
                    columns={
                        "data_date": "data_time",
                        p_total_field: "p_total",
                        p_kt_field: "p_kt",
                    }
                )
        else:
            res = self.sql_dal.sql_pandas.query_by_rules(table_name, rules)
        return res
