import time
from collections import defaultdict
from datetime import datetime

import pandas as pd
from elasticsearch.exceptions import ConnectionTimeout

from air_web.config.config import config, industry_agg_conf
from air_web.dw.data_mapping import AllIndustryMap, IndustryMap
from air_web.dw.dws_common import AggCommon


class IndResAgg(AggCommon):
    """Aggregate industry result indicators from a pre-built base DataFrame.

    Extends ``AggCommon`` with a fixed set of power indicator columns that
    are summed during aggregation; ``main`` drives the full pipeline
    (add dimensions -> aggregate -> rename -> flag -> persist).
    """

    # Indicator columns; each is aggregated with "sum" (see __init__).
    INDICATORS_COLS = [
        "p_total_sum",
        "p_kt_sum",
        "p_base_sum",
        "p_total_count",
        "p_kt_count",
        "p_base_count",
    ]

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        rewrite,
        base_df,
    ):
        """Store the base DataFrame and build the indicator->agg-func map.

        Args:
            logger: application logger.
            task: task descriptor consumed by ``AggCommon``.
            start_time: inclusive start of the aggregation window.
            end_time: exclusive end of the aggregation window.
            step: aggregation step identifier.
            step_dict: step configuration passed through to ``AggCommon``.
            rewrite: whether existing results may be overwritten.
            base_df: pre-aggregated input DataFrame to aggregate further.
        """
        super().__init__(
            logger,
            task,
            start_time,
            end_time,
            step,
            step_dict,
            rewrite=rewrite,
        )
        self.base_df = base_df
        # Every indicator column is summed when aggregating.
        self.indicators = {col: "sum" for col in self.INDICATORS_COLS}

    def main(self):
        """Entry point: run the aggregation pipeline and save the result.

        Returns early (without saving) when the input DataFrame is empty.
        """
        # Lazy %-style args: the message is rendered only if actually emitted.
        # Typo fixed in the message: "star_time" -> "start_time".
        self.logger.info(
            "正在处理task_id:%s,step:%s,start_time:%s ...",
            self.task_id,
            self.step,
            self.start_time,
        )
        if self.base_df.empty:
            self.logger.error(
                "初始聚合数据为空，不能继续聚合task_id:%s,step:%s,start_time:%s ...",
                self.task_id,
                self.step,
                self.start_time,
            )
            return
        # Pipeline steps are provided by AggCommon / this class's config.
        self.add_agg_dimensions(industry_agg_conf)
        self.agg_data()
        self.rename_add_dimensions(industry_agg_conf)
        self.rename_agg_indicators()
        self.add_max_time_flag()
        # Expose the window start as the display name of the time bucket.
        self.res_df["time_display_name"] = self.start_time
        self.save_data_to_table()


class IndResAggBase(AggCommon):
    """Build the industry-level base DataFrame from the source table.

    Queries raw rows for the configured ``type_id`` set within the time
    window and enriches them with industry / all-industry id and name
    columns derived from the config mapping tables.
    """

    def __init__(
        self, logger, task, start_time, end_time, step, step_base_dict
    ):
        """Delegate all setup to ``AggCommon``."""
        super().__init__(
            logger, task, start_time, end_time, step, step_base_dict
        )

    def get_base_df(self):
        """Query and enrich the base DataFrame.

        Returns:
            pandas.DataFrame: rows in ``[start_time, end_time)`` restricted
            to the configured ``type_id`` values, with org NaNs filled and
            industry mapping columns added.
        """
        industry_map = config["industry_map"]
        type_id_str_list = [
            str(type_id) for type_id in industry_map["type_id"]
        ]
        # NOTE(review): values are interpolated directly into the SQL text.
        # They come from internal config/task state (not user input), but a
        # parameterized query would be safer — confirm sql_engine supports it.
        sql = f"""select * 
                  from {self.source_table}
                  where data_time >= '{self.start_time}'
                    and data_time < '{self.end_time}'
                    and type_id in ({','.join(type_id_str_list)})
               """
        base_df = self.sql_engine.query(sql)
        # Fill NaN values, otherwise groupby drops those rows.
        # Assign instead of `inplace=True` on a column selection: chained
        # inplace fillna raises FutureWarning in pandas 2.x and is silently
        # ineffective under copy-on-write (pandas 3 default).
        base_df["p_org_no"] = base_df["p_org_no"].fillna(-1)
        base_df["p_org_no"] = base_df["p_org_no"].astype("int64")
        base_df["ad_org_name"] = base_df["ad_org_name"].fillna("全省")

        # Map type_id -> industry ids/names via positional list-to-list
        # replacement, then ids -> display names via the static maps.
        base_df["industry_id"] = base_df["type_id"].replace(
            industry_map["type_id"], industry_map["industry_id"]
        )
        base_df["industry_name"] = base_df["industry_id"].replace(
            IndustryMap.MAP
        )
        base_df["all_industry_id"] = base_df["type_id"].replace(
            industry_map["type_id"], industry_map["all_industry_id"]
        )
        base_df["all_industry_name"] = base_df["all_industry_id"].replace(
            AllIndustryMap.MAP
        )
        # Sentinel: no "highest type" distinction at the base level.
        base_df["highest_type_id"] = 0

        return base_df

    def main(self):
        """Entry point: fetch the base DataFrame and log timing/row count.

        Returns:
            pandas.DataFrame: result of :meth:`get_base_df`.
        """
        # Lazy %-style args; typo fixed: "star_time" -> "start_time".
        self.logger.info(
            "正在处理task_id:%s,step:%s,start_time:%s ...",
            self.task_id,
            self.step,
            self.start_time,
        )
        t1 = time.time()
        base_df = self.get_base_df()
        t2 = time.time()
        self.logger.info(
            "处理完成time:%s,task_id:%s,step:%s,start_time:%s,数据条数:%s ...",
            t2 - t1,
            self.task_id,
            self.step,
            self.start_time,
            len(base_df),
        )
        return base_df
