import getopt
import os
import sys
from datetime import datetime

import numpy as np
import pandas as pd

from air_web.config.config import config
from air_web.dw.data_mapping import ConsType
from air_web.dw.dws_common import AggCommon
from air_web.dw.logger import init_log
from air_web.web_flask.dal.base_dal import EsBaseDal

# Ensure the log directory exists before creating the logger.
# os.makedirs(exist_ok=True) replaces the original exists()/mkdir() pair,
# which had a check-then-create race and would crash if the directory was
# created by another process between the two calls.
os.makedirs("./logs/", exist_ok=True)
log = init_log("./logs/")


class StatSonsNum(AggCommon):
    """Generate the area_cons_num table.

    Collects per-consumer rows per organization and consumer type from
    Elasticsearch and SQL, aggregates consumer counts at several org
    levels (on7 / on5 / province), and writes the results to MySQL.
    """

    # is_day_max whitelist used in the ES query filter; chosen once at
    # import time from config["is_total_max"].
    IS_DAY_MAX_LIST = [1, 3] if config["is_total_max"] else [2, 3]

    def __init__(
        self,
        logger,
        task,
        start_time,
        end_time,
        step,
        step_dict,
        timedelta,
        rewrite=False,
    ):
        """Set up data sources and the aggregation window.

        Args:
            logger: logger used for progress/warning output.
            task: task descriptor dict (task_id / task_name keys).
            start_time / end_time: window boundaries; presumably parsed
                into datetimes by AggCommon (self.start_time.year is
                used below) — confirm in the base class.
            step: step identifier string passed through to AggCommon.
            step_dict: step configuration (e.g. the target save table).
            timedelta: accepted but neither stored nor forwarded here —
                TODO confirm it is required by callers before removing.
            rewrite: accepted but unused in this class — TODO confirm.
        """
        super().__init__(logger, task, start_time, end_time, step, step_dict)
        self.es_dal = EsBaseDal(config["ES_HOST"])
        self.c_cons_all = config.get("C_CONS_ALL", "aclr_base_doc_all")
        # NOTE(review): hard-coded Jan 30 -> Jan 31 window of the start
        # year; looks like a narrow debug range rather than the full
        # [start_time, end_time) span — confirm intent.
        self.start_date = str(self.start_time.year) + "-01-30"
        self.end_date = str(self.start_time.year) + "-01-31"
        self.orgno_typeid_cons_num_table = config.get(
            "ORGNO_TYPEID_CONS_NUM", "orgno_typeid_cons_num"
        )
        # Accumulators filled by stats() / agg_*() below.
        self.cons_df = pd.DataFrame()      # raw per-consumer rows
        self.cons_res_df = pd.DataFrame()  # aggregated org/type counts
        self.area_df = pd.DataFrame()      # type_id groupings for area rollups

    def get_id_df(self, res):
        """Flatten ES group-vector rows into a DataFrame.

        Each element of `res` is expected to carry, at row[0], a list of
        three bucket dicts in the fixed order
        [type_code_sort, type_id, cons_no] — TODO confirm against the
        output shape of EsBaseDal.get_group_vector.
        """
        id_list = []
        for row in res:
            id_list.append(
                [row[0][2]["cons_no"], row[0][1]["type_id"], row[0][0]["type_code_sort"]])
        id_df = pd.DataFrame(id_list, columns=["cons_no", "type_id", "type_code_sort"])
        return id_df

    def get_meter_num(self, on7):
        """Return distinct (cons_no, cons_num) pairs for one org (on7).

        Returns an empty DataFrame when every cons_num is 0; the caller
        treats that as "default every consumer's count to 1".
        """
        # NOTE(review): on7 is interpolated directly into the SQL. It
        # comes from internal org data here, but parameterize the query
        # if this value can ever be user-supplied.
        sql = f"""select distinct cons_no, cons_num
                 from aclr_base_doc_all
                 where on7={on7}
              """

        m_num_df = self.sql_engine.query(sql)
        if m_num_df["cons_num"].sum() == 0:
            self.logger.warning(
                "读取索引：{}cons_num为0,默认取1,task_id:{},step:{},star_time:{} ...".format(
                    self.c_cons_all, self.task_id, self.step, self.start_time
                )
            )
            return pd.DataFrame()
        return m_num_df

    def agg_org_all(self, org_field):
        """Aggregate cons_num sum/count over `org_field` across all types.

        Only rows with type_code_sort == 1 are counted. The result is
        appended to self.cons_res_df with type_id fixed to 0, meaning
        "all types combined".
        """
        org_type_cons_count = (
            self.cons_df.loc[self.cons_df["type_code_sort"]==1].groupby([org_field])
            .agg({"cons_num": ["sum", "count"]})
            .reset_index()
        )

        # Flatten the (column, aggfunc) MultiIndex produced by agg(),
        # e.g. ("cons_num", "sum") -> "cons_num_sum"; the group key gets
        # a trailing "_" which is stripped next.
        org_type_cons_count.columns = (
            org_type_cons_count.columns.get_level_values(0)
            + "_"
            + org_type_cons_count.columns.get_level_values(1)
        )
        org_type_cons_count.columns = [
            i[:-1] if i.endswith("_") else i
            for i in org_type_cons_count.columns
        ]
        # org_type_cons_count.set_axis(['org_no', 'cons_num', 'cons_count'], axis=1)
        org_type_cons_count = org_type_cons_count.rename(
            columns={
                org_field: "org_no",
                "cons_num_sum": "cons_num",
                "cons_num_count": "cons_count",
            }
        )
        org_type_cons_count["type_id"] = 0
        org_type_cons_count = org_type_cons_count[
            ["org_no", "type_id", "cons_num", "cons_count"]
        ]
        self.cons_res_df = pd.concat([self.cons_res_df, org_type_cons_count])

    def agg_org_type(self, org_field, type_field, is_filter=True):
        """Aggregate cons_num sum/count grouped by (org_field, type_field).

        Args:
            org_field: org column to group by ("on7", "on5", "province").
            type_field: type column ("type_id", "pare_type_id" or
                "type_code_sort").
            is_filter: when True, only rows with type_code_sort == 1 are
                aggregated; False keeps all rows (used when grouping by
                type_code_sort itself).
        """
        if is_filter:
            cons_df = self.cons_df.loc[self.cons_df['type_code_sort'] == 1]
        else:
            cons_df = self.cons_df
        org_type_cons_count = (
            cons_df.groupby([org_field, type_field])
            .agg({"cons_num": ["sum", "count"]})
            .reset_index()
        )
        # Flatten the (column, aggfunc) MultiIndex, then strip the
        # trailing "_" left on the group-key columns.
        org_type_cons_count.columns = (
            org_type_cons_count.columns.get_level_values(0)
            + "_"
            + org_type_cons_count.columns.get_level_values(1)
        )
        org_type_cons_count.columns = [
            i[:-1] if i.endswith("_") else i
            for i in org_type_cons_count.columns
        ]
        # org_type_cons_count.set_axis(['org_no', 'cons_num', 'cons_count'], axis=1)
        org_type_cons_count = org_type_cons_count.rename(
            columns={
                org_field: "org_no",
                "cons_num_sum": "cons_num",
                "cons_num_count": "cons_count",
                type_field: "type_id",
            }
        )
        self.cons_res_df = pd.concat([self.cons_res_df, org_type_cons_count])
        # Only the genuine type_id grouping feeds the area rollup frame.
        if type_field == "type_id":
            self.area_df = pd.concat([self.area_df, org_type_cons_count])

    def stat_orgno_typeid_cons_num(self):
        """Run every aggregation combination and persist the result.

        Aggregates by (org, type_id), (org, pare_type_id) and
        (org, type_code_sort) plus the all-types rollup, for each of the
        on7 / on5 / province org levels, then upserts the combined frame
        into the orgno_typeid_cons_num table.
        """
        self.agg_org_type("on7", "type_id")
        self.agg_org_type("on7", "pare_type_id")
        self.agg_org_type("on7", "type_code_sort", is_filter=False)
        self.agg_org_all("on7")
        self.agg_org_type("on5", "type_id")
        self.agg_org_type("on5", "pare_type_id")
        self.agg_org_type("on5", "type_code_sort", is_filter=False)
        self.agg_org_all("on5")
        self.agg_org_type("province", "type_id")
        self.agg_org_type("province", "pare_type_id")
        self.agg_org_type("province", "type_code_sort", is_filter=False)
        self.agg_org_all("province")

        self.cons_res_df["year"] = self.start_time.year
        self.cons_res_df["cons_num"] = self.cons_res_df["cons_num"].astype(
            "int64"
        )
        self.cons_res_df["cons_count"] = self.cons_res_df["cons_count"].astype(
            "int64"
        )
        # NaN -> None so the SQL layer writes NULLs instead of NaN.
        self.cons_res_df = self.cons_res_df.replace({np.nan: None})
        if len(self.cons_res_df):
            self.sql_engine.update_df_by_id(
                self.cons_res_df, self.orgno_typeid_cons_num_table
            )

        print(
            "写入mysql:{},task_id:{},step:{},star_date:{},end_date:{},数据条数:{}".format(
                self.orgno_typeid_cons_num_table,
                self.task_id,
                self.step,
                self.start_date,
                self.end_date,
                len(self.cons_res_df),
            )
        )

    def stats(self, base_df, province_no):
        """Collect per-consumer rows for every on7 org into self.cons_df.

        For each distinct on7 (with its parent on5), queries the ES
        power-load index over [start_date, end_date), joins the consumer
        ids with the type hierarchy in base_df and with per-consumer
        meter counts, then accumulates everything in self.cons_df.

        Args:
            base_df: distinct (on7, on5, type_id, pare_type_id,
                type_code_sort) rows from get_base_org_list().
            province_no: province org number from get_province_no().
        """
        base_df = base_df.sort_values(["on5", "on7"])
        # Map each on7 to its parent on5 for index-name templating.
        on7_dict = (
            base_df[["on7", "on5"]]
            .drop_duplicates()
            .set_index("on7")["on5"]
            .to_dict()
        )
        for on7, on5 in on7_dict.items():
            self.logger.info(
                "正在聚合task_id:{},step:{},star_time:{},org:{} ...".format(
                    self.task_id, self.step, self.start_time, on7
                )
            )

            # ES filter rules: time window, current org, is_day_max
            # whitelist, and non-empty grouping fields.
            rules = [
                ("data_time", "query", ">=", self.start_date),
                ("data_time", "query", "<", self.end_date),
                ("on7", "query", "=", on7),
                ("is_day_max", "query", "in", self.IS_DAY_MAX_LIST),
                ("type_code_sort", "same", ">", "0"),
                ("type_id", "same", ">", "0"),
                ("cons_no", "same", ">", "0"),
            ]

            # Build the concrete index name from the template in config:
            # placeholders "on5"/"on7" become the org numbers, and
            # "cal01" becomes "1" or the "*" wildcard.
            is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
            index_name = (
                config["POWERLOAD"]
                .replace("on5", str(on5))
                .replace("on7", str(on7))
                .replace("cal01", is_cal)
            )
            res = self.es_dal.get_group_vector(
                rules, index_name, doc_time_field="data_time"
            )
            if len(res) == 0:
                self.logger.info(
                    "查询索引为空:{},task_id:{},step:{},star_time:{},on7:{} ...".format(
                        index_name,
                        self.task_id,
                        self.step,
                        self.start_time,
                        on7,
                    )
                )
            id_df = self.get_id_df(res)
            id_df["on7"] = on7
            id_df["province"] = province_no
            id_df = id_df.merge(base_df, on=["on7", "type_id", "type_code_sort"], how="left")

            # Attach per-consumer meter counts; default to 1 when the
            # source reports all zeros (see get_meter_num).
            m_num_df = self.get_meter_num(on7)
            if m_num_df.empty:
                id_df["cons_num"] = 1
            else:
                id_df = id_df.merge(m_num_df, on="cons_no", how="left")
            self.cons_df = pd.concat([self.cons_df, id_df])

        self.logger.info(
            "用户数量:{},task_id:{},step:{},star_time:{} ...".format(
                len(self.cons_df), self.task_id, self.step, self.start_time
            )
        )

        # NOTE(review): results are dumped to a local CSV and the DB-write
        # path below is commented out — this looks like a debugging state;
        # confirm before relying on this step in production.
        self.cons_df.to_csv('./cons.csv', index=False)
        # self.stat_orgno_typeid_cons_num()
        #
        # org_cons_count = self.area_df.groupby("org_no").agg(
        #     {"cons_num": "sum"}
        # )
        #
        # org_type_count = (
        #     self.area_df["org_no"]
        #     .value_counts()
        #     .reset_index()
        #     .rename(columns={"index": "org_no", "org_no": "type_num"})
        # )
        # self.res_df = pd.merge(
        #     org_cons_count, org_type_count, how="left", on="org_no"
        # )
        # self.res_df["cons_num"] = self.res_df["cons_num"].astype("int64")
        # self.res_df["year"] = self.start_time.year
        # self.res_df = self.res_df.replace({np.nan: None})

    def get_base_org_list(self):
        """Fetch distinct org/type hierarchy rows for ordinary users."""
        sql = f"""select distinct on7, on5, type_id, pare_type_id, type_code_sort
                 from c_cons where cons_type={ConsType.ORDINARY_USER}"""
        base_df = self.sql_engine.query(sql)
        self.logger.info("get base org list {}".format(len(base_df)))
        return base_df

    def get_province_no(self):
        """Return the org_no of the province-level org (org_level=0)."""
        sql = "select org_no from real_org_no where org_level=0"
        df = self.sql_engine.query(sql)
        # Assumes exactly one province row exists — raises IndexError if
        # the table is empty.
        province_no = df["org_no"].tolist()[0]
        return province_no

    def main(self):
        """Entry point: collect, aggregate and save for this window."""
        self.logger.info(
            "正在处理task_id:{},step:{},star_time:{} ...".format(
                self.task_id, self.step, self.start_time
            )
        )
        base_df = self.get_base_org_list()
        province_no = self.get_province_no()
        self.stats(base_df, province_no)
        # save_data_to_table is presumably provided by AggCommon and
        # writes to step_dict["save_table"] — confirm in the base class.
        self.save_data_to_table()


if __name__ == "__main__":
    # Default window: the current calendar year (Jan 1 through next Jan 1).
    start_time = str(datetime.now().year) + "-01-01"
    end_time = str(datetime.now().year + 1) + "-01-01"

    # Optional command-line overrides: -s START_DATE -e END_DATE.
    opts, args = getopt.getopt(sys.argv[1:], "s:e:")
    for flag, value in opts:
        if flag == "-s":
            start_time = value
        elif flag == "-e":
            end_time = value

    # Year-start boundaries covering the requested window.
    date_list = pd.date_range(
        start=start_time, end=end_time, freq="YS"
    ).strftime("%Y-%m-%d %H:%M:%S")
    step_dict = {"save_table": "area_cons_num"}
    # One aggregation run per consecutive pair of year boundaries.
    for window_start, window_end in zip(date_list, date_list[1:]):
        StatSonsNum(
            log,
            {"task_id": 4, "task_name": "stat_cons_num"},
            window_start,
            window_end,
            "step_area_cons",
            step_dict,
            "1 D",
            rewrite=False,
        ).main()
