import getopt
import sys
from datetime import datetime

import numpy as np
import pandas as pd
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionTimeout

from air_web.config.config import config
from air_web.data_platform import init_db
from air_web.dw.data_mapping import ConsType, IndustryMap, SpecTypeMap


class RegPot:
    """Aggregate 15-minute load data into special consumer-type series
    (general industry & commerce, high-energy-consuming industry) and
    write the results to the ``SPEC_TYPE_15MIN`` table.
    """

    # Source table holding 15-min load pre-aggregated by org_no/type_id.
    ORGNO_TYPEID_15MIN = config.get("ORGNO_TYPEID_15MIN", "orgno_typeid_15min")
    # Load metrics summed at every aggregation level.
    STAT_COLS = ["p_total_sum", "p_kt_sum", "p_base_sum"]
    # Destination table for the special-type series.
    SPEC_TYPE_15MIN = config.get("SPEC_TYPE_15MIN", "spec_type_15min")

    def __init__(self, start_time, end_time):
        """
        Args:
            start_time: inclusive window start, "%Y-%m-%d %H:%M:%S" string.
            end_time: exclusive window end, "%Y-%m-%d %H:%M:%S" string.
                Ignored when start_time falls on the current day (see below).
        """
        self.sql_engine = init_db()
        self.es = Elasticsearch(config["ES_HOST"])

        self.start_time = datetime.strptime(start_time, "%Y-%m-%d %H:%M:%S")
        # If the target day is today, cap the window at "now", because the
        # per-consumer results already store forecast points for the whole
        # day and we must not aggregate beyond the current moment.
        if self.start_time.date() == datetime.now().date():
            self.end_time = datetime.now()
        else:
            self.end_time = datetime.strptime(end_time, "%Y-%m-%d %H:%M:%S")

    def save_data_to_table(self, res_df):
        """Persist res_df into SPEC_TYPE_15MIN (upsert by id); no-op on empty."""
        if res_df.empty:
            print(f"写入{self.SPEC_TYPE_15MIN},结果数据为空")
            return
        # Store SQL NULL instead of NaN for missing values.
        res_df.replace({np.nan: None}, inplace=True)
        self.sql_engine.update_df_by_id(res_df, self.SPEC_TYPE_15MIN)
        print(f"写入{self.SPEC_TYPE_15MIN},数据条数:{len(res_df)}")

    def high_energy_consuming_industry(self):
        """Large industry (high energy consumption): the high-energy-consuming
        industries within industry minus the "six-ensure" industries."""
        child_trade_code = config["large_industry"]["child_trade_code"]
        if child_trade_code:
            # Aggregate straight from per-consumer ES data by trade code.
            where_sql = f"child_trade_code in ({','.join(child_trade_code)})"
            large_df = self.agg_cons_data(where_sql)
        else:
            # Fall back to the pre-aggregated 15-min table by type_id.
            type_id = config["large_industry"]["type_id"]
            where_sql = f"type_id in ({','.join(type_id)})"
            large_df = self.get_15min_data(where_sql)

        if large_df.empty:
            print("大工业聚合数据为空")
            return
        large_df["type_id"] = SpecTypeMap.HIGH_ENERGY_CON_IND
        large_df["type_code"] = SpecTypeMap.MAP[SpecTypeMap.HIGH_ENERGY_CON_IND]
        self.save_data_to_table(large_df)

    def get_15min_data(self, where_sql):
        """Sum STAT_COLS per org and 15-min point from ORGNO_TYPEID_15MIN.

        Args:
            where_sql: extra SQL predicate. NOTE(review): interpolated into
                the statement as-is; values come from config / mapping
                constants — never pass user-controlled input here.

        Returns:
            DataFrame with org_no/org_name/p_org_no/data_time + STAT_COLS.
        """
        sql = f"""select org_no, org_name, p_org_no, data_time,
                         sum(p_total_sum) as p_total_sum, 
                         sum(p_kt_sum) as p_kt_sum, 
                         sum(p_base_sum) as p_base_sum
                  from {self.ORGNO_TYPEID_15MIN}
                  where data_time >= '{self.start_time}' and data_time < '{self.end_time}' 
                    and {where_sql}
                  group by org_no, org_name, p_org_no, data_time
              """
        thrid_ind_df = self.sql_engine.query(sql)
        return thrid_ind_df

    def general_industry_and_commerce(self):
        """General industry & commerce: tertiary industry minus the
        "six-ensure" industries."""
        where_sql = f"type_id = {IndustryMap.THIRD_IND}"
        thrid_ind_df = self.get_15min_data(where_sql)
        if thrid_ind_df.empty:
            print("第三产业聚合数据为空")
            return

        type_id = config["six_ensure"]["type_id"]
        where_sql = f"type_id in ({','.join(type_id)})"
        six_df = self.get_15min_data(where_sql)
        if six_df.empty:
            print("第三产业中的六保行业聚合数据为空")
            return

        thrid_ind_df = thrid_ind_df.merge(
            six_df[["org_no", "data_time"] + self.STAT_COLS],
            on=["org_no", "data_time"],
            how="left",
            suffixes=("", "_six"),
        )
        for col in self.STAT_COLS:
            col_six = f"{col}_six"
            # Orgs with no six-ensure rows get NaN from the left join;
            # treat that as 0 so their tertiary totals are not wiped to NaN.
            thrid_ind_df[col] = (
                thrid_ind_df[col] - thrid_ind_df[col_six].fillna(0)
            )
            thrid_ind_df.drop(col_six, axis=1, inplace=True)
        thrid_ind_df["type_id"] = SpecTypeMap.GENERAL_IND_AND_COM
        thrid_ind_df["type_code"] = SpecTypeMap.MAP[
            SpecTypeMap.GENERAL_IND_AND_COM
        ]
        self.save_data_to_table(thrid_ind_df)

    def proc_agg_data(self, agg_res):
        """Flatten ES terms-aggregation buckets into a DataFrame.

        Each bucket "key" is a '|'-joined composite whose first segment is
        data_time; every sub-aggregation becomes one column.
        """
        res_list = []
        for bucket in agg_res:
            key_list = bucket["key"].split("|")
            bucket_dict = {"data_time": key_list[0]}
            for key, val_dict in bucket.items():
                # Skip bucket bookkeeping fields; keep only metric aggs.
                if key in ["key", "doc_count"]:
                    continue
                bucket_dict.update({key: val_dict["value"]})
            res_list.append(bucket_dict)
        df = pd.DataFrame(res_list)
        return df

    def agg_data(self, on7_df):
        """Aggregate per-consumer 15-min load from ES up to county (on7),
        city (on5) and province level.

        Args:
            on7_df: one row per county org carrying its on5/on7 codes and
                names, province info, and a comma-joined consumer-id list.

        Returns:
            DataFrame of org_no/org_name/p_org_no/data_time + STAT_COLS
            stacked for all three org levels; empty frame when no ES data.
        """
        on7_dict = (
            on7_df[["on7", "on5", "cid_list"]]
            .drop_duplicates()
            .set_index("on7")[["on5", "cid_list"]]
            .to_dict()
        )

        # Build the painless script that joins the grouping fields with '|'
        # (kept generic although only data_time is grouped on today).
        agg_columns = ["data_time"]
        group_source = ""
        for idx, key in enumerate(agg_columns):
            add_source = (
                f"+'|'+doc['{key}'].value" if idx else f"doc['{key}'].value"
            )
            group_source += add_source

        res_df = pd.DataFrame()
        for on7, on5 in on7_dict["on5"].items():
            cid_list = on7_dict["cid_list"][on7].split(",")
            print(
                f"正在聚合,date:{self.start_time},on7:{on7},用户数:{len(cid_list)} ..."
            )

            query = {
                "size": 0,
                "query": {
                    "bool": {
                        "must": [
                            {
                                "range": {
                                    "data_time": {
                                        "gte": self.start_time,
                                        "lt": self.end_time,
                                    }
                                }
                            },
                            {"terms": {"cons_no": cid_list}},
                        ]
                    }
                },
                "aggs": {
                    "group_by": {
                        "terms": {
                            "size": 10000,
                            "script": {
                                "source": group_source,
                                "lang": "painless",
                            },
                        },
                        "aggs": {
                            "p_total_sum": {"sum": {"field": "p_total"}},
                            "p_kt_sum": {"sum": {"field": "p_kt"}},
                            "p_base_sum": {"sum": {"field": "p_base"}},
                        },
                    }
                },
            }

            is_cal = "1" if config["c_cons_filter_is_cal"] else "*"
            index_name = (
                config["POWERLOAD"]
                .replace("on5", str(on5))
                .replace("on7", str(on7))
                .replace("cal01", is_cal)
            )
            # Retry transient ES timeouts. Reset `result` each iteration:
            # previously, if all retries failed, `result` was either unbound
            # (NameError) or stale from the prior on7, silently reusing the
            # previous county's buckets.
            result = None
            for i in range(10):
                try:
                    result = self.es.search(
                        index=index_name, body=query, request_timeout=300
                    )
                    break
                except ConnectionTimeout:
                    print(f"search失败次数:{i+1},date:{self.start_time},on7:{on7}")
            if result is None:
                print(f"search重试失败,跳过date:{self.start_time},on7:{on7}")
                continue
            agg_res = result["aggregations"]["group_by"]["buckets"]
            if len(agg_res) == 0:
                print(f"数据为空，跳过date:{self.start_time},on7:{on7}")
                continue
            on7_res_df = self.proc_agg_data(agg_res)
            on7_res_df["on7"] = on7
            res_df = pd.concat([res_df, on7_res_df])
        if res_df.empty:
            return res_df

        # Attach org metadata, then roll up to city and province level.
        res_df = res_df.merge(
            on7_df[
                ["on5", "shi", "on7", "xian", "province_no", "province_name"]
            ],
            on="on7",
            how="left",
        )

        on5_df = (
            res_df.groupby(["on5", "shi", "province_no", "data_time"])[
                self.STAT_COLS
            ]
            .sum()
            .reset_index()
        )
        on5_df.rename(
            columns={
                "on5": "org_no",
                "shi": "org_name",
                "province_no": "p_org_no",
            },
            inplace=True,
        )

        pro_df = (
            res_df.groupby(["province_no", "province_name", "data_time"])[
                self.STAT_COLS
            ]
            .sum()
            .reset_index()
        )
        pro_df.rename(
            columns={"province_no": "org_no", "province_name": "org_name"},
            inplace=True,
        )
        pro_df["p_org_no"] = -1  # province level has no parent org

        res_df.rename(
            columns={"on7": "org_no", "xian": "org_name", "on5": "p_org_no"},
            inplace=True,
        )
        cols = [
            "org_no",
            "org_name",
            "p_org_no",
            "data_time",
            "p_total_sum",
            "p_kt_sum",
            "p_base_sum",
        ]
        # Stack county, city and province rows in a single concat.
        res_df = pd.concat([res_df[cols], on5_df[cols], pro_df[cols]])

        # astype("datetime64") without a unit is rejected by pandas >= 2.0;
        # pd.to_datetime gives the same result on older versions too.
        res_df["data_time"] = pd.to_datetime(res_df["data_time"])
        return res_df

    def agg_cons_data(self, where_sql):
        """Look up the consumers matching where_sql in the archive, group
        them per county org, then aggregate their ES load via agg_data.

        Args:
            where_sql: predicate over aclr_base_doc_all, interpolated as-is
                (internal/config values only — not user input).

        Returns:
            Multi-level aggregated DataFrame, or an empty one when the
            archive query matches nothing.
        """
        sql = f"""select cc.on5,cc.shi,cc.on7,cc.xian,
                         ro.org_no province_no,ro.org_name province_name,
                         group_concat(cons_no) as cid_list 
                  from c_cons cc
                  left join real_org_no ro on ro.org_level = 0
                  where cons_no in 
                  (select distinct cons_no
                   from aclr_base_doc_all
                   where {where_sql} and cons_type={ConsType.ORDINARY_USER}
                  )
                  group by on5,shi,on7,xian,province_no,province_name
                  order by on5,on7
              """
        on7_df = self.sql_engine.query(sql)
        if on7_df.empty:
            print("档案中未查到对应的数据")
            return pd.DataFrame()
        df = self.agg_data(on7_df)
        return df

    def main(self):
        """Run all special-type aggregations for the configured window."""
        self.general_industry_and_commerce()
        self.high_energy_consuming_industry()


if __name__ == "__main__":
    # Default window; overridable via -s (start) / -e (end) options,
    # both in "YYYY-MM-DD" form.
    start_date = "2020-06-28"
    end_date = "2020-06-30"

    opts, _args = getopt.getopt(sys.argv[1:], "s:e:")
    for opt, val in opts:
        if opt == "-s":
            start_date = val
        elif opt == "-e":
            end_date = val

    # One midnight timestamp per day; each consecutive pair forms a
    # [start, end) window that is processed independently.
    date_list = pd.date_range(
        start=start_date, end=end_date, freq="1 D"
    ).strftime("%Y-%m-%d %H:%M:%S")
    for i in range(len(date_list) - 1):
        RegPot(date_list[i], date_list[i + 1]).main()
