import os

import pandas as pd
from es_pandas import es_pandas

from air_web.config.config import config
from air_web.data_platform import init_db
from air_web.dw.data_mapping import ConsType
from air_web.dw.logger import init_log
from air_web.web_flask.dal.base_dal import EsBaseDal
import ctypes;

# Ensure the log directory exists before wiring up the module logger.
# os.makedirs(..., exist_ok=True) also creates any missing parent
# directories and is race-free, unlike the original
# `if not exists: os.mkdir(...)` check-then-act pair.
LOG_DIR = "/home/zshield/logs/dw/"
os.makedirs(LOG_DIR, exist_ok=True)
log = init_log(LOG_DIR)


class AclrResPowerVirtual:
    """Sync virtual-consumer power-load data from Elasticsearch into MySQL.

    Pulls consumer profiles and per-index power-load documents from ES and
    upserts them into the MySQL tables named in ``config``.
    (Original docstring: 生成aclr_res_cons — "generate aclr_res_cons".)
    """

    def __init__(self):
        # es_pandas wrapper: bulk ES index -> pandas DataFrame export.
        self.ep = es_pandas(config["ES_HOST"])
        # Project DAL for rule-based ES queries.
        self.es_dal = EsBaseDal(config["ES_HOST"])
        self.sql_engine = init_db()
        self.logger = log
        # BUGFIX: the original loaded a CPython extension with ctypes.CDLL
        # and called my_lib.zElasticsearch(host); ctypes functions return a
        # plain C int by default, which has no `.cat` attribute, so
        # get_all_index() could never have worked. Reuse the Elasticsearch
        # client es_pandas already created instead.
        # NOTE(review): assumes es_pandas exposes its client as ``.es`` —
        # confirm against the installed es_pandas version.
        self.es = self.ep.es
        # Index-name pattern to scan and destination MySQL table.
        self.index_name = config["POWERLOAD_VIRTUAL_IDX"]
        self.save_table = config["POWERLOAD_VIRTUAL_TABLE"]

    def get_all_index(self):
        """Return the names of all ES indexes matching ``self.index_name``."""
        idxs = self.es.cat.indices(index=self.index_name, format="json")
        names = [x["index"] for x in idxs]
        print(f"es:{config['ES_HOST']},获取到相关索引数量:{len(names)},退出")
        return names

    def get_res_power(self):
        """Copy power-load documents from every matching ES index into MySQL.

        Large indexes (> 10 000 docs) are read in scroll pages of 10 000 and
        flushed to MySQL every two pages to bound memory; small indexes are
        fetched with a single query.
        """
        field_name = "cons_no"
        index_names = self.get_all_index()
        # Rule: only documents where cons_no exists.
        rules = [(field_name, "query", "exist")]
        for search_index in index_names:
            count = self.es_dal.ruleng_query_count(
                rules, search_index, doc_time_field="data_time"
            )
            self.logger.info(f"index_name:{search_index}, count:{count}")
            if count > 10000:
                per = 10000
                result_df = pd.DataFrame()
                # BUGFIX: ceil division; the original `count // per + 1`
                # issued one extra empty query whenever count was an exact
                # multiple of `per`.
                pages = (count + per - 1) // per
                for i in range(pages):
                    res_df = self.es_dal.query_dataframe_scroll(
                        rules,
                        search_index,
                        scroll_size=per,
                        doc_time_field="data_time",
                        start_index=i * per,
                    )
                    result_df = pd.concat([result_df, res_df])
                    # Flush every second page (~20 000 rows) to keep memory flat.
                    if (i + 1) % 2 == 0:
                        result_df.reset_index(drop=True, inplace=True)
                        self.sql_engine.update_df_by_id(
                            result_df, self.save_table
                        )
                        result_df = pd.DataFrame()
                # Write any trailing partial batch.
                if not result_df.empty:
                    self.sql_engine.update_df_by_id(result_df, self.save_table)
            else:
                result_df = self.es_dal.query_dataframe(
                    rules, search_index, doc_time_field="data_time"
                )
                self.sql_engine.update_df_by_id(result_df, self.save_table)

    def get_cons_df(self):
        """Import consumer profiles (ordinary + line users) from ES into MySQL.

        Fetches documents whose cons_type is ORDINARY_USER or LINE_USER,
        keeps the profile columns below, de-duplicates, and upserts them
        into the ``C_CONS`` table.
        """
        query_rule = {
            "query": {"terms": {"cons_type": [ConsType.ORDINARY_USER, ConsType.LINE_USER]}}
        }
        df = self.ep.to_pandas(config["C_CONS_IDX"], query_rule=query_rule)
        res_df = df[
            [
                "on5",
                "shi",
                "on7",
                "xian",
                "type_id",
                "type_code",
                "type_code_sort",
                "cons_no",
                "cons_name",
                "cons_type",
                "pare_type_id",
                "pare_type_code",
                "org_no",
                "org_name",
            ]
        ].drop_duplicates()
        self.sql_engine.update_df_by_id(res_df, config["C_CONS"])

    def main(self):
        """Run the full import: consumer profiles first, then power-load data."""
        print(
            f"执行虚拟用户档案导入mysql， 索引前缀：{config['C_CONS_IDX']}，写入表：{config['C_CONS']}"
        )
        self.get_cons_df()
        # BUGFIX: the original printed self.save_table in both slots; the
        # first slot is labeled "索引前缀" (index prefix) and should show the
        # index pattern being scanned.
        print(f"执行多索引导入mysql， 索引前缀：{self.index_name}，写入表：{self.save_table}")
        self.get_res_power()
        print("end")


if __name__ == "__main__":
    # Script entry point: run the full ES -> MySQL import pipeline.
    AclrResPowerVirtual().main()
