#!/usr/bin/python3.6
# coding=utf-8
import json
import os
import sys
import time
import getopt

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
from utils import cmdutils
from sample import sample
import read_config


class Caldimsmetrics:
    """Generate and run Spark SQL that computes data-quality metrics.

    Each entry of the yaml ``custom`` config describes one monitor and is
    dispatched on its ``cal_type``:

        1 - table-level metrics            (``__cal_metrics``)
        2 - metrics grouped by a dim col   (``__cal_dims``)
        3 - free-form user-supplied SQL    (``__cal_sparkSQL``)

    Results are inserted into ``<db>.dm_quality_metric_di`` partitioned by date.
    """

    def __init__(self, env, arg_date, sample_table, checked_table, checked_db, custom):
        # custom: list of per-monitor config dicts parsed from the yaml file
        self.__yaml_custom = custom
        # partition date being checked (string, used in date='...' predicates)
        self.__arg_date = arg_date
        self.__env = env
        self.__db = checked_db
        self.__db4sample = checked_db
        # (sampled) table the metrics are computed from
        self.__sample_table = sample_table
        self.__checked_table = checked_table
        # NOTE(review): no "." between db and table here — looks like a latent
        # bug, but the attribute is never read in this file, so left as-is.
        self.__cols_meta_table = self.__db4sample + self.__checked_table
        self.__target_metric_table = "dm_quality_metric_di"

    def cal(self):
        """Dispatch every yaml config entry to the handler for its cal_type."""
        for config in self.__yaml_custom:
            cal_type = config.get('cal_type')
            if cal_type == 1:
                self.__cal_metrics(config)
            elif cal_type == 2:
                self.__cal_dims(config)
            elif cal_type == 3:
                self.__cal_sparkSQL(config)

    def get_cal_metrics_sql_metas(self, config):
        """Build the list of (sql, metas, priority, monitor_id) tuples for a
        cal_type 1 config.  A single SQL statement covers all cols/algos."""
        monitor_id = config.get('monitor_id')
        sql, meta = self.__gen_metric_sql(config.get('cal_algos'),
                                          config.get('cols'),
                                          monitor_id,
                                          config.get('conditions'),
                                          config.get('cal_type'),
                                          config.get('dim_value'))
        return [(sql, meta, config.get('priority'), monitor_id)]

    def get_cal_dims_sql_mets(self, config):
        """Build (sql, metas, priority, monitor_id) tuples for a cal_type 2
        config — one SQL statement per dimension column."""
        monitor_id = config.get('monitor_id')
        priority = config.get('priority')
        sqls, meta = self.__gen_dims_sql(config.get('cal_algos'),
                                         config.get('cols'),
                                         monitor_id,
                                         config.get('conditions'),
                                         config.get('cal_type'),
                                         config.get('dim_value'))
        return [(sql, meta, priority, monitor_id) for sql in sqls]

    def __cal_dims(self, config):
        """Execute every generated dim-grouped metric statement."""
        for sqlm in self.get_cal_dims_sql_mets(config):
            cmdutils.exec_spark_sql(sqlm[0])

    def __cal_metrics(self, config):
        """Execute the generated table-level metric statement."""
        for sqlm in self.get_cal_metrics_sql_metas(config):
            cmdutils.exec_spark_sql(sqlm[0])

    def __cal_sparkSQL(self, config):
        """cal_type 3: wrap the user-supplied SQL from the yaml config in an
        INSERT into the metric table and run it.

        The user SQL must project: val, col_name, cal_algo, dim_value.
        Supported template placeholders: {{ table }}, {{ date }}, {{ db }}.
        """
        env = self.__env
        arg_date = self.__arg_date
        db = self.__db
        target_metric_table = db + "." + self.__target_metric_table
        sample_table = self.__sample_table
        checked_table = self.__checked_table
        cal_time = int(time.time())
        monitor_id = config.get('monitor_id')
        cal_type = config.get('cal_type')
        monitor_name = config.get('monitor_name')
        sql = config.get('sql')
        # Expand the yaml template placeholders.
        sql = (sql.replace("{{ table }}", sample_table)
                  .replace("{{ date }}", "'" + str(arg_date) + "'")
                  .replace("{{ db }}", db))
        # BUGFIX: the user SQL is concatenated AFTER %-formatting so that a
        # literal '%' inside it can no longer break the format expansion.
        final_sql = """ insert into table %(target_metric_table)s partition(date='%(arg_date)s')
                    select val,'%(env)s' as env,'%(sample_table)s' as sample_table,'%(checked_table)s' as checked_table,
                    col_name,cal_algo,'%(monitor_name)s' as alias,%(monitor_id)s as monitor_id,%(cal_time)s as cal_time,
                    'null' as conditions_json,%(cal_type)s as cal_type,dim_value
                    from (""" % vars() + sql + ")"
        cmdutils.exec_spark_sql(final_sql)

    def __gen_insert_project_sql(self, env, sample_table, checked_table, monitor_id, cal_time, val, col_name
                                 , cal_algo_json, alias, cache_table, conditions_json, cal_type, dim_value):
        """Render one SELECT that projects a single metric row out of
        ``cache_table``; callers union these together and insert them.

        ``dim_value`` is emitted UNQUOTED: passing a column name selects that
        column's value, passing None emits the SQL literal null.
        Double quotes are escaped for the shell-invoked spark-sql CLI.
        """
        if dim_value is None:
            dim_value = "null"
        sql = "select %(val)s as val" \
              ", '%(env)s' as env" \
              ", '%(sample_table)s' as sample_table" \
              ", '%(checked_table)s' as checked_table" \
              ", '%(col_name)s' as col_name" \
              ", '%(cal_algo_json)s' as cal_algo" \
              ", '%(alias)s' as alias" \
              ", '%(monitor_id)s' as monitor_id" \
              ", %(cal_time)s as cal_time" \
              ", '%(conditions_json)s' as conditions_json  " \
              ", '%(cal_type)s' as cal_type " \
              ", %(dim_value)s as dim_value " \
              "from %(cache_table)s" % vars()
        return sql.replace("\"", "\\\"")

    def __gen_dims_sql(self, cal_algos, cols, monitor_id, conditions, cal_type, dim_value):
        """Generate cal_type 2 SQL: for every dimension column in ``cols``,
        a "cache table" statement aggregating per group, followed by an
        insert that unions one projection per algo (x3 for percentile).

        :return: (sqls, metas).  NOTE(review): ``metas`` only describes the
                 LAST column's algorithms — kept as-is for compatibility.
        """
        sample_table = self.__sample_table
        checked_table = self.__checked_table
        arg_date = self.__arg_date
        cal_time = int(time.time())
        sqls = []
        metas = []
        for col in cols:
            metas = []
            # the group-by column itself is projected first
            project_cols = [col]
            for cal_algo in cal_algos:
                algo = cal_algo.get('algo')
                col_expr = cal_algo.get('col_expr')
                if 'alias' in cal_algo:
                    alias = cal_algo.get('alias')
                else:
                    alias = "%(col)s_%(algo)s_%(col_expr)s" % vars()
                # BUGFIX: copy the algo dict so tagging 'percentile' below no
                # longer mutates the caller's yaml config.
                meta = {"cal_algo": cal_algo, "cal_algo_json": dict(cal_algo),
                        "col": col, "alias": alias}
                if algo == "percentile":
                    # quartiles p25/p50/p75 in one pass
                    p = "percentile_approx(%(col_expr)s, array(0.25,0.5,0.75), 9999) as %(alias)s " % vars()
                elif algo == "count_distinct":
                    p = "count(distinct %(col_expr)s) as %(alias)s " % vars()
                else:
                    p = "%(algo)s(%(col_expr)s) as %(alias)s" % vars()

                metas.append(meta)
                project_cols.append(p)
            cache_table = "cache_table_dim_g%(monitor_id)s_%(col)s" % vars()

            env = self.__env

            sql_conditions = self.__get_sql_conditions(conditions, dim_value, monitor_id)

            if len(sql_conditions) > 0:
                where_sql_str = " and ".join(sql_conditions)
            else:
                where_sql_str = " 1 = 1 "

            project_cols_str = "\n\t\t\t\t\t\t\t\t, ".join(project_cols)
            group_by_sql = "group by %(col)s" % vars()
            cache_table_sql = """
                        cache table %(cache_table)s AS 
                        select 
                                %(project_cols_str)s
                        from %(sample_table)s
                        where date='%(arg_date)s' and %(where_sql_str)s
                        %(group_by_sql)s
                    """ % vars()
            conditions_json = json.dumps(conditions)

            def gen_project_sql(sql_alias, col_name, cal_algo, dim_value):
                # dim_value here is the group-by column NAME: it is emitted
                # unquoted so the inserted dim_value carries that column's
                # value from the cache table.
                return self.__gen_insert_project_sql(env, sample_table, checked_table, monitor_id, cal_time, sql_alias,
                                                     col_name, cal_algo, sql_alias, cache_table, conditions_json, cal_type, dim_value)

            union_all_sql_list = []
            for meta in metas:
                sql_alias = meta.get('alias')
                col_name = meta.get('col')
                cal_algo = meta.get('cal_algo').get('algo')
                cal_algo_json = meta.get('cal_algo_json')
                if cal_algo == "percentile":
                    # BUGFIX: the three percentile rows previously all indexed
                    # [0]; they now read [0]/[1]/[2] for p25/p50/p75, matching
                    # __gen_metric_sql.
                    for idx, pct in enumerate(('25', '50', '75')):
                        cal_algo_json['percentile'] = pct
                        union_all_sql_list.append(
                            gen_project_sql("%s[%d]" % (sql_alias, idx)
                                            , col_name
                                            , json.dumps(cal_algo_json), col_name))
                else:
                    # NOTE(review): only this branch escapes single quotes;
                    # preserved as-is to keep the emitted SQL unchanged.
                    union_all_sql_list.append(
                        gen_project_sql(sql_alias
                                        , col_name
                                        , json.dumps(cal_algo_json).replace("'", "\\'"), col_name))
            union_all_sql = "\n\t\t\t\t\t\tunion all\n\t\t\t\t\t\t".join(union_all_sql_list)

            db = self.__db
            target_metric_table = db + "." + self.__target_metric_table
            insert_sql = """
                        insert into table %(target_metric_table)s partition(date='%(arg_date)s')
                        %(union_all_sql)s
                    """ % vars()

            sql = """
                        %(cache_table_sql)s ;
                        %(insert_sql)s ;
                    """ % vars()

            sqls.append(sql)
        return sqls, metas

    def __gen_metric_sql(self, cal_algos, cols, monitor_id, conditions, cal_type, dim_value):
        """Generate the cal_type 1 SQL in two parts: a "cache table" statement
        computing every (algo x col) aggregate in one scan, and an insert that
        unions one projection per computed metric.

        :param cal_algos: list of algo config dicts from the yaml
        :param cols: columns to compute the algos over
        :param monitor_id: monitor identifier (used in the cache-table name)
        :param conditions: optional where-clause configs
        :return: (sql, metas)
        """
        sample_table = self.__sample_table
        checked_table = self.__checked_table
        arg_date = self.__arg_date
        cal_time = int(time.time())
        metas = []
        project_cols = []
        for cal_algo in cal_algos:
            algo = cal_algo.get('algo')
            for col in cols:
                # an explicit alias in the algo config wins over the default
                if 'alias' in cal_algo:
                    alias = cal_algo.get('alias')
                else:
                    alias = "%(col)s_%(algo)s" % vars()

                # quartiles p25/p50/p75 in one pass
                if algo == "percentile":
                    p = "percentile_approx(%(col)s, array(0.25,0.5,0.75), 9999)  as %(alias)s " % vars()
                elif algo == 'count_distinct':
                    p = "count(distinct %(col)s) as %(alias)s " % vars()
                elif algo == 'count_empty_str':
                    p = "sum(case when %(col)s == '' then 1 else 0 end) as %(alias)s " % vars()
                elif algo == "count_null_str":
                    p = "sum(case when %(col)s == 'null' then 1 else 0 end) as %(alias)s " % vars()
                elif algo == 'count_null':
                    p = "sum(case when %(col)s is null then 1 else 0 end) as %(alias)s " % vars()
                elif algo == 'count_zero':
                    p = "sum(case when %(col)s == 0 then 1 else 0 end) as %(alias)s " % vars()
                elif algo == 'value_in_range':
                    # renamed from min/max to stop shadowing the builtins
                    range_min = cal_algo.get('min')
                    range_max = cal_algo.get('max')
                    p = "sum(case when (%(col)s < %(range_min)s or %(col)s > %(range_max)s) then 1 else 0 end) as %(alias)s" % vars()
                elif algo == 'value_in_enum':
                    enum_values = cal_algo.get('enum')
                    # BUGFIX: str(tuple(...)) emitted a trailing comma for a
                    # single-element enum ("('a',)"), which is invalid SQL;
                    # build the IN-list explicitly instead.
                    enum_sql = "(" + ", ".join(map(repr, enum_values)) + ")"
                    p = "sum(case when %(col)s not in %(enum_sql)s then 1 else 0 end) as %(alias)s" % vars()
                else:
                    # fallback: coalesce(expr(col), 0) where expr is any
                    # aggregate the engine knows (count, sum, avg, ...)
                    p = "coalesce(%(algo)s(%(col)s), 0) as %(alias)s" % vars()
                # BUGFIX: copy the algo dict so tagging 'percentile' below no
                # longer mutates the caller's yaml config.
                meta = {"cal_algo": algo, "cal_algo_json": dict(cal_algo), "col": col, "alias": alias}
                metas.append(meta)
                project_cols.append(p)

        cache_table = "cache_table_metric_g%(monitor_id)s" % vars()

        env = self.__env

        sql_conditions = self.__get_sql_conditions(conditions, dim_value, '')

        if len(sql_conditions) > 0:
            where_sql_str = " and ".join(sql_conditions)
        else:
            where_sql_str = " 1 = 1 "

        project_cols_str = "\n\t\t\t\t\t\t, ".join(project_cols)
        cache_table_sql = """
            cache table %(cache_table)s AS 
            select 
                    %(project_cols_str)s
            from %(sample_table)s
            where date='%(arg_date)s' and %(where_sql_str)s
        """ % vars()

        conditions_json = json.dumps(conditions)

        # NOTE(review): dim_value is unconditionally discarded here, so the
        # parameter never reaches the insert (it becomes the literal null);
        # preserved to keep existing output unchanged — confirm intent.
        dim_value = None

        def gen_project_sql(sql_alias, col_name, cal_algo_json):
            return self.__gen_insert_project_sql(env, sample_table, checked_table, monitor_id, cal_time, sql_alias, col_name, cal_algo_json, '',
                          cache_table, conditions_json, cal_type, dim_value)

        union_all_sql_list = []
        for meta in metas:
            sql_alias = meta.get('alias')
            col_name = meta.get('col')
            cal_algo_json = meta.get('cal_algo_json')
            cal_algo = meta.get('cal_algo')
            if cal_algo == "percentile":
                # one projection per quartile, indexing the percentile_approx
                # result array
                for idx, pct in enumerate(('25', '50', '75')):
                    cal_algo_json['percentile'] = pct
                    union_all_sql_list.append(
                        gen_project_sql("%s[%d]" % (sql_alias, idx)
                                        , col_name
                                        , json.dumps(cal_algo_json)))
            else:
                union_all_sql_list.append(
                    gen_project_sql(sql_alias
                                    , col_name
                                    , json.dumps(cal_algo_json)))

        union_all_sql = "\n\t\t\tunion all\n\t\t\t".join(union_all_sql_list)

        db = self.__db
        target_metric_table = db + "." + self.__target_metric_table
        insert_sql = """
            insert into table %(target_metric_table)s partition(date='%(arg_date)s')
            %(union_all_sql)s
        """ % vars()

        sql = """
            %(cache_table_sql)s ;
            %(insert_sql)s ;
        """ % vars()

        return sql, metas

    def __format_in_list(self, dim_values):
        """Render a python list as a SQL IN-list.  Elements are quoted when
        the list's LAST element is a string (this mirrors the legacy
        per-element loop in which the final iteration won)."""
        if not dim_values:
            raise ValueError("dim_value list is empty")
        if isinstance(dim_values[-1], str):
            return "('" + "','".join(map(str, dim_values)) + "')"
        return "(" + ",".join(map(str, dim_values)) + ")"

    def __get_sql_conditions(self, conditions, dim_values, monitor_id):
        """Translate yaml ``conditions`` into where-clause fragments.

        The special value 'quality_metrics' resolves the condition's values
        at runtime by querying the latest dim values from
        ``<db>.quality_metrics``.  Unknown ops are silently skipped.
        """
        sql_conditions = []
        db = self.__db
        table_name = self.__checked_table

        if conditions is None:
            return sql_conditions

        op_map = {"eq": "=", "ne": "!=", "gt": ">", "lt": "<",
                  "ge": ">=", "le": "<=", "in": "in"}

        for condition in conditions:
            c_col = condition.get('col')
            c_op = condition.get('op')
            c_values = condition.get('value')
            if c_values == 'quality_metrics':  # fetch top-metric dim values
                # NOTE(review): with monitor_id='' (metric path) this renders
                # "monitor_id = " — preserved as-is; confirm against callers.
                sql_value = '''select distinct dim_value
                                 from (select *
                                             ,row_number() over(partition by rank order by etl_date desc) as num 
                                         from %(db)s.quality_metrics
                                        where monitor_id = %(monitor_id)s
                                          and table_name = '%(table_name)s'
                                      ) as a
                                where num=1 ''' % vars()
                out, err, code = cmdutils.exec_spark_sql(sql_value)
                # BUGFIX: drop all blank lines from the CLI output; the old
                # bare remove('') raised ValueError when there was none.
                dim_values = [line for line in out.split('\n') if line != '']
                if not dim_values:
                    print("quality_metrics returned no dim values for monitor_id=%s" % monitor_id)
                    sys.exit(1)
                union_value = self.__format_in_list(dim_values)
            elif isinstance(c_values, list):  # a literal list value is a config error
                print("yaml配置错误，conditions的value不能配置成list")
                sys.exit(1)
            elif isinstance(c_values, str):
                union_value = "'" + c_values + "'"
            elif isinstance(c_values, bool) or isinstance(c_values, int):
                union_value = c_values
            elif isinstance(dim_values, list):
                union_value = self.__format_in_list(dim_values)
            else:
                # dim_values is not a list here (the original trailing `else`
                # assigning c_values was unreachable)
                print("yaml配置错误，dim_value只能配置成list")
                sys.exit(1)

            # eq | ne | gt | lt | ge | le | in; anything else is skipped
            sql_op = op_map.get(c_op)
            if sql_op is not None:
                sql_conditions.append("%(c_col)s %(sql_op)s %(union_value)s" % vars())

        return sql_conditions


if __name__ == '__main__':
    # Parse CLI arguments: -d/--date -e/--env -b/--db -t/--table (all required).
    opts, args = getopt.gnu_getopt(sys.argv[1:], "d:e:b:t:", ["date=", "env=", "db=", "table="])
    ARG_TODAY = ENV = DB = TABLE = None
    for op, value in opts:
        if op in ['-d', '--date']:
            ARG_TODAY = value
        elif op in ['-e', '--env']:
            ENV = value
        elif op in ['-b', '--db']:
            DB = value
        elif op in ['-t', '--table']:
            TABLE = value

    # BUGFIX: fail with a usage message instead of a NameError when a
    # required option is missing.
    if None in (ARG_TODAY, ENV, DB, TABLE):
        print("usage: -d <date> -e <env> -b <db> -t <table> (all options are required)")
        sys.exit(1)

    # read_config returns a positional tuple; index 5 is intentionally unused here.
    config_in = read_config.read_config(ENV, TABLE, DB)
    table_name_in = config_in[0]
    db_name_in = config_in[1]
    env_in = config_in[2]
    partition_date_in = config_in[3]
    sample_ratio_in = config_in[4]
    custom_in = config_in[6]

    # Resolve the sample table name.  BUGFIX: bind the context manager to a
    # name that does not shadow the imported `sample` module.
    with sample.Sample(ARG_TODAY, env_in, db_name_in, table_name_in, sample_ratio_in, partition_date_in) as sampler:
        sample_table_out = sampler.get_sample_table_name()

    cal = Caldimsmetrics(env_in, ARG_TODAY, sample_table_out, table_name_in, db_name_in, custom_in)
    cal.cal()

    print('cal success')
