#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common data-quality utilities: SQL assembly and data comparison
# author：liuhui@aerozhonghuan.com
import datetime
from pyspark.sql import SQLContext
from pyspark.sql import SparkSession

"""
数据质量通用类
"""
class common_quality():
    """Data-quality rule helper: builds the SQL statement used to sample a
    metric from a Hive table according to the configured rule.

    rule_type:
        '1' -- table-level row count
        '2' -- column-level metric, selected by ``sampling_mode``
               (mode_sum / mode_avg / mode_max / mode_min / mode_null /
                mode_zero / mode_repeat)
        '3' -- user-supplied custom SQL (``custom_sql`` returned verbatim)
    """

    def __init__(self, rule_type, db_name, table_name, column_name, custom_sql,
                 sampling_mode, filter_condition):
        # Normalize to str so callers may pass either 1 or '1'.  The original
        # compared against string literals only, so an int rule_type silently
        # fell through to the "bad rule_type" branch and returned None.
        self.rule_type = str(rule_type)
        self.db_name = db_name
        self.table_name = table_name
        self.column_name = column_name
        self.custom_sql = custom_sql
        self.sampling_mode = sampling_mode
        self.filter_condition = filter_condition

        # WHERE-clause accumulator; '1=1' lets extra predicates be ANDed on.
        self.where = '1=1'
        # SELECT list, filled in by get_execute_sql().
        self.field = ''

    def combin_sql(self):
        """Assemble the final SELECT statement from the prepared parts."""
        sql = '''SELECT {field} 
        FROM {db_name}.{table_name} 
        WHERE {where}'''.format(field=self.field, db_name=self.db_name,
                                table_name=self.table_name, where=self.where)

        return sql

    def _apply_filter_condition(self, yesterday):
        """AND the user-supplied filter onto the WHERE clause.

        The literal placeholder ``{yesterday}`` inside the filter is replaced
        with yesterday's date wrapped in double quotes (original behavior).
        """
        if self.filter_condition:
            self.filter_condition = self.filter_condition.replace(
                '{yesterday}', '"%s"' % (yesterday))
            self.where += ' AND %s' % (self.filter_condition)

    def get_execute_sql(self):
        """Return the SQL to execute for this rule, or None on bad config."""
        # Reset the accumulators so repeated calls do not stack predicates
        # (the original appended the filter again on every call).
        self.where = '1=1'
        self.field = ''

        yesterday = (datetime.datetime.now() - datetime.timedelta(days=1)).strftime('%Y-%m-%d')

        if self.rule_type == '1':
            # Table-level rule: total row count.
            self.field = 'count(1) AS cnt'
            self._apply_filter_condition(yesterday)
            return self.combin_sql()
        elif self.rule_type == '2':
            if not self.column_name:
                print('column_name 为空')
                return None

            if self.sampling_mode in ('mode_sum', 'mode_avg', 'mode_max', 'mode_min'):
                # Plain aggregate over the column, e.g. mode_max -> max(col).
                agg_str = self.sampling_mode.replace("mode_", "")
                self.field = '%s(%s) as cnt ' % (agg_str, self.column_name)
            elif self.sampling_mode == 'mode_null':
                # Count of NULL values in the column.
                self.field = 'count(%s) as cnt' % (self.column_name)
                self.where += ' AND %s is null' % (self.column_name)
            elif self.sampling_mode == 'mode_zero':
                # Count of rows where the column equals zero.
                self.field = 'count(%s) as cnt' % (self.column_name)
                self.where += ' AND %s =0' % (self.column_name)
            elif self.sampling_mode == 'mode_repeat':
                # Duplicates: non-null count minus distinct count.
                self.field = '(count(%s) - count(distinct %s)) AS cnt' % (self.column_name, self.column_name)

            self._apply_filter_condition(yesterday)
            return self.combin_sql()
        elif self.rule_type == '3':
            # Custom rule: the caller supplies the full statement.
            return self.custom_sql
        else:
            print('rule_type 数据异常')
            return None

def execute_sql(sql):
    """Run *sql* on Spark (YARN, Hive-enabled) and return the first column
    of the first result row — e.g. the single count produced by a quality
    rule.

    NOTE(review): the original set config(" spark.driver.maxResultsSize", "0")
    — the key had both a leading space and a typo (maxResultsSize); the real
    key spark.driver.maxResultSize is configured to "2g" below, so the bogus
    entry is dropped here.
    """
    spark = SparkSession.builder.master("yarn").appName("data_quality_execute"). \
        config("spark.memory.fraction", 0.7).config("spark.shuffle.memoryFraction", 0.2). \
        config("spark.memory.storageFraction", 0.1).config("spark.sql.autoBroadcastJoinThreshold", "-1"). \
        config("spark.sql.shuffle.partitions", 200).config("spark.debug.maxToStringFields", 1000). \
        config("hive.exec.dynamic.partition.mode", "nonstrict").config("hive.exec.dynamic.partition", "true"). \
        config("spark.driver.maxResultSize", "2g").config("spark.executor.instances", "50"). \
        config("spark.yarn.queue", "dev").config("spark.port.maxRetries", 20).enableHiveSupport().getOrCreate()

    # SparkSession.sql supersedes the deprecated SQLContext wrapper.
    result = spark.sql(sql)

    # Only the first row's first field is meaningful for these checks.
    total_cnt = result.head()[0]

    return total_cnt

if __name__ == '__main__':
    # Example run: custom-SQL rule ('3') that counts duplicate is_idle
    # values for one partition day.
    custom_sql = '''
    SELECT (count(is_idle) - count(distinct is_idle)) AS id_idle
            FROM dw_warehouse.dwd_lc_trackdwtag_phi
            WHERE 1=1 AND pdt='2021-10-21'
    '''
    # rule_type passed as the string '3' to match the comparisons in
    # get_execute_sql().
    quality = common_quality(rule_type='3', db_name='', table_name='',
                             column_name='', custom_sql=custom_sql,
                             sampling_mode='mode_custom', filter_condition="")

    execute_str = quality.get_execute_sql()
    print(execute_str)
    # BUG FIX: execute_sql is a module-level function, not a method of
    # common_quality — the original quality.execute_sql(...) raised
    # AttributeError.
    execute_sql(execute_str)