# coding=utf-8
import os
import subprocess
import sys
import getopt

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
import read_config


class Sample:
    """Build a Hive sample table by randomly sampling one date-partition of a
    source table.

    The workflow (driven by :meth:`sample`) is:

    1. discover the source table's partition columns via ``show partitions``;
    2. wait for the source partition's checkpoint flag on HDFS;
    3. unless a "already sampled" flag exists, run an ``insert overwrite``
       that copies ``rand() < sample_ratio`` rows into ``<db>.sample_<table>``;
    4. write the "already sampled" flag.

    The class is usable as a context manager, but ``__exit__`` intentionally
    performs no cleanup — the sample table is left in place for downstream
    quality checks. ``__destroy`` exists to drop it but is not wired up
    anywhere (NOTE(review): confirm whether cleanup-on-exit was intended).

    All external work is done by shelling out to ``hive``/``hadoop``; the
    HQL/commands are built by string interpolation, so table/db/date inputs
    are trusted (NOTE(review): not safe for untrusted input).
    """

    def __init__(self
                 , arg_date
                 , env
                 , db_name
                 , source_table
                 , sample_ratio
                 , partition_date):
        """
        :param arg_date: the partition date (string) to sample
        :param env: runtime environment; ``"prod"`` selects the production
            flag path on HDFS, anything else uses the per-user path
        :param db_name: Hive database holding both the source and sample tables
        :param source_table: name of the table to sample
        :param sample_ratio: row-keep probability used as ``rand() < ratio``
        :param partition_date: name of the date partition column of the source
        """
        self.__arg_date = arg_date
        self.__env = env
        self.__sample_ratio = sample_ratio

        self.__hivebin = "hive"
        self.__hadoopbin = "hadoop"
        self.__sparksqlbin = "spark-sql"
        self.__sample_table_db = db_name

        self.__source_table = source_table
        self.__monitor_db = db_name  # switch to the production db when testing
        self.__db_table = self.__monitor_db + "." + source_table
        self.__sample_table = self.get_sample_table_name()
        self.__partition_date = partition_date
        # Eagerly create the (empty) sample table so later inserts can't fail
        # on a missing table.
        self.__create_sample_table()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Deliberately no cleanup: the sample table must survive the `with`
        # block so downstream consumers can read it.
        pass

    def __destroy(self):
        # Not called anywhere; kept for manual/explicit teardown.
        self.__drop_sample_table()

    def sample(self):
        """Run the sampling job and return the sample table name."""
        return self.__sample_write()

    def __sample_write(self):
        """Populate the sample table from the source partition.

        :return: fully-qualified sample table name (``db.sample_<table>``)
        """
        # Discover the partition columns from the most recent partition spec
        # (e.g. "date=2020-01-01/hour=00" -> ["date", "hour"]).
        monitor_db = self.__monitor_db
        source_table = self.__source_table
        hql = 'show partitions %(monitor_db)s.%(source_table)s ' % vars()
        hive = self.__hivebin
        cmd = """%(hive)s -e "%(hql)s" | tail -1 """ % vars()
        out, err, code = self.__exec_shell(cmd)
        out = out.split('/')
        partition_list = []
        for partition_key in out:
            position = partition_key.find("=")
            partition_list.append(partition_key[0:position])
        partition_key = ",".join(partition_list)
        # Used as a quoted-regex in the SELECT to exclude partition columns
        # from the `.*` projection.
        partition_exclude = "|".join(partition_list)

        sample_table = self.get_sample_table_name()
        db_table = self.__db_table
        arg_date = self.__arg_date
        env = self.__env
        sample_table_db = self.__sample_table_db
        flag_sample_table = sample_table_db + "_" + source_table
        partition_date = self.__partition_date
        sample_ratio = self.__sample_ratio

        # Build the partition-column projection: the date partition is
        # rewritten to the requested arg_date; any further partition columns
        # are passed through unchanged.
        if partition_key.find(",") > 0:
            partition_list.remove(partition_date)
            partition_str = ",".join(partition_list)
            partition_column = """ '%(arg_date)s' as date, %(partition_str)s """ % vars()
        else:
            partition_column = """ '%(arg_date)s' as date """ % vars()

        # Wait for the upstream table's checkpoint flag (poll every 300s,
        # up to 24 times — presumably; semantics come from hdfs_wait).
        front_table_path = "/mvad/checkpoint/hive-tables/%(monitor_db)s/%(source_table)s/%(arg_date)s" % vars()
        if (subprocess.call(
                """source /opt/mv-bash/helper-func.sh && hdfs_wait %(front_table_path)s 300 24 """ % vars(),
                shell=True) != 0):
            print("前置表未完成")
            sys.exit(1)
        else:
            print("前置表已完成")

        # Idempotency flag: skip sampling when this (table, date) was already
        # sampled; the flag directory location depends on the environment.
        user = os.environ['USER']
        if env == "prod":
            check_flag_exists = "hadoop fs -test -e '/mvad/warehouse/ext/hive/warehouse-quality/%(flag_sample_table)s/%(arg_date)s'" % vars()
            insert_flag = "hadoop fs -mkdir -p '/mvad/warehouse/ext/hive/warehouse-quality/%(flag_sample_table)s/%(arg_date)s'" % vars()
        else:
            check_flag_exists = "hadoop fs -test -e '/user/%(user)s/checkpoint/warehouse-quality/%(flag_sample_table)s/%(arg_date)s'" % vars()
            insert_flag = "hadoop fs -mkdir -p '/user/%(user)s/checkpoint/warehouse-quality/%(flag_sample_table)s/%(arg_date)s'" % vars()
        out, err, code = self.__exec_shell(check_flag_exists)
        if code == 0:
            print("无需再次抽样")
        else:
            hql = """
                     set mapreduce.job.priority=VERY_HIGH;
                     set mapreduce.job.queuename=etl;
                     set mapreduce.job.name=Hive:[hive-data][%(env)s][%(arg_date)s][sample-data];
                     set mapreduce.map.java.opts=-Xms2400m -Xmx2400m -XX:+UseParallelOldGC -XX:ParallelGCThreads=4 -XX:CICompilerCount=4 -XX:MaxTenuringThreshold=2 -XX:+UseNUMA -XX:+AlwaysPreTouch -XX:AutoBoxCacheMax=20000 -XX:MaxMetaspaceSize=256m -XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -verbose:gc;
                     set mapreduce.reduce.java.opts=-Xms3276m -Xmx3276m -XX:+UseParallelOldGC -XX:ParallelGCThreads=4 -XX:CICompilerCount=4 -XX:MaxTenuringThreshold=2 -XX:+UseNUMA -XX:+AlwaysPreTouch -XX:AutoBoxCacheMax=20000 -XX:MaxMetaspaceSize=256m -XX:+PrintGC -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -verbose:gc;
                     set io.file.buffer.size=65536;
                     set mapreduce.input.fileinputformat.split.minsize=536870912;

                     SET spark.sql.parser.quotedRegexColumnNames=true;
                     set hive.support.quoted.identifiers=none;
                     set hive.exec.dynamic.partition.mode=nonstrict;
                     insert overwrite table %(sample_table)s partition(%(partition_key)s)
                     select \`(%(partition_exclude)s)?+.+\`, %(partition_column)s
                     from %(db_table)s
                     where %(partition_date)s='%(arg_date)s' and rand()<%(sample_ratio)s
                  """ % vars()
            self.__exec_hive(hql)
            self.__exec_shell(insert_flag)

        return self.__sample_table

    @staticmethod
    def __exec_shell(cmd):
        """Run *cmd* through the shell and return ``(stdout, stderr, code)``.

        stdout/stderr are decoded as UTF-8; ``stderr`` is only decoded when
        the command failed.

        BUGFIX: stderr is now piped. Previously ``stderr`` from
        ``communicate()`` was always ``None`` (it was never redirected), so
        the ``err`` returned here — and the message of the exception raised
        by __exec_hive/__exec_spark_sql — was always empty.
        """
        print("===cmd:")
        print(cmd)
        p = subprocess.Popen(cmd,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             stdin=subprocess.PIPE,
                             shell=True)

        # Send the data and get the output
        stdout, stderr = p.communicate()

        # To interpret as text, decode
        out = stdout.decode('utf-8')
        err = ""
        code = p.returncode
        if code != 0 and stderr is not None:
            err = stderr.decode('utf-8')

        return out, err, code

    def get_sample_table_name(self, source_table=None, db=None):
        """Return the fully-qualified sample table name ``<db>.sample_<table>``.

        :param source_table: source table name; defaults to this instance's
        :param db: database name; defaults to this instance's sample db
        """
        if source_table is None:
            source_table = self.__source_table
        if db is None:
            db = self.__sample_table_db
        return "%(db)s.sample_%(source_table)s" % vars()

    def __drop_sample_table(self):
        """Drop the sample table (only reachable via the unused __destroy)."""
        sample_table = self.__sample_table
        hql = """
            drop table if exists %(sample_table)s
        """ % vars()

        self.__exec_hive(hql)

    def __exec_hive(self, hql):
        """Run *hql* via the hive CLI on the etl queue; raise on failure."""
        hive = self.__hivebin
        cmd = """%(hive)s -e "
              set mapreduce.job.queuename=etl;
              %(hql)s" """ % vars()

        out, err, code = self.__exec_shell(cmd)

        if code != 0:
            raise Exception(err)
        return out, err, code

    def __exec_spark_sql(self, hql):
        """Run *hql* via spark-sql on the etl queue; raise on failure."""
        sparksql = self.__sparksqlbin
        cmd = """%(sparksql)s --queue etl -e "%(hql)s" """ % vars()

        out, err, code = self.__exec_shell(cmd)

        if code != 0:
            raise Exception(err)

    def __create_sample_table(self):
        """Create the sample table with the source table's schema if absent."""
        sample_table = self.__sample_table
        db_table = self.__db_table

        hql = """
                create table if not exists %(sample_table)s like %(db_table)s
             """ % vars()

        self.__exec_hive(hql)


def parse_args(argv):
    """Parse the CLI options and return ``(arg_date, env, db, table)``.

    Accepts ``-d/--date``, ``-e/--env``, ``-b/--db``, ``-t/--table``.
    Exits with status 2 and a clear message when any option is missing
    (previously a missing option caused a NameError further down — that is
    why the PyUnboundLocalVariable suppression existed).
    """
    arg_date = env = db = table = None
    opts, _args = getopt.gnu_getopt(argv, "d:e:b:t:", ["date=", "env=", "db=", "table="])
    for op, value in opts:
        if op in ('-d', '--date'):
            arg_date = value
        elif op in ('-e', '--env'):
            env = value
        elif op in ('-b', '--db'):
            db = value
        elif op in ('-t', '--table'):
            table = value
    missing = [name for name, val in (("-d/--date", arg_date),
                                      ("-e/--env", env),
                                      ("-b/--db", db),
                                      ("-t/--table", table)) if val is None]
    if missing:
        print("missing required arguments: " + ", ".join(missing))
        sys.exit(2)
    return arg_date, env, db, table


if __name__ == '__main__':
    # Parse CLI arguments, resolve the table config, then run the sampler.
    ARG_TODAY, ENV, DB, TABLE = parse_args(sys.argv[1:])

    config = read_config.read_config(ENV, TABLE, DB)
    table_name_in = config[0]
    db_name_in = config[1]
    env_in = config[2]
    partition_date_in = config[3]
    sample_ratio_in = config[4]
    with Sample(ARG_TODAY, env_in, db_name_in, table_name_in, sample_ratio_in, partition_date_in) as sample:
        sample.sample()

    print('sample success')
