# -*- coding: utf-8 -*-
###################################
# create: 2024-08-27
# author: gql
# desc: 本地数据上传到hdfs,分区表修复分区
###################################
import io
import logging
import sys
import time

from apps.utils.common import CommonUtils
from apps.utils.configparser import ConfigParser
from apps.utils.local import LocalUtils
from apps.utils.logger import Logger

# Python 2 only: re-expose setdefaultencoding (hidden by site.py) so implicit
# str<->unicode conversions use UTF-8 — needed for the Chinese log messages.
reload(sys)
sys.setdefaultencoding("utf-8")

# Load all runtime settings from the put config file (path relative to CWD).
config = ConfigParser('../conf/config-put.ini')
config.parse()
# [logger]
log_path = config.get('logger', 'log_path')
log_name = config.get('logger', 'log_name')
# [local]
local_disk_dir = config.get('local', 'local_disk_dir')
local_db_dir = config.get('local', 'local_db_dir')
local_put_fin_path = config.get('local', 'local_put_fin_path')
local_scp_fin_path = config.get('local', 'local_scp_fin_path')
# [new_cluster]
hdfs_auth = config.get('new_cluster', 'hdfs_auth')
hdfs_db_path = config.get('new_cluster', 'hdfs_db_path')
hdfs_dfs_replication = config.get('new_cluster', 'hdfs_dfs_replication')
hive_jdbc_url = config.get('new_cluster', 'hive_jdbc_url')
hive_db_name = config.get('new_cluster', 'hive_db_name')
# [error]
error_loop_secs = config.get('error', 'error_loop_secs')
error_check_path = config.get('error', 'error_check_path')
# Logger + shared helpers used by every function below.
log = Logger(log_path, log_name, is_mask=False, c_level=logging.INFO, f_level=logging.INFO)
log.set_passwd(None)
util = CommonUtils(log)
loc = LocalUtils(log, util)
loc.set_config(local_disk_dir, local_disk_limit_percent=None)


# Verify that the local directory size after scp matches the size recorded
# before scp.
def check_record(tab_name, part_name, src_dir_size):
    """Compare the pre-scp recorded size against the local post-scp size.

    :param tab_name: table name (directory under local_db_dir)
    :param part_name: partition dir name, or "" for a non-partitioned table
    :param src_dir_size: size in KB recorded before scp (read from a text
        file, so usually a string)
    :return: (is_ok, dst_dir_size) — on mismatch the failure is appended to
        error_check_path (deduplicated) and is_ok is False.
    """
    path = "{}/{}".format(local_db_dir, tab_name) \
        if part_name == "" else "{}/{}/{}".format(local_db_dir, tab_name, part_name)
    dst_dir_size = loc.file_size_kb(path)
    # Normalize both sides to str before comparing: src_dir_size comes from a
    # text file while file_size_kb may return a number — in Python 2,
    # 1000 == '1000' is False, which would make every check fail.
    if str(src_dir_size) == str(dst_dir_size):
        return True, dst_dir_size
    log.error("[{}/{}] src: {}K, dst: {}K, 校验失败!".format(tab_name, part_name, src_dir_size, dst_dir_size))
    # Check failed: record it in the error file (skip if already recorded).
    tab_part = []
    with io.open(error_check_path, 'r', encoding='utf-8') as fr:
        # line format: stu:part=20240801:/root/stu/part=20240801:1000:1000
        for line in fr:
            fields = line.strip().split(":")
            if len(fields) < 2:
                # Tolerate blank / malformed lines instead of IndexError.
                continue
            tab_part.append((fields[0], fields[1], fields[-1]))
    # fields[-1] is always a str, so compare with str(dst_dir_size) —
    # otherwise a numeric dst_dir_size would never match and the same
    # failure would be appended on every loop iteration.
    if (tab_name, part_name, str(dst_dir_size)) not in tab_part:
        with io.open(error_check_path, 'a+', encoding='utf-8') as fw:
            fw.write(u"{}:{}:{}:{}:{}\n".format(tab_name, part_name, path, src_dir_size, dst_dir_size))
            fw.flush()
    return False, dst_dir_size


# Read the scp-finished and put-finished bookkeeping files.
def get_finished_info():
    """Parse the scp/put completion record files.

    Each line has the form ``db:table:partition:size`` (partition is empty
    for non-partitioned tables), e.g. ``ods:stu::1000``.

    :return: (scp_fin_set, put_fin_set, scp_fin_dict) — the sets hold
        (table, partition) tuples; the dict maps them to the recorded size.
    """
    scp_fin_dict = dict()
    scp_fin_set = set()
    with io.open(local_scp_fin_path, 'r', encoding='utf-8') as fr:
        for line in fr:
            fields = line.strip().split(":")
            # Tolerate blank / malformed lines (e.g. the freshly touched
            # file, or a trailing newline) instead of raising IndexError.
            if len(fields) < 4:
                continue
            # (table, partition) -> recorded partition size
            scp_fin_dict[(fields[1], fields[2])] = fields[3]
            scp_fin_set.add((fields[1], fields[2]))
    put_fin_set = set()
    with io.open(local_put_fin_path, 'r', encoding='utf-8') as fr:
        for line in fr:
            fields = line.strip().split(":")
            if len(fields) < 3:
                continue
            put_fin_set.add((fields[1], fields[2]))
    return scp_fin_set, put_fin_set, scp_fin_dict


# Upload a non-partitioned (full) table directory to HDFS.
def put_full(tab_name, part_name, dst_dir_size):
    """Put the whole table directory onto HDFS, retrying until it succeeds,
    then record completion and delete the local copy.

    :param tab_name: table name (directory under local_db_dir)
    :param part_name: always "" for a full table; written into the record
    :param dst_dir_size: verified local size, written into the record
    """
    tab_dir = "{}/{}".format(local_db_dir, tab_name)
    put_cmd = hdfs_auth + "; hdfs dfs -Ddfs.replication={} -put -f {} {}/".format(
        hdfs_dfs_replication, tab_dir, hdfs_db_path)

    attempt = 0
    while True:
        log.info("正在上传全量表 local: {}, hdfs: {}/".format(tab_dir, hdfs_db_path))
        code = util.exec_local_shell0(put_cmd)
        # None or "0" means the put succeeded.
        if code is None or str(code) == "0":
            break
        attempt += 1
        util.sleep_strategy(attempt, sleep_sec=60, message="hdfs put异常(表)")

    log.info("上传完成全量表 local: {}, hdfs: {}/".format(tab_dir, hdfs_db_path))
    # Append a completion record so this table is not uploaded again.
    with io.open(local_put_fin_path, 'a+', encoding='utf-8') as fw:
        fw.write(u"{}:{}:{}:{}\n".format(hive_db_name, tab_name, part_name, dst_dir_size))
        fw.flush()
    # Reclaim local disk space once the upload is recorded.
    loc.remove_path(tab_dir)


# Upload one partition of a partitioned table to HDFS.
def put_part(tab_name, part_name, dst_dir_size):
    """Put a single partition directory onto HDFS, retrying until it
    succeeds, record completion, delete the local copy, and repair the
    Hive partition metadata.

    :param tab_name: table name (directory under local_db_dir)
    :param part_name: partition directory name, e.g. ``part=20240801``
    :param dst_dir_size: verified local size, written into the record
    """
    local_part_path = "{}/{}/{}".format(local_db_dir, tab_name, part_name)
    hdfs_tab_path = "{}/{}".format(hdfs_db_path, tab_name)
    # Make sure the table directory exists on HDFS (best effort; the put
    # below is what is retried on failure).
    script = hdfs_auth + "; hdfs dfs -mkdir -p {}/".format(hdfs_tab_path)
    util.exec_local_shell(script)
    log.info("正在上传分区 local: {}, hdfs: {}/".format(local_part_path, hdfs_tab_path))
    script = hdfs_auth + "; hdfs dfs -Ddfs.replication={} -put -f {} {}/".format(
        hdfs_dfs_replication, local_part_path, hdfs_tab_path)
    _, stat_code = util.exec_local_shell(script)

    put_loop = 0
    while stat_code is not None and str(stat_code) != "0":
        put_loop += 1
        util.sleep_strategy(put_loop, sleep_sec=60, message="hdfs put异常(分区)")
        log.info("正在上传分区 local: {}, hdfs: {}/".format(local_part_path, hdfs_tab_path))
        _, stat_code = util.exec_local_shell(script)

    log.info("上传完成分区 local: {}, hdfs: {}/".format(local_part_path, hdfs_tab_path))
    # Append a completion record so this partition is not uploaded again.
    # Use hive_db_name (as put_full does) instead of the hardcoded "ods":
    # otherwise the record never matches the scp record when the configured
    # db name differs, and the partition would be re-uploaded forever.
    with io.open(local_put_fin_path, 'a+', encoding='utf-8') as fw:
        fw.write(u"{}:{}:{}:{}\n".format(hive_db_name, tab_name, part_name, dst_dir_size))
        fw.flush()
    loc.remove_path(local_part_path)
    # Refresh Hive partition metadata so the new partition is queryable.
    script = '{} -e "use {}; msck repair table {};"'.format(hive_jdbc_url, hive_db_name, tab_name)
    util.exec_local_shell(script)

# Upload every scp-finished but not-yet-put entry to HDFS.
def put_to_hdfs():
    """Compute the pending upload list (scp-finished minus put-finished),
    verify each entry's size, and dispatch to the full-table or partition
    upload routine."""
    scp_fin_set, put_fin_set, scp_fin_dict = get_finished_info()
    # Pending = set difference, ordered by partition name for stable runs.
    pending = sorted(scp_fin_set - put_fin_set, key=lambda item: item[1])
    log.info("待上传列表：{}".format(pending))
    for tab_name, part_name in pending:
        src_dir_size = scp_fin_dict[(tab_name, part_name)]
        # Verify the scp'd data before touching HDFS.
        is_ok, dst_dir_size = check_record(tab_name, part_name, src_dir_size)
        if not is_ok:
            log.error("*** check失败, 正常该语句不会出现!")
            continue
        if part_name == "":
            # Empty partition name -> non-partitioned (full) table.
            put_full(tab_name, part_name, dst_dir_size)
        else:
            put_part(tab_name, part_name, dst_dir_size)


def main(argv):
    """Entry point: make sure the bookkeeping files exist, then poll and
    upload forever, sleeping error_loop_secs between passes.

    :param argv: command-line arguments (currently unused)
    """
    for path in (local_put_fin_path, local_scp_fin_path, error_check_path):
        loc.touch_file(path)
    while True:
        put_to_hdfs()
        log.info("等待 {} 秒".format(error_loop_secs))
        time.sleep(int(error_loop_secs))


# Script entry point; main() never returns (infinite polling loop).
if __name__ == '__main__':
    main(sys.argv)
