# -*- coding: utf-8 -*-
###################################
# create: 2024-08-27
# author: gql
# desc: hdfs下载并scp传输到远端服务器
#       10.0.0.10 -> 172.30.1.10
###################################
import io
import logging
import os
import sys
import time

from apps.utils.common import CommonUtils
from apps.utils.configparser import ConfigParser
from apps.utils.local import LocalUtils
from apps.utils.logger import Logger
from apps.utils.remote import RemoteTool

# Python 2 only: force utf-8 as the process default encoding so the Chinese
# log messages and paths below do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf8')

# Load runtime configuration.
# NOTE(review): config.get appears to return raw strings — numeric settings
# (limits, port, seconds) are not cast here; confirm that consumers cast them.
config = ConfigParser('../conf/config-get.ini')
config.parse()
# [logger] log output directory and file name
log_path = config.get('logger', 'log_path')
log_name = config.get('logger', 'log_name')
# [local] local staging directories, record files and disk-usage limit
local_disk_dir = config.get('local', 'local_disk_dir')
local_data_dir = config.get('local', 'local_data_dir')
local_part_table_dir = config.get('local', 'local_part_table_dir')
local_big_part_path = config.get('local', 'local_big_part_path')
local_skip_big_path = config.get('local', 'local_skip_big_path')
local_skip_loop_path = config.get('local', 'local_skip_loop_path')
local_disk_limit_percent = config.get('local', 'local_disk_limit_percent')
# [old_cluster] source cluster: full-sync size threshold, db name, kerberos/auth command
cls_full_limit = config.get('old_cluster', 'cls_full_limit')
cls_full_limit_unit = config.get('old_cluster', 'cls_full_limit_unit')
cls_db_name = config.get('old_cluster', 'cls_db_name')
cls_hdfs_auth = config.get('old_cluster', 'cls_hdfs_auth')
# [new_cluster] destination server: ssh/scp endpoint, target dirs, disk guard
remote_host = config.get('new_cluster', 'remote_host')
remote_pass = config.get('new_cluster', 'remote_pass')
remote_port = config.get('new_cluster', 'remote_port')
remote_disk_dir = config.get('new_cluster', 'remote_disk_dir')
remote_data_dir = config.get('new_cluster', 'remote_data_dir')
remote_disk_limit_percent = config.get('new_cluster', 'remote_disk_limit_percent')
remote_record_dir = config.get('new_cluster', 'remote_record_dir')
remote_scp_fin_path = config.get('new_cluster', 'remote_scp_fin_path')
remote_disk_check_secs = config.get('new_cluster', 'remote_disk_check_secs')
# [error] retry limit and wait seconds for failed hdfs downloads
error_loop_limit = config.get('error', 'error_loop_limit')
error_wait_sec = config.get('error', 'error_wait_sec')
# Logger — the remote password is registered so it is masked in log output
log = Logger(log_path, log_name, is_mask=False, c_level=logging.INFO, f_level=logging.INFO)
log.set_passwd(remote_pass)
util = CommonUtils(log)
# Remote-side helper (ssh/scp) with its disk-full guard configuration
net = RemoteTool(log, remote_host, remote_pass, remote_port)
net.set_config(remote_disk_dir, remote_disk_limit_percent, remote_disk_check_secs)
# Local-side helper with its disk-full guard configuration
loc = LocalUtils(log, util)
loc.set_config(local_disk_dir, local_disk_limit_percent)


# Parse the batch manifest file
def get_manifest_config(manifest_file):
    """Parse a batch manifest file into a list of table tuples.

    Each non-empty line carries 5 tab-separated fields:
        table_name  size  unit  hdfs_path  is_partitioned(1/0)
    e.g. ``t_stu<TAB>15.2<TAB>G<TAB>/user/hive/t_stu<TAB>1``

    Blank lines (e.g. a trailing newline) are skipped — the old code crashed
    on them with IndexError because ``"".split("\\t")`` yields ``[""]``.

    :param manifest_file: path of the utf-8 manifest text file
    :return: list of (tab_name, tab_size, tab_unit, tab_path, is_part)
             tuples, where is_part is a bool
    """
    conf = []
    with io.open(manifest_file, 'r', encoding='utf-8') as fr:
        for line in fr:
            stripped = line.strip()
            if not stripped:
                # tolerate empty lines instead of raising IndexError
                continue
            fields = stripped.split("\t")
            tab_name = fields[0]
            tab_size = fields[1]
            tab_unit = fields[2]
            tab_path = fields[3]
            is_part = True if fields[4] == "1" else False
            conf.append((tab_name, tab_size, tab_unit, tab_path, is_part))
    return conf


# Download a file/directory from HDFS
def hdfs_get(hdfs_path, local_path):
    """Run ``hdfs dfs -get`` (after the cluster auth command) and return the
    shell status code reported by the executor."""
    get_cmd = "hdfs dfs -get {} {}".format(hdfs_path, local_path)
    return util.exec_local_shell0(cls_hdfs_auth + ";" + get_cmd)


# Collect detailed partition info for a partitioned table
def get_table_part_info(tab_name, tab_path, part_start_dt, part_end_dt):
    """List a table's partitions via ``hdfs dfs -du -s -h`` and parse them.

    Partitions outside [part_start_dt, part_end_dt] are filtered out.
    Terabyte-sized partitions are appended to ``local_big_part_path``
    and skipped (too big to stage locally).

    :param tab_name: table name
    :param tab_path: table root path on HDFS
    :param part_start_dt: start date bound for partition filtering ("-" = none)
    :param part_end_dt: end date bound for partition filtering ("-" = none)
    :return: list of (part_name, part_size, part_unit, part_path) tuples
    """
    # sample du -h output lines:
    #   1.5 G  /user/xxx/t_stu/par_dt=20220506
    #   0  /user/xxx/t_stu/par_dt=20240420
    part_du_file = "{}/part_du_{}.txt".format(local_part_table_dir, tab_name)
    loc.touch_file(part_du_file)
    script = cls_hdfs_auth + ";hdfs dfs -du -s -h {}/* > {}".format(tab_path, part_du_file)
    util.exec_local_shell(script)
    part_info = []
    fw = io.open(local_big_part_path, 'a+', encoding='utf-8')
    try:
        with io.open(part_du_file, 'r', encoding='utf-8') as fr:
            for line in fr:
                fields = line.strip().split(" ")
                part_size = fields[0]
                part_path = fields[-1]
                part_name = part_path.split("/")[-1]
                if util.filter_by_part_dt(part_name, part_start_dt, part_end_dt):
                    continue
                # Normalize the size unit: unit-less sizes ("0  /path") split
                # into an empty second field.  The old code left part_unit
                # unbound (NameError) or stale from the previous line when
                # fields[1] was anything else — default to "-" in every
                # unrecognized case.
                if fields[1] in ["K", "M", "G", "T"]:
                    part_unit = fields[1]
                else:
                    part_unit = "-"
                if part_unit == "T":
                    # record terabyte partitions and skip them
                    fw.write(u"{};{};{};{}\n".format(tab_name, part_path, part_size, part_unit))
                    fw.flush()
                    continue
                part_info.append((part_name, part_size, part_unit, part_path))
    finally:
        # close the big-partition record file even if parsing raises
        fw.close()
    return part_info


# After scp completes, record the finished table/partition on the remote server
def remote_scp_trans_done(db_name, tab_name, part_name, part_kb):
    """Append a ``db:table:partition:kb`` record to the remote done-file via ssh."""
    record = "{}:{}:{}:{}".format(db_name, tab_name, part_name, part_kb)
    net.exec_remote_shell0('"echo {} >> {}"'.format(record, remote_scp_fin_path))
    log.info("scp完成信号已传输到远端服务器")


# Download and transfer one partition
def trans_by_part(tab_name, part_name, part_path, skip_file):
    """Download one partition from HDFS to local disk, scp it to the remote
    server, verify the remote size, report completion, then free local disk.

    :param tab_name: table name (also the local/remote sub-directory)
    :param part_name: partition directory name, e.g. ``par_dt=20220506``
    :param part_path: HDFS path of the partition
    :param skip_file: open file recording partitions skipped after repeated
                      download failures (table<TAB>partition<TAB>path)
    """
    log.info("hdfs get路径: {}".format(part_path))
    part_local_parent_path = "{}/{}".format(local_data_dir, tab_name)
    part_local_path = "{}/{}/{}".format(local_data_dir, tab_name, part_name)
    # 1. hdfs get with a bounded retry loop.
    # Config values are raw strings — cast once so the retry cap actually
    # fires (the old ``loop == error_loop_limit`` compared int to str, was
    # never true, and looped forever on a persistent failure).
    max_loop = int(error_loop_limit)
    stat_code = hdfs_get(part_path, part_local_parent_path)
    loop = 0
    while stat_code is not None and str(stat_code) != "0":
        loop += 1
        if loop >= max_loop:
            log.warn("hdfs get分区失败{}次,记录并跳过!".format(loop))
            # record: table  partition  path
            skip_file.write(u"{}\t{}\t{}\n".format(tab_name, part_name, part_path))
            skip_file.flush()
            loc.remove_path(part_local_path)
            return
        util.sleep_strategy(loop, sleep_sec=error_wait_sec, message="hdfs下载异常分区")
        # clean the partial download before retrying
        loc.remove_path(part_local_path)
        stat_code = hdfs_get(part_path, part_local_parent_path)

    log.info("hdfs get成功: {}".format(part_path))
    local_part_kb = loc.file_size_kb(part_local_path)
    # 2. block until the remote disk has room for this partition
    net.remote_check_disk(local_part_kb)

    # 3. scp transfer
    log.info("scp远端传输文件(分区)")
    remote_addr = "{}:{}/{}".format(remote_host, remote_data_dir, tab_name)
    net.scp_to_remote(part_local_path, remote_addr)

    # 4. compare remote vs local size after scp; retry the transfer until
    #    both sides match
    remote_part_kb = net.remote_file_size("{}/{}/{}".format(remote_data_dir, tab_name, part_name))
    loop = 0
    while remote_part_kb != local_part_kb:
        loop += 1
        log.error("scp失败, 表名: {}, 分区: {}, Remote_kb: {}, Local_kb: {}".format(
            tab_name, part_name, remote_part_kb, local_part_kb))
        # wait, then retransmit the failed scp
        util.sleep_strategy(loop, sleep_sec=60, message="scp失败重传(分区)")
        net.scp_to_remote(part_local_path, remote_addr)
        remote_part_kb = net.remote_file_size("{}/{}/{}".format(remote_data_dir, tab_name, part_name))

    log.info("scp完成远端传输文件并校验成功")
    # 5. notify the remote side and free local disk
    remote_scp_trans_done(cls_db_name, tab_name, part_name, local_part_kb)
    loc.remove_path(part_local_path)


# Download and transfer a full (non-partitioned / small) table
def trans_by_full(tab_name, tab_path, skip_file):
    """Download a whole table from HDFS to local disk, scp it to the remote
    server, verify the remote size, report completion, then free local disk.

    :param tab_name: table name (also the local/remote sub-directory)
    :param tab_path: HDFS path of the table
    :param skip_file: open file recording tables skipped after repeated
                      download failures (table<TAB>-<TAB>path)
    """
    log.info("hdfs get路径: {}".format(tab_path))
    tab_local_parent_path = local_data_dir
    tab_local_path = "{}/{}".format(local_data_dir, tab_name)
    # 1. hdfs get with a bounded retry loop.
    # Config values are raw strings — cast once so the retry cap actually
    # fires (the old ``loop == error_loop_limit`` compared int to str, was
    # never true, and looped forever on a persistent failure).
    max_loop = int(error_loop_limit)
    stat_code = hdfs_get(tab_path, tab_local_parent_path)
    loop = 0
    while stat_code is not None and str(stat_code) != "0":
        loop += 1
        if loop >= max_loop:
            log.info("hdfs get全量表失败{}次,记录并跳过".format(error_loop_limit))
            # record: table  -  path ("-" marks no partition)
            skip_file.write(u"{}\t{}\t{}\n".format(tab_name, "-", tab_path))
            skip_file.flush()
            loc.remove_path(tab_local_path)
            return
        util.sleep_strategy(loop, sleep_sec=error_wait_sec, message="hdfs下载异常(表)")
        # clean the partial download before retrying
        loc.remove_path(tab_local_path)
        stat_code = hdfs_get(tab_path, tab_local_parent_path)
    log.info("hdfs get成功: {}".format(tab_path))

    # 2. measure the local table size and block until the remote disk has room
    local_tab_kb = loc.file_size_kb(tab_local_path)
    net.remote_check_disk(local_tab_kb)

    # 3. scp transfer
    log.info("scp远端传输文件(表)")
    remote_addr = "{}:{}/".format(remote_host, remote_data_dir)
    net.scp_to_remote(tab_local_path, remote_addr)
    # 4. compare remote vs local size after scp; retry the transfer until
    #    both sides match
    remote_tab_kb = net.remote_file_size("{}/{}".format(remote_data_dir, tab_name))
    loop = 0
    while remote_tab_kb != local_tab_kb:
        loop += 1
        log.error("scp失败! 表名: {}, Remote_tab_kb: {}, Local_tab_kb: {}".format(tab_name, remote_tab_kb, local_tab_kb))
        util.sleep_strategy(loop, sleep_sec=60, message="scp失败重传表")
        net.scp_to_remote(tab_local_path, remote_addr)
        remote_tab_kb = net.remote_file_size("{}/{}".format(remote_data_dir, tab_name))
    log.info("scp远端传输文件完成并校验成功")

    # 5. notify the remote side and free local disk
    remote_scp_trans_done(cls_db_name, tab_name, "", local_tab_kb)
    loc.remove_path(tab_local_path)


def main(argv):
    """Entry point: migrate every table listed in a batch manifest.

    argv layout: [script, batch_no, manifest_file, part_start_dt, part_end_dt]
    where the partition date bounds use ``-`` for "no limit".

    For each table: tables above the full-sync size threshold are moved
    partition by partition; everything else is moved in one piece.  Tables
    or partitions that would overflow the local disk are recorded to the
    skip-big file and skipped.
    """
    if len(argv) <= 4:
        print("""请输入4个参数: 批次号 批次清单文件 分区表开始日期(不限制填-) 分区表结束日期(不限制填-)
        比如: python {} 001 001.txt 20240821 20240824""".format(argv[0]))
        # sys.exit instead of the site-module exit() builtin, which is not
        # guaranteed to exist (python -S, frozen interpreters)
        sys.exit(2)

    # batch number (for the final summary log only)
    batch_no = argv[1]
    # manifest file listing the tables of this batch
    manifest_file = argv[2]
    # partition start date bound
    part_start_dt = argv[3]
    # partition end date bound
    part_end_dt = argv[4]
    start_dt = util.date_format('%Y-%m-%d %H:%M:%S')
    start_time = time.time()

    # prepare remote target directories and local record files
    net.exec_remote_shell0("mkdir -p {}".format(remote_data_dir))
    net.exec_remote_shell0("mkdir -p {}".format(remote_record_dir))
    loc.touch_file(local_skip_big_path)
    loc.touch_file(local_skip_loop_path)
    skip_big_file = io.open(local_skip_big_path, 'a+', encoding='utf-8')
    skip_file = io.open(local_skip_loop_path, 'a+', encoding='utf-8')
    try:
        log.info("解析清单文件: {}".format(manifest_file))
        config_list = get_manifest_config(manifest_file)
        for tab_name, tab_size, tab_unit, tab_path, is_part in config_list:
            log.info("-->正在执行：{} {} {} {} {}".format(tab_name, tab_size, tab_unit, tab_path, is_part))
            # make sure the local and remote per-table directories exist
            table_local_path = "{}/{}".format(local_data_dir, tab_name)
            if not os.path.exists(table_local_path):
                os.makedirs(table_local_path)
            remote_tab_path = remote_data_dir + "/" + tab_name
            script = '"mkdir -p {}"'.format(remote_tab_path)
            net.exec_remote_shell0(script)
            if is_part:
                # #################################
                # ####### partition-wise sync #####
                # #################################
                if util.is_part_sync(tab_size, tab_unit, cls_full_limit, cls_full_limit_unit):
                    part_info_list = get_table_part_info(tab_name, tab_path, part_start_dt, part_end_dt)
                    for part_name, part_size, part_unit, part_path in part_info_list:
                        # skip (and record) partitions larger than the free local disk
                        is_enough = loc.local_disk_check(part_size, part_unit)
                        if not is_enough:
                            log.error("本地磁盘占比超过{}! 跳过分区: {} {}".format(local_disk_limit_percent, tab_name, part_name))
                            skip_big_file.write(
                                u"{}\t{}\t{}\t{}\t{}\n".format(tab_name, part_name, part_size, part_unit, part_path))
                            skip_big_file.flush()
                            continue
                        # download and transfer this partition
                        trans_by_part(tab_name, part_name, part_path, skip_file)
                    continue

            # #################################
            # ########## full-table sync ######
            # #################################
            # skip (and record) tables larger than the free local disk
            is_enough = loc.local_disk_check(tab_size, tab_unit)
            if not is_enough:
                log.warn("本地磁盘数据占比大，跳过表: {}".format(tab_name))
                skip_big_file.write(
                    u"{}\t{}\t{}\t{}\n".format(tab_name, tab_size, tab_unit, tab_path))
                skip_big_file.flush()
                continue
            # download and transfer the whole table
            log.info("下载并传输全量数据")
            trans_by_full(tab_name, tab_path, skip_file)
    finally:
        # close the record files even when a transfer raises
        skip_big_file.close()
        skip_file.close()

    end_time = time.time()
    log.info("*" * 50)
    log.info("\t完成批次：{}".format(batch_no))
    log.info("\t开始时间：" + start_dt)
    log.info("\t结束时间：" + util.date_format('%Y-%m-%d %H:%M:%S'))
    log.info("\t耗费时间：{:.2f}秒".format(end_time - start_time))
    log.info("*" * 50)


if __name__ == '__main__':
    try:
        main(sys.argv)
    except Exception as e:
        # Keep the full traceback (the message alone loses the failure site)
        # and exit nonzero so wrapping shell scripts can detect the failure.
        import traceback
        log.error("出现错误主程序退出! {}".format(e))
        log.error(traceback.format_exc())
        sys.exit(1)
