#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import pathlib
import sys
import time

from gaussdbt.backup.back_up import stop_backup, BackUP
from gaussdbt.commons.const import BackupStepEnum, Env, RoachConstant, NormalErr, ErrorCode, GaussDBTReportDBLabel
from gaussdbt.commons.database_common import check_if_path_in_cache
from gaussdbt.commons.gaussdbt_common import query_err_code, check_uid_consistency
from gaussdbt.commons.gaussdbt_param_protection import ParamProtection
from gaussdbt.commons.roach_meta_info import mount_bind_backup_path, umount_bind_backup_path
from gaussdbt.resource.gaussdbt_resource import GaussCluster
from common.common import output_result_file, exter_attack, execute_cmd_list, execute_cmd
from common.common_models import SubJobDetails, ActionResult, LogDetail
from common.const import JobData, SubJobStatusEnum, ExecuteResultEnum, ReportDBLabel, DBLogLevel, SysData, \
    BackupTypeEnum, DeployType
from common.logger import Logger
from common.parse_parafile import ParamFileUtil, get_user_name
from common.util.check_utils import is_valid_id
from common.util.exec_utils import check_path_valid
from common.util.scanner_utils import scan_dir_size
from common.file_common import change_path_permission

# Module-level logger; all functions in this plugin write to gaussdbt_plugin.log.
log = Logger().get_logger("gaussdbt_plugin.log")


@exter_attack
def query_abort_progress(backup_id):
    """Query the progress of an abort (stop-task) request.

    Checks whether the roach stop process is still running; once it has
    exited, the abort is reported as successful.

    :param backup_id: main task id of the backup being aborted
    :return: True when the abort finished, False while it is still running
    """
    log.info(f"Job id: {backup_id} exec {BackupStepEnum.STOP_TASK_PROGRESS.value} interface")
    output = SubJobDetails(taskId=backup_id, subTaskId="", progress=100,
                           taskStatus=SubJobStatusEnum.ABORTED_FAILED.value)
    # Look for a still-running roach stop process.  "grep -v grep" excludes
    # the grep command itself from the match; without it the pipeline always
    # exits 0 and the abort would never be reported as finished.
    cmd = ["ps -ef", f"grep {RoachConstant.ROACH_STOP}", "grep -v grep"]
    return_code, std_out, std_err = execute_cmd_list(cmd)
    result = False
    if return_code != "0":
        log.info(f"Exec {BackupStepEnum.STOP_TASK_PROGRESS.value} succeed")
        output.task_status = SubJobStatusEnum.ABORTED.value
        result = True
    output_result_file(JobData.PID, output.dict(by_alias=True))
    log.info(f"Job id: {backup_id} exec {BackupStepEnum.STOP_TASK_PROGRESS.value} interface end")
    return result


@exter_attack
def query_progress(step, backup_id, sub_job_id):
    """Report progress for the pre-task / post-task progress steps.

    Parses the protected parameter file, evaluates the matching progress
    marker file under the cache repository and writes a SubJobDetails result
    for the framework.

    :param step: progress step name from argv
    :param backup_id: main task id
    :param sub_job_id: sub job id (used by the post-task progress step)
    :return: False on parameter errors, True otherwise
    """
    log.info(f"Job id: {backup_id} exec {step} interface")
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    try:
        param_protection = ParamProtection(JobData.PID, step, backup_id)
    except Exception:
        log.error("Failed to parse param file")
        return False
    cache_path = param_protection.get_repository_list().get("cache_repository")[0]
    if not cache_path:
        log.error(f"Cache_path: {cache_path} illegal")
        return False
    output = SubJobDetails(taskId=backup_id, subTaskId="", progress=100, logDetail=[],
                           taskStatus=SubJobStatusEnum.FAILED.value)
    err_dict = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[], logDetailInfo=[],
                         logLevel=3)
    if step == BackupStepEnum.PRE_TASK_PROGRESS:
        # Progress of the pre task.
        marker = os.path.join(cache_path, BackupStepEnum.PRE_TASK_PROGRESS.value)
        get_pre_progress(marker, cache_path, backup_id, output, err_dict)
    elif step == BackupStepEnum.POST_TASK_PROGRESS:
        # Progress of the post task; its marker file is suffixed by sub job id.
        marker = os.path.join(cache_path, f"{BackupStepEnum.POST_TASK_PROGRESS.value}_{sub_job_id}")
        output.sub_task_id = sub_job_id
        get_post_progress(marker, backup_id, output)
    else:
        log.error(f"Job id: {backup_id} exec {step} interface not support")
    output_result_file(JobData.PID, output.dict(by_alias=True))
    return True


def get_post_progress(file_path, backup_id, output):
    """Read the post-task progress marker file and update *output*.

    The file contains "Running" while the post sub job is in flight and
    "Completed" once it has finished; the marker is removed on completion.

    :param file_path: marker file path under the cache repository
    :param backup_id: main task id (logging only)
    :param output: SubJobDetails to receive the task status
    """
    # Bail out when the file is missing or the path is invalid.  The original
    # guard ("missing AND valid") let a missing-but-invalid path fall through
    # to open() and raise FileNotFoundError.
    if not os.path.exists(file_path) or not check_path_valid(file_path):
        log.error(f"Job id: {backup_id} exec {BackupStepEnum.POST_TASK_PROGRESS} interface Failed")
        return
    with open(file_path, "r", encoding='UTF-8') as tmp_file:
        data = tmp_file.read()
    if "Running" in data:
        output.task_status = SubJobStatusEnum.RUNNING.value
    elif "Completed" in data:
        output.task_status = SubJobStatusEnum.COMPLETED.value
        # Consume the marker so completion is not reported twice.
        os.remove(file_path)


def get_pre_progress(file_path, cache_path, backup_id, output, err_dict):
    """Evaluate pre-task progress from its marker file and error-code file.

    :param file_path: pre-task marker file path
    :param cache_path: cache repository path holding the error-code file
    :param backup_id: main task id
    :param output: SubJobDetails to receive status / error details
    :param err_dict: LogDetail template used to report a failure code
    """
    if not os.path.exists(file_path):
        log.error(f"Job id: {backup_id} exec {BackupStepEnum.PRE_TASK_PROGRESS} interface Failed")
        return
    # Consume the marker once it is confirmed to live under the cache.
    if check_if_path_in_cache(file_path, cache_path):
        os.remove(file_path)
    user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    pre_job_err = query_err_code(os.path.join(cache_path, f'{backup_id}errcode'), user)
    if pre_job_err in (NormalErr.NO_ERR, NormalErr.WAITING):
        log.info(f"Job id: {backup_id} exec {BackupStepEnum.PRE_TASK_PROGRESS} interface succeed")
        output.task_status = SubJobStatusEnum.COMPLETED.value
        return
    # A real error code was recorded: attach it to the report.
    err_dict.log_detail = pre_job_err
    output.log_detail.append(err_dict)
    log.info(f"Job id: {backup_id} exec {BackupStepEnum.PRE_TASK_PROGRESS} interface failed")


def check_database_backup_progress(backup_id):
    """Query the database-side backup progress through zsql.

    Runs ``select progress from DV_BACKUP_PROCESSES`` as the protected user
    and extracts the value row from the tabular output.

    :param backup_id: main task id (logging only)
    :return: progress string, or "UNKNOWN" when it cannot be determined
    """
    log.info(f"Job id: {backup_id} check database backup progress.")
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    cmd = f"su - {user_name} -c 'zsql / as sysdba -q -c \"select progress from DV_BACKUP_PROCESSES;\"'"
    return_code, std_out, std_err = execute_cmd(cmd)
    if return_code != "0":
        log.error(f"Fail to exec cmd check database backup progress! Error: {std_err}.")
        return "UNKNOWN"
    sql_result = std_out.split("\n")
    log.info(f"Check database backup progress sql result: {sql_result}.")
    # Locate the "PROGRESS" header line; the value sits two lines below it
    # (header, separator row, value).
    key_index = next((i for i, line in enumerate(sql_result) if "PROGRESS" in line), None)
    if key_index is None:
        # Header missing: do not guess at a fixed row, report unknown.
        return "UNKNOWN"
    value_index = key_index + 2
    if len(sql_result) > value_index:
        return sql_result[value_index]
    return "UNKNOWN"


def set_log_detail(cache_path, backup_id, sub_job_id, task_status, progress):
    """Build the SubJobDetails record for one backup progress report.

    Reads the roach progress file; on a failure marker the label file is
    removed and an error detail is attached.  The first successful report
    creates the label file and announces "start copy"; later reports include
    the database-side progress and throttle polling with a 60s sleep.

    :param cache_path: cache repository path holding progress/label files
    :param backup_id: main task id (also the progress file name)
    :param sub_job_id: current sub job id
    :param task_status: SubJobStatusEnum value computed by the caller
    :param progress: integer-like progress percentage
    :return: populated SubJobDetails
    """
    log.info(f"Start to set log detail, sub job id: {sub_job_id}, progress: {progress}, task status: {task_status}.")
    output = SubJobDetails(taskId=backup_id, subTaskId=sub_job_id, taskStatus=task_status,
                           progress=int(progress), logDetail=[], dataSize=0, speed=0, extendInfo=None)
    err_log = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                        logDetailInfo=[], logLevel=3)
    label_file = os.path.join(cache_path, f'{sub_job_id}{GaussCluster.get_hostname()}lb')
    progress_file = os.path.join(cache_path, backup_id)
    try:
        with open(progress_file, "r", encoding='UTF-8') as f:
            data = f.read()
    except OSError:
        # OSError subsumes the former (FileExistsError, IOError) pair: IOError
        # is an alias of OSError, and FileExistsError is never raised by a
        # read-open anyway — a missing file raises FileNotFoundError.
        log.warning("Failed to read progress file!")
        return output
    if "[FAILURE]" in data or 'Failed' in data:
        # Backup failed: drop the label file and attach an error detail.
        if os.path.exists(label_file):
            os.remove(label_file)
        err_log.log_info = ReportDBLabel.BACKUP_SUB_FAILED
        err_log.log_info_param = [sub_job_id]
        err_log.log_detail = ErrorCode.ERROR_INTERNAL_ERROR.value
        err_log.log_level = DBLogLevel.ERROR.value
        err_log.log_detail_info = [data]
        output.log_detail.append(err_log)
        return output

    if not os.path.exists(label_file):
        # First report for this sub job: create the label file and announce
        # the copy start.
        try:
            pathlib.Path(label_file).touch()
        except Exception:
            log.error(f'Error while creating label file {label_file}')
            return output
        err_log.log_info = ReportDBLabel.BACKUP_SUB_START_COPY
        err_log.log_info_param = [sub_job_id]
        err_log.log_level = DBLogLevel.INFO.value
        output.log_detail.append(err_log)
    elif task_status != SubJobStatusEnum.FAILED.value:
        db_progress = check_database_backup_progress(backup_id)
        err_log.log_info = GaussDBTReportDBLabel.BACKUP_PROGRESS_CHECKING
        err_log.log_info_param = [GaussCluster.get_endpoint_by_hostname(), db_progress]
        err_log.log_level = DBLogLevel.INFO.value
        output.log_detail.append(err_log)
        # Throttle the polling frequency of the progress query.
        time.sleep(60)
    return output


def _create_owned_dir(path, user_name):
    """Create *path* (mode 0o755) if absent and chown it to *user_name*.

    :return: True when the directory exists or was created, False on failure
    """
    if os.path.exists(path):
        return True
    log.info(f"Start to create sub dir: {path}")
    try:
        os.makedirs(path, mode=0o755)
    except Exception as ex:
        log.error(f"Make directory {path} failed, error: {ex}")
        return False
    change_path_permission(path, user_name)
    return True


def create_sub_dirs_for_cluster_nodes(data_area, meta_area, log_area=None):
    """Create per-node sub directories for cluster deployments.

    Non-cluster deployments — or clusters whose uids are already consistent —
    keep the original paths untouched.

    :param data_area: mounted data directory
    :param meta_area: mounted meta directory
    :param log_area: mounted log directory (optional, log backups only)
    :return: tuple (success, data_area, meta_area, log_area); on success the
        paths are replaced with the node-specific sub directories
    """
    if GaussCluster.get_deploy_type() != DeployType.CLUSTER_TYPE:
        log.info("Not cluster type, no need to create sub dirs.")
        return True, data_area, meta_area, log_area
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    if check_uid_consistency(user_name, JobData.PID):
        log.info("Uid is consistent, no need to create sub dirs.")
        return True, data_area, meta_area, log_area
    local_ip = GaussCluster.get_endpoint_by_hostname()
    log.info(f"Start to create sub dirs for node: {local_ip}.")
    # Each node gets its own <parent>/<ip>/data and <parent>/<ip>/meta.
    sub_data_area = os.path.join(os.path.dirname(data_area), local_ip, "data")
    sub_meta_area = os.path.join(os.path.dirname(meta_area), local_ip, "meta")
    if not _create_owned_dir(sub_data_area, user_name):
        return False, data_area, meta_area, log_area
    if not _create_owned_dir(sub_meta_area, user_name):
        return False, data_area, meta_area, log_area
    data_area = sub_data_area
    meta_area = sub_meta_area
    if log_area:
        # The log area nests the node ip under the original mount point.
        sub_log_area = os.path.join(log_area, local_ip)
        if not _create_owned_dir(sub_log_area, user_name):
            return False, data_area, meta_area, log_area
        log_area = sub_log_area
    log.info("Create sub dirs for cluster node successfully.")
    return True, data_area, meta_area, log_area


@exter_attack
def exec_cluster_backup_prepare(step, backup_id, sub_job_id, repositories_info):
    """Run the cluster "prepare" sub job or report its progress.

    Execute step: create per-node sub directories, bind-mount the backup
    paths and touch a marker file.  Progress step (BACKUP_PROGRESS): consume
    that marker file and write a SubJobDetails result for the framework.

    :param step: current backup step
    :param backup_id: main task id
    :param sub_job_id: current sub job id
    :param repositories_info: mapping of repository kind -> path list
    :return: False when sub-directory creation failed, True otherwise
    """
    cache_path = repositories_info.get("cache_repository")[0]
    data_path = os.path.join(repositories_info.get("data_repository")[0], "data")
    meta_path = os.path.join(repositories_info.get("meta_repository")[0], "meta")
    # Marker file written by the execute step and consumed by the progress step.
    result_file = os.path.join(cache_path, f"{BackupStepEnum.PRE_BACKUP_PROGRESS.value}_{sub_job_id}")
    if step == BackupStepEnum.BACKUP_PROGRESS:
        log.info(f"Job id: {backup_id} exec backup prepare progress interface")
        output = SubJobDetails(taskId=backup_id, subTaskId=sub_job_id, progress=100,
                               taskStatus=SubJobStatusEnum.FAILED.value, logDetail=[], dataSize=0,
                               speed=0, extendInfo=None)
        err_log = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                            logDetailInfo=[], logLevel=3)
        if not os.path.exists(result_file):
            log.error(f"Job id: {backup_id} cluster backup prepare progress file {result_file} not exit")
        else:
            # Remove the marker only when it is confirmed to sit inside cache.
            if check_if_path_in_cache(result_file, cache_path):
                os.remove(result_file)
            log.info(f"Job id: {backup_id} remove cluster backup prepare file {result_file} success")
            output.task_status = SubJobStatusEnum.COMPLETED.value
        err_log.log_info = ReportDBLabel.BACKUP_SUB_START_PREPARE
        err_log.log_info_param = [sub_job_id]
        err_log.log_level = DBLogLevel.INFO.value
        output.log_detail.append(err_log)
        output_result_file(JobData.PID, output.dict(by_alias=True))
        log.info(f"Job id: {backup_id} exec backup prepare progress interface success")
    else:
        log.info(f"Job id: {backup_id} exec backup prepare interface")
        # A log repository is only present for log backups; it changes both
        # the sub-dir layout and the bind-mount arguments.
        if "log_repository" in repositories_info:
            log_path = repositories_info.get("log_repository")[0]
            ret, data_path, meta_path, log_path = create_sub_dirs_for_cluster_nodes(data_path, meta_path, log_path)
            if not ret:
                return False
            mount_bind_backup_path(data_path, meta_path, log_path, backup_type=BackupTypeEnum.LOG_BACKUP)
        else:
            ret, data_path, meta_path, _ = create_sub_dirs_for_cluster_nodes(data_path, meta_path)
            if not ret:
                return False
            mount_bind_backup_path(data_path, meta_path)
        pathlib.Path(result_file).touch()
        output = ActionResult(code=ExecuteResultEnum.SUCCESS)
        output_result_file(JobData.PID, output.dict(by_alias=True))
        log.info(f"Job id: {backup_id} create backup cluster prepare file {result_file} success")
    return True


@exter_attack
def exec_cluster_umount_bind(step, backup_id, sub_job_id):
    """Run or report the cluster umount-bind sub job.

    In the progress step (BACKUP_PROGRESS) a completed SubJobDetails record
    is written out; otherwise the bind mounts created for the backup are
    removed.

    :param step: current backup step
    :param backup_id: main task id
    :param sub_job_id: current sub job id
    :return: always True
    """
    log.debug(f"Job id: {backup_id} enter umount bind interface")
    if step != BackupStepEnum.BACKUP_PROGRESS:
        umount_bind_backup_path()
        log.info(f"Job id: {backup_id} exec backup umount bind interface success")
        return True
    log.info(f"Job id: {backup_id} exec cluster backup umount bind progress interface")
    detail = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                       logDetailInfo=[], logLevel=3)
    detail.log_info = ReportDBLabel.BACKUP_SUB_START_UMOUNT
    detail.log_info_param = [sub_job_id]
    detail.log_level = DBLogLevel.INFO.value
    output = SubJobDetails(taskId=backup_id, subTaskId=sub_job_id, progress=100,
                           taskStatus=SubJobStatusEnum.COMPLETED.value, logDetail=[], dataSize=0,
                           speed=0, extendInfo=None)
    output.log_detail.append(detail)
    output_result_file(JobData.PID, output.dict(by_alias=True))
    return True


def get_backup_copy_size(backup, sub_job_id, repositories_info, backup_type, output):
    """Compute the size of the produced backup copy and store it in *output*.

    For cluster deployments with inconsistent uids, the per-node sub
    directories are scanned and summed; otherwise the single repository path
    is scanned.

    :param backup: BackUP instance providing the backup key
    :param sub_job_id: current sub job id (used by the scanner and label file)
    :param repositories_info: mapping of repository kind -> path list
    :param backup_type: BackupTypeEnum value (log vs data backup)
    :param output: SubJobDetails to receive data_size
    :return: the updated *output*
    """
    cache_area = repositories_info.get("cache_repository", [""])[0]
    # Build the label-file path once; also guard the removal with
    # os.path.exists (as set_log_detail does) so a missing label cannot make
    # os.remove raise FileNotFoundError.
    label_file = os.path.join(cache_area, f'{sub_job_id}{GaussCluster.get_hostname()}lb')
    if check_path_valid(label_file) and os.path.exists(label_file):
        os.remove(label_file)
    backup_key = backup.get_backup_key()
    deploy_type = GaussCluster.get_deploy_type()
    log.info(f"{deploy_type} get copy size.")
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    if deploy_type == DeployType.CLUSTER_TYPE and not check_uid_consistency(user_name, JobData.PID):
        size = 0
        ret = True
        for node in GaussCluster.get_all_node():
            if backup_type == BackupTypeEnum.LOG_BACKUP:
                log.info("Get log copy size.")
                data_area = os.path.join(repositories_info.get("log_repository", [""])[0], node.node_ip)
                data_path = os.path.join(data_area, RoachConstant.ROACH_ARCH, backup_key)
            else:
                log.info("Get data copy size.")
                data_area = os.path.join(repositories_info.get("data_repository", [""])[0], node.node_ip, "data")
                data_path = os.path.join(data_area, RoachConstant.ROACH_DATA, "roach", backup_key)
            sub_ret, sub_size = scan_dir_size(sub_job_id, data_path)
            log.info(f"{node.node_ip} sub_size: {sub_size}, result: {sub_ret}.")
            # Only report a total when every node scan succeeded.
            ret = ret and sub_ret
            size += sub_size
        if ret:
            log.info(f"Total size: {size}.")
            output.data_size = int(size)
        return output
    if backup_type == BackupTypeEnum.LOG_BACKUP:
        data_area = os.path.join(repositories_info.get("log_repository", [""])[0])
        data_path = os.path.join(data_area, RoachConstant.ROACH_ARCH, backup_key)
    else:
        data_area = os.path.join(repositories_info.get("data_repository", [""])[0], "data")
        data_path = os.path.join(data_area, RoachConstant.ROACH_DATA, "roach", backup_key)
    ret, size = scan_dir_size(sub_job_id, data_path)
    if ret:
        output.data_size = int(size)
    return output


@exter_attack
def exec_backup(step, backup_id, sub_job_id):
    """Main dispatcher for the backup-related steps.

    Parses the protected parameter file, builds a BackUP instance and routes
    *step* to the matching operation (stop, pre/backup/post, progress query
    or copy query), writing the framework result file where required.

    :param step: backup step name from argv
    :param backup_id: main task id
    :param sub_job_id: current sub job id (may be empty)
    :return: result of the dispatched operation, False on parameter errors
    """
    log.info(f"Job id: {backup_id} exec {step} interface")
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    try:
        param_protection = ParamProtection(JobData.PID, step, backup_id, user_name)
    except Exception:
        log.error("Failed to parse param file")
        return False
    # stop_backup below consumes user_name, which must be validated first by
    # instantiating ParamProtection above.
    if step == BackupStepEnum.STOP_TASK:
        return stop_backup(user_name)
    repositories_info = param_protection.get_repository_list()
    all_repositories = param_protection.safe_get_all_repositories()
    backup_type = param_protection.get_backup_type()
    backup_standby = param_protection.get_backup_standby()
    # Every repository kind must provide a non-empty first path.
    for value in repositories_info.values():
        if not value[0]:
            return False
    backup = BackUP(repositories_info, backup_id, backup_type, user_name, JobData.PID, all_repositories, backup_standby)
    backup.set_sub_job_id_and_parallel_process(sub_job_id, param_protection.get_parallel_process())
    backup.set_sub_dirs_for_cluster_nodes()
    function_dict = {
        BackupStepEnum.PRE_TASK.value: backup.pre_backup,
        BackupStepEnum.BACKUP.value: backup.backup,
        BackupStepEnum.POST_TASK.value: backup.post_backup
    }
    function_type = step
    subjob = param_protection.get_subjob()
    # The prepare and umount_bind sub jobs both use the Backup function type;
    # when one of them was executed, its result is returned directly here.
    ret, result = exec_cluster_job(step, backup_id, sub_job_id, subjob, repositories_info)
    if ret:
        log.info(f"Job id: {backup_id} exec {step} interface result: {result}")
        return result
    if function_type in function_dict.keys():
        result = function_dict.get(function_type)()
    elif function_type == BackupStepEnum.BACKUP_PROGRESS:
        result, progress, task_status = backup.backup_progress()
        cache = repositories_info.get("cache_repository", [""])[0]
        output = set_log_detail(cache, backup_id, sub_job_id, task_status, progress)
        # Only compute the copy size once the sub job completed and its label
        # file still lives inside the cache repository.
        if task_status == SubJobStatusEnum.COMPLETED.value and \
                check_if_path_in_cache(os.path.join(cache, f'{sub_job_id}{GaussCluster.get_hostname()}lb'), cache):
            output = get_backup_copy_size(backup, sub_job_id, repositories_info, backup_type, output)
        output_result_file(JobData.PID, output.dict(by_alias=True))
    elif function_type == BackupStepEnum.QUERY_BACKUP_COPY:
        copy_id = ParamFileUtil.parse_copy_id(param_protection.get_copy())
        result, output = backup.query_copy_info(copy_id)
        output_result_file(JobData.PID, output.dict(by_alias=True))
    else:
        log.error(f"Job id: {backup_id} exec step: {function_type} failed for step: {function_type} not support")
        return result
    log.info(f"Job id: {backup_id} exec {function_type} interface result: {result}")
    return result


def exec_cluster_job(step, backup_id, sub_job_id, sub_job, repositories_info):
    """Dispatch the cluster-only sub jobs by name.

    :param sub_job: sub-job dict from the parameter file (may be falsy)
    :return: (handled, result) — handled is True when the sub job was one of
        the cluster-specific jobs ("prepare" / "umount_bind") and result is
        that job's outcome; (False, False) means the caller proceeds normally.
    """
    if sub_job:
        job_name = sub_job.get("jobName", " ")
        if job_name == "prepare":
            return True, exec_cluster_backup_prepare(step, backup_id, sub_job_id, repositories_info)
        if job_name == "umount_bind":
            return True, exec_cluster_umount_bind(step, backup_id, sub_job_id)
    return False, False


def do_word():
    """Entry point: parse argv, validate ids and dispatch the step.

    argv layout (after the script name): step, pid, backup_id[, sub_job_id].

    :return: result of the dispatched step, or False on missing arguments
    """
    args = sys.argv[1:]
    if len(args) < 3:
        log.error("Param error")
        return False
    step = args[0]
    JobData.PID = args[1]
    backup_id = args[2]
    # Progress query steps carry a fourth argument: the sub job id.
    sub_job_id = args[3] if len(args) == 4 else ""
    # Validate pid, job_id and sub_job_id before they are used anywhere.
    id_checks = (
        (JobData.PID, 'pid is invalid!'),
        (backup_id, 'job_id is invalid!'),
        (sub_job_id, 'sub_job_id is invalid!'),
    )
    for value, message in id_checks:
        if not is_valid_id(value):
            log.warn(message)
            sys.exit(1)
    if step == BackupStepEnum.STOP_TASK_PROGRESS:
        return query_abort_progress(backup_id)
    if step.endswith("Progress") and step != BackupStepEnum.BACKUP_PROGRESS:
        return query_progress(step, backup_id, sub_job_id)
    return exec_backup(step, backup_id, sub_job_id)


if __name__ == "__main__":
    # Cache the single stdin line before dispatching; presumably carries the
    # sensitive auth payload referenced through SysData — verify with caller.
    SysData.SYS_STDIN = sys.stdin.readline()
    # A non-zero exit code signals failure to the invoking framework.
    if not do_word():
        sys.exit(1)
