#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import grp
import json
import os
import pwd
import re
import shutil
import socket
import sys

from pydantic import BaseModel

from common.util.cmd_utils import cmd_format
from gaussdbt.commons.const import Env, RoachConstant, JSON_SCHEMA_PATH
from gaussdbt.commons.database_common import get_database_max_protect_mode, check_if_path_in_cache, \
    gaussdbt_check_user_name_and_injection, get_area_parent_path
from gaussdbt.commons.gaussdbt_common import check_backpy_key
from gaussdbt.commons.gaussdbt_param_protection import ParamProtection
from gaussdbt.commons.gaussdbt_utils import GaussDBTUtils
from gaussdbt.commons.models import BackupJobPermission
from gaussdbt.commons.roach_meta_info import RoachMeta, umount_bind_backup_path_for_delete, mount_bind_path, \
    umount_bind_path
from gaussdbt.resource.gaussdbt_resource import GaussCluster
from common.common import output_result_file, execute_cmd, exter_attack, clean_dir, check_path_legal
from common.common_models import ActionResult, SubJobDetails
from common.const import ExecuteResultEnum, JobData, SubJobStatusEnum, CMDResult, CopyDataTypeEnum, \
    SysData, ParamConstant, RepositoryDataTypeEnum
from common.file_common import exec_lchown_dir_recursively, change_path_permission
from common.logger import Logger
from common.parse_parafile import ParamFileUtil, get_user_name
from common.util.exec_utils import exec_mkdir_cmd, check_path_valid, exec_overwrite_file, exec_cat_cmd

# Module-wide logger for every GaussDBT delete-copy entry point in this file.
log = Logger().get_logger("gaussdbt_plugin.log")
# Main-job id; assigned from sys.argv in the __main__ guard at the bottom of the file.
JOB_ID = ""


@exter_attack
def query_job_permission():
    """Resolve the database user/group that backup job files must belong to.

    :return: (ExecuteResultEnum.SUCCESS, BackupJobPermission) with user, its
             primary group and a fixed 0700 file mode
    """
    log.info(f"Exec QueryJobPermission interface")
    Env.USER_NAME = "application_auth_authKey"
    db_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    # Look up the account's primary group via the system user database.
    primary_gid = pwd.getpwnam(str(db_user)).pw_gid
    db_group = grp.getgrgid(primary_gid).gr_name
    permission = BackupJobPermission(user=db_user, group=db_group, fileMode="0700")
    log.info(f"Exec QueryJobPermission interface success")
    return ExecuteResultEnum.SUCCESS, permission


@exter_attack
def check_cluster_status(user_name):
    """Check whether the cluster state blocks the current job.

    :param user_name: database user used for the max-protect-mode query
    :return: (blocked, output) — blocked is True when the job must not run
             and output then carries the error; otherwise (False, result)
    """
    cluster_state = GaussCluster.get_cluster_state()
    if not cluster_state:
        log.error("The cluster status is empty.")
        return False, ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR,
                                   message=f"The cluster status is empty.")
    # Queried unconditionally, as in the original flow.
    instance_status = GaussCluster.get_instance_status_by_hostname()
    if cluster_state == "Degraded":
        if instance_status != "ONLINE":
            log.error(f"Job id: {JOB_ID} instance status is {instance_status} can not exec job")
            return True, ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=1577210008)
        if get_database_max_protect_mode(user_name):
            log.error(f"Job id: {JOB_ID} instance is max protect mode can not exec job")
            return True, ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=1577210008)
    if cluster_state == "Unavailable":
        log.info("The cluster status is Unavailable.")
        return True, ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=1577210007,
                                  message="The cluster status is Unavailable.")
    log.info(f"The cluster is {cluster_state}")
    return False, ActionResult(code=ExecuteResultEnum.SUCCESS)


@exter_attack
def allow_backup_local_node():
    """Decide whether the local node may execute the job.

    prepare/umount_bind sub-jobs are always allowed; everything else needs a
    healthy cluster state and an ONLINE local instance.

    :return: (ExecuteResultEnum, ActionResult)
    """
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    log.info(f"Job id: {JOB_ID} exec AllowBackupInLocalNode interface")
    try:
        param_dict = ParamFileUtil.parse_param_file_and_valid_by_schema(JobData.PID, JSON_SCHEMA_PATH)
    except Exception:
        log.exception(f"Job id: {JOB_ID} exec delete copy failed for failed to parse param file")
        return ExecuteResultEnum.INTERNAL_ERROR, ActionResult(
            code=ExecuteResultEnum.INTERNAL_ERROR, message="The current can not exec backup job")
    if "subJob" in param_dict:
        sub_job_name = param_dict.get("subJob").get("jobName", " ")
        # These sub-jobs run regardless of cluster health.
        if sub_job_name in ("prepare", "umount_bind"):
            return ExecuteResultEnum.SUCCESS, ActionResult(code=ExecuteResultEnum.SUCCESS)
    log.info(f"Job id: {JOB_ID} check cluster state")
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    blocked, output = check_cluster_status(user_name)
    if blocked:
        return ExecuteResultEnum.SUCCESS, output
    instance_status = GaussCluster.get_instance_status_by_hostname()
    if instance_status != "ONLINE":
        log.error(f"Job id: {JOB_ID} instance status is {instance_status} can not exec job")
        return ExecuteResultEnum.SUCCESS, ActionResult(
            code=ExecuteResultEnum.INTERNAL_ERROR, message="The current can not exec backup job")
    return ExecuteResultEnum.SUCCESS, ActionResult(code=ExecuteResultEnum.SUCCESS)


def check_dirs(backup_key_list, backup_key_dirs, file_dir):
    """Create a sub-directory under *file_dir* for every key missing from *backup_key_dirs*."""
    for missing_key in (key for key in backup_key_list if key not in backup_key_dirs):
        exec_mkdir_cmd(os.path.join(file_dir, missing_key))


def create_association_dir(result_file):
    """Recreate per-backup-key directories referenced by the roach delete output.

    Parses "Deleting backup-key" lines from *result_file* and makes sure a
    directory exists for each key under both the data and meta delete paths.

    :param result_file: path of the roach delete progress file
    :return: True on success, False when the file or base directories are missing
    """
    if not os.path.exists(result_file):
        log.error(f"Delete result file: {result_file} not exists")
        return False
    with open(result_file, "r", encoding='UTF-8') as progress_fd:
        progress_lines = progress_fd.readlines()
    backup_key_list = []
    for item in progress_lines:
        if "Deleting backup-key" not in item:
            continue
        tab_fields = item.split("\t")
        if len(tab_fields) <= 1:
            log.warning(f"Split delete item: {item} info failed")
            continue
        # The backup key lives in the first tab field, as its last space-separated token.
        backup_key_infos = tab_fields[0]
        space_tokens = backup_key_infos.split(" ")
        if len(space_tokens) <= 1:
            log.warning(f"Split backup key: {backup_key_infos} failed")
            continue
        backup_key_list.append(space_tokens[-1])
    data_dir = os.path.join(RoachConstant.ROACH_DATA_DEL_FILE_PATH, RoachConstant.ROACH_DATA, "roach")
    if not os.path.exists(data_dir):
        log.error("Copy dir not exits create dir failed")
        return False
    check_dirs(backup_key_list, os.listdir(data_dir), data_dir)
    meta_dir = os.path.join(RoachConstant.ROACH_META_DEL_FILE_PATH, "roach")
    if not os.path.exists(meta_dir):
        log.error("Copy meta dir not exits create dir failed")
        return False
    check_dirs(backup_key_list, os.listdir(meta_dir), meta_dir)
    return True


@exter_attack
def delete_copy(repositories_info, backup_key, copy_type):
    """Delete the copy identified by *backup_key* using the roach delete tool.

    :param repositories_info: repository dict carrying the "cache_repository" path list
    :param backup_key: roach backup key of the copy to delete
    :param copy_type: copy type; log copies are removed from disk directly
    :return: ExecuteResultEnum.SUCCESS on success, ExecuteResultEnum.INTERNAL_ERROR otherwise
    """
    log.info(f"Job id: {JOB_ID} exec delete copy info backup_key: {backup_key}")
    cache_repository_path = repositories_info.get("cache_repository", [""])[0]
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    roach_home = GaussCluster.get_roach_home(user_name)
    if gaussdbt_check_user_name_and_injection(f"{user_name} {roach_home}"):
        log.error("The parameter verification fails.")
        return ExecuteResultEnum.INTERNAL_ERROR
    roach_meta_path = RoachConstant.ROACH_META_DEL_FILE_PATH
    roach_media_path = os.path.join(RoachConstant.ROACH_DATA_DEL_FILE_PATH, RoachConstant.ROACH_DATA)
    roach_meta = RoachMeta(RoachConstant.ROACH_META_DEL_FILE_PATH)
    delete_progress_file = os.path.join(cache_repository_path, f"{JOB_ID}_delete_progress")
    if copy_type == CopyDataTypeEnum.LOG_COPY:
        # Log copies are plain archive files on disk; no roach invocation needed.
        if delete_log_copy(backup_key):
            return already_delete_copy(user_name, delete_progress_file)
        log.info(f"delete log copy failed, job id:{JOB_ID}")
        # Fix: return the enum instead of bare False for a consistent result type.
        return ExecuteResultEnum.INTERNAL_ERROR
    if roach_meta.check_copy_delete_status(backup_key):
        # Already deleted on the meta side: just mark success in the progress file.
        return already_delete_copy(user_name, delete_progress_file)
    # Fix: the original had a second, unreachable LOG_COPY branch here that built an
    # "--arch" parameter set; LOG_COPY always returns above, so only cascade remains.
    param = cmd_format(
        "--backup-key {} --arch-destination {} --metadata-destination {} --media-destination {} --cascade",
        backup_key, RoachConstant.ROACH_LOG_FILE_PATH, roach_meta_path, roach_media_path)
    cmd = f'su - {user_name} -c "{roach_home}{RoachConstant.ROACH_COPY_DELETE} {param} >{delete_progress_file}"'
    # Remove the backup-key log directory first so stale archives don't block the delete.
    backup_key_log_dir = os.path.join(RoachConstant.ROACH_LOG_FILE_PATH, backup_key)
    if os.path.isdir(backup_key_log_dir):
        clean_dir(backup_key_log_dir)
        log.info("Clean backup_key log dir path(%s) success.", backup_key_log_dir)
    return_code, std_out, std_err = execute_cmd(cmd)
    if return_code != CMDResult.SUCCESS:
        if os.path.exists(delete_progress_file):
            with open(delete_progress_file, "r", encoding='UTF-8') as f:
                cmd_err = f.read()
            std_out = f"{std_out}, cmd err: {cmd_err}"
        log.error(f"Fail to delete backup exec cmd:{cmd}, err: {std_out}, std err: {std_err}.")
        return ExecuteResultEnum.INTERNAL_ERROR
    if not create_association_dir(delete_progress_file):
        log.error("Create association dir failed")
        # Fix: enum instead of bare False; the caller compares against SUCCESS.
        return ExecuteResultEnum.INTERNAL_ERROR
    log.info("Succeed to delete backup")
    return already_delete_copy(user_name, delete_progress_file)


def delete_log_copy(backup_key):
    """Remove the on-disk archive directory of a log copy.

    :param backup_key: key of the backup whose archive directory is deleted
    :return: True when the copy is gone (or was already absent), False on any
             failure — an empty key or a delete error
    """
    if not backup_key:
        log.error(f"Backup key is empty.")
        return False
    copy_dir = os.path.join(RoachConstant.ROACH_LOG_FILE_PATH, RoachConstant.ROACH_ARCH, backup_key)
    if not os.path.exists(copy_dir):
        log.info(f"log copy has already been deleted")
        return True
    # Snapshot the directory contents before deleting anything.
    entries = os.listdir(copy_dir)
    if not entries:
        log.info(f"log copy dir is empty")
        return True
    for entry in entries:
        del_path = os.path.join(copy_dir, entry)
        if os.path.isfile(del_path):
            try:
                check_and_del_file(del_path)
            except Exception as exception_str:
                log.error(f"Fail to delete log copy, file path:{del_path}. {exception_str}")
                return False
            continue
        try:
            check_and_del_dir(del_path)
        except Exception as exception_str:
            log.error(f"Fail to delete log copy, dir path:{del_path}. {exception_str}")
            return False
    # Finally drop the (now empty) backup-key directory itself.
    try:
        check_and_del_dir(copy_dir)
    except Exception as exception_str:
        log.error(f"Fail to delete backup_key dir, dir path:{copy_dir}. {exception_str}")
        return False
    log.info(f"Success to delete log copy, job id:{JOB_ID}")
    return True


def check_and_del_dir(target_dir_path):
    """Recursively remove *target_dir_path* when it exists and is a directory.

    Non-directories and missing paths are silently left alone.
    """
    # os.path.isdir is False for missing paths, so one check covers both conditions.
    if os.path.isdir(target_dir_path):
        shutil.rmtree(target_dir_path)


def check_and_del_file(file_path):
    """Delete *file_path* if it exists and lies inside the allowed parent area."""
    allowed_parent = get_area_parent_path()
    # Guard against deleting anything outside the sanctioned area.
    if not check_path_legal(file_path, allowed_parent):
        return
    if os.path.exists(file_path):
        os.remove(file_path)


def already_delete_copy(user_name, delete_progress_file):
    """Mark the delete as finished by echoing DeleteSuccessfully into the progress file.

    :param user_name: account the echo runs as
    :param delete_progress_file: progress file polled by delete_progress()
    :return: ExecuteResultEnum.SUCCESS or ExecuteResultEnum.INTERNAL_ERROR
    """
    cmd = f'su - {user_name} -c "echo DeleteSuccessfully > {delete_progress_file}"'
    return_code, std_out, std_err = execute_cmd(cmd)
    if return_code == CMDResult.SUCCESS:
        return ExecuteResultEnum.SUCCESS
    # Append whatever the progress file captured to the error details.
    if os.path.exists(delete_progress_file):
        with open(delete_progress_file, "r", encoding='UTF-8') as progress_fd:
            cmd_err = progress_fd.read()
        std_out = f"{std_out}, cmd err: {cmd_err}"
    log.error(f"Fail to delete backup exec cmd, err: {std_out}, stderr: {std_err}")
    return ExecuteResultEnum.INTERNAL_ERROR


@exter_attack
def delete_copies_info():
    """Entry point for the DeleteCopy interface.

    Parses the job parameters, runs any mount/umount prepare sub-job, and
    otherwise performs the actual copy delete (unmounting on failure).

    :return: (ExecuteResultEnum, ActionResult)
    """
    log.info(f"Job id: {JOB_ID} exec delete copy interface")
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    failed_result = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, message="The delete copy failed")
    try:
        param_protection = ParamProtection(JobData.PID)
    except Exception:
        log.exception(f"Job id: {JOB_ID} exec delete copy failed for failed to parse param file")
        return ExecuteResultEnum.INTERNAL_ERROR, failed_result
    copy_info = param_protection.get_copy_info()
    if not copy_info:
        log.error("Delete Copy info size is null")
        return ExecuteResultEnum.INTERNAL_ERROR, failed_result
    repositories_info = param_protection.get_job_repository_list()
    if not repositories_info:
        log.error("Get repositories info failed")
        return ExecuteResultEnum.INTERNAL_ERROR, failed_result
    copy_type = copy_info[0].get("type")
    backup_key = copy_info[0].get("extendInfo", {}).get("backupIndexId", "")
    if not check_backpy_key(backup_key):
        log.error("Check backup key fail")
        return ExecuteResultEnum.INTERNAL_ERROR, failed_result
    if param_protection.get_subjob():
        handled, result = delete_cluster_prepare(param_protection, copy_type)
        if handled:
            final = ActionResult(code=ExecuteResultEnum.SUCCESS) \
                if result == ExecuteResultEnum.SUCCESS else failed_result
            return result, final
    ret = delete_copy(repositories_info, backup_key, copy_type)
    if ret != ExecuteResultEnum.SUCCESS:
        # Best-effort cleanup of any mounts before reporting the failure.
        delete_copy_cluster_umount("", repositories_info, copy_type,
                                   param_protection.get_job_extend_info())
        return ret, failed_result
    return ExecuteResultEnum.SUCCESS, ActionResult(code=ExecuteResultEnum.SUCCESS)


def remove_progress_file(delete_progress_file, cache_repository_path):
    """Delete the progress file when it lives inside the cache repository.

    :return: True when removed (or not in cache), False on any error
    """
    try:
        in_cache = check_if_path_in_cache(delete_progress_file, cache_repository_path)
        if in_cache:
            os.remove(delete_progress_file)
        return True
    except Exception:
        log.error(f"Delete file: {delete_progress_file} failed")
        return False


@exter_attack
def delete_progress():
    """Report the delete job's progress by inspecting its progress files.

    :return: (ok, SubJobStatusEnum value) — ok is False when progress could
             not be determined at all
    """
    try:
        param_protection = ParamProtection(JobData.PID)
    except Exception:
        log.exception(f"Job id: {JOB_ID} exec delete copy failed for failed to parse param file")
        return False, SubJobStatusEnum.FAILED.value
    log.info(f"Job id: {JOB_ID} exec delete copy progress interface")
    repositories_info = param_protection.get_job_repository_list()
    if not repositories_info:
        log.error("Get repositories info failed")
        return False, SubJobStatusEnum.FAILED.value
    cache_repository_path = repositories_info.get("cache_repository", [""])[0]
    subjob_name = param_protection.get_subjob().get("jobName")
    host_name = socket.gethostname()
    # Mount/umount sub-jobs keep their own per-host progress files.
    if subjob_name == "delete_copy_mount":
        progress_file = os.path.join(cache_repository_path, f"delete_mount_progress_{JOB_ID}_{host_name}")
        log.info(f"Delete sub job name:{subjob_name} progress success")
        return True, get_delete_progress_mount(progress_file)
    if subjob_name == "delete_copy_umount":
        progress_file = os.path.join(cache_repository_path, f"delete_umount_progress_{JOB_ID}_{host_name}")
        return True, get_delete_progress_mount(progress_file)
    delete_progress_file = os.path.join(cache_repository_path, f"{JOB_ID}_delete_progress")
    if not os.path.exists(delete_progress_file) or os.path.islink(delete_progress_file):
        log.error(f"Delete progress file not exit :{delete_progress_file}")
        return False, SubJobStatusEnum.FAILED.value
    with open(delete_progress_file, "r", encoding='UTF-8') as progress_fd:
        progress_info = progress_fd.read()
    if any(marker in progress_info for marker in ("[FAILURE]", "Failed", "Error")):
        log.error(f"Delete copy failed, error: {progress_info}")
        remove_progress_file(delete_progress_file, cache_repository_path)
        return False, SubJobStatusEnum.FAILED.value
    log.debug(f"Delete progress file info :{progress_info}")
    if "DeleteSuccessfully" in progress_info:
        remove_progress_file(delete_progress_file, cache_repository_path)
        return True, SubJobStatusEnum.COMPLETED.value
    return True, SubJobStatusEnum.RUNNING.value


def get_mount_data_path(extend_info):
    """Return the data-repo mount base: the FC path when fibre channel is in use."""
    return RoachConstant.ROACH_DATA_FC_FILE_PATH if if_fc_using(extend_info) \
        else RoachConstant.ROACH_DATA_DEL_FILE_PATH


def get_mount_meta_path(extend_info):
    """Return the meta-repo mount base: the FC path when fibre channel is in use."""
    return RoachConstant.ROACH_META_FC_FILE_PATH if if_fc_using(extend_info) \
        else RoachConstant.ROACH_META_DEL_FILE_PATH


def get_mount_log_path(extend_info):
    """Return the log-repo mount base: the FC path when fibre channel is in use."""
    return RoachConstant.ROACH_LOG_FC_FILE_PATH if if_fc_using(extend_info) \
        else RoachConstant.ROACH_LOG_FILE_PATH


def delete_cluster_prepare(param_protection, copy_type):
    """Dispatch the mount/umount prepare sub-jobs of a cluster delete.

    :param param_protection: parsed job parameters
    :param copy_type: copy type forwarded to mount/umount
    :return: (handled, result) — handled is True when a prepare sub-job ran
             (result is then an ExecuteResultEnum); otherwise the caller
             proceeds with the real delete
    """
    subjob = param_protection.get_subjob()
    job_repos = param_protection.get_job_repository_list()
    if not job_repos:
        log.error("Get repositories info failed")
        return False, SubJobStatusEnum.FAILED.value
    cache_repository_path = job_repos.get("cache_repository", [""])[0]
    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    change_path_permission(cache_repository_path, user_name, mode=0o777)
    subjob_name = subjob.get("jobName")
    job_extend_info = param_protection.get_job_extend_info()
    copies = param_protection.get_job().get("copies", [])
    if not copies:
        log.error("Get copy info failed")
        return True, ExecuteResultEnum.INTERNAL_ERROR
    # The repos to mount come from the copy being deleted, not the job repos.
    copy_repositories = copies[0].get("repositories", "")
    if not copy_repositories:
        log.error("Get repositories info failed")
        return True, ExecuteResultEnum.INTERNAL_ERROR
    if subjob_name == "delete_copy_mount":
        return True, delete_copy_cluster_mount(cache_repository_path, copy_repositories,
                                               copy_type, job_extend_info)
    if subjob_name == "delete_copy_umount":
        return True, delete_copy_cluster_umount(cache_repository_path, copy_repositories,
                                                copy_type, job_extend_info)
    return False, ExecuteResultEnum.SUCCESS


def get_cluster_sub_dir(check_path):
    """Return the per-node sub-directory for new-format copies, "" for old ones.

    New-format copies embed an IPv4 address in the path; old copies do not.
    """
    if not GaussDBTUtils.validate_ipv4(check_path):
        log.info(f"Old backup copy, check path: {check_path}.")
        return ""
    log.info(f"New backup copy, sub path: {check_path}.")
    return GaussCluster.get_endpoint_by_hostname()


def _get_mount_params(repository, extend_info):
    """Build the rpctool mount-request body for one repository.

    :param repository: repository dict to mount
    :param extend_info: job extend info passed through unchanged
    :return: dict consumed by the MountRepositoryByPlugin RPC
    """
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    mount_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    return {
        "repository": [repository],
        "permission": {"user": mount_user, "fileMode": "0700"},
        "extendInfo": extend_info,
    }


def mount_file_system(repository, path, extend_info, is_meta=False, is_log=False):
    """Rewrite the repo's remotePath/path for this node and mount it via rpctool.

    :param repository: repository dict, mutated in place (remoteHost/remotePath/path)
    :param path: local mount base path
    :param extend_info: job extend info (fibre-channel flags)
    :param is_meta: repository is a meta repo (different remotePath layout)
    :param is_log: repository is a log repo (different remotePath layout)
    :return: True on success, False on any failure
    """
    log.info(f'remotePath: {repository.get("remotePath")}, is_meta={is_meta}, is_log={is_log}, job id: {JOB_ID}')
    remote_host_array = repository.get("remoteHost", [])
    if not remote_host_array:
        log.error("Get repo remote host failed")
        return False
    # Mount from one remote host only.
    repository["remoteHost"] = [remote_host_array[0]]
    temp_list = repository.get("remotePath").split("/")
    # Fix: temp_list[2] is read just below, so at least three components are needed;
    # the original "< 2" guard let a short path raise IndexError.
    if len(temp_list) < 3:
        log.error("Get repo name failed")
        return False
    sub_dir = get_cluster_sub_dir(temp_list[2])
    repository["remotePath"] = os.path.join("/", temp_list[1], sub_dir, "data")
    repository["path"] = [path]
    if if_fc_using(extend_info):
        repository["path"] = [os.path.join(path, temp_list[1], sub_dir, "data")]
    if is_meta:
        if "LogRepository" in repository.get("remotePath"):
            # Agent log paths have repo type 0; skip them so mounting does not fail.
            log.warning(f"Ignore agent meta path: {repository['remotePath']}!")
            return True
        sub_dir = get_cluster_sub_dir(GaussDBTUtils.get_value_at_position(temp_list, 3))
        repository["remotePath"] = os.path.join("/", temp_list[1], temp_list[2], sub_dir, "meta")
        repository["path"] = [path]
        if if_fc_using(extend_info):
            repository["path"] = [os.path.join(path, temp_list[1], temp_list[2], sub_dir, "meta")]
    if is_log:
        # NOTE(review): validate_ipv4's result is used directly as a path component
        # here — looks like get_cluster_sub_dir may have been intended; confirm.
        sub_dir = GaussDBTUtils.validate_ipv4(GaussDBTUtils.get_value_at_position(temp_list, 3))
        repository["remotePath"] = os.path.join("/", temp_list[1], "data", sub_dir)
    param_path = os.path.join(ParamConstant.PARAM_FILE_PATH, f"param_{JOB_ID}")
    result_path = os.path.join(ParamConstant.RESULT_PATH, f"result_{JOB_ID}")
    # Remove any stale param file from a previous run before writing a new one.
    if os.path.exists(param_path) and check_path_valid(param_path):
        os.remove(param_path)
    param_json = _get_mount_params(repository, extend_info)
    log.debug(f"Mount param is {param_json}")
    try:
        exec_overwrite_file(param_path, param_json)
    except Exception as exception_str:
        log.error(f"Create file failed {exception_str}")
        return False
    cmd = f"{os.path.join(ParamConstant.BIN_PATH, 'rpctool.sh')} 'MountRepositoryByPlugin' {param_path} {result_path}"
    ret, out, err = execute_cmd(cmd)
    if ret != CMDResult.SUCCESS:
        log.error(f"An error occur in execute cmd. ret:{out} err:{err}")
        return False
    return True


def mount_log_info(repositories_info, cache_path, extend_info):
    """Mount the log repository for a log-copy delete (FC bind-mount included).

    :param repositories_info: repository list of the copy being deleted
    :param cache_path: cache repository path for progress/flag files
    :param extend_info: job extend info (fibre-channel flags)
    :return: ExecuteResultEnum.SUCCESS or ExecuteResultEnum.INTERNAL_ERROR
    """
    host_name = socket.gethostname()
    progress_file = os.path.join(cache_path, f"delete_mount_progress_{JOB_ID}_{host_name}")
    log_repo = next((rep for rep in repositories_info
                     if rep.get("repositoryType") == RepositoryDataTypeEnum.LOG_REPOSITORY), dict())
    # Nothing to do when the log path is already mounted.
    if os.path.ismount(RoachConstant.ROACH_LOG_FILE_PATH):
        return ExecuteResultEnum.SUCCESS
    exec_mkdir_cmd(os.path.join(cache_path, f"mount_file_{JOB_ID}_{host_name}"))
    if not mount_file_system(log_repo, get_mount_log_path(extend_info), extend_info, False, True):
        log.error("Mount log repo failed")
        update_delete_mount_progress("Failed", progress_file)
        return ExecuteResultEnum.INTERNAL_ERROR
    if if_fc_using(extend_info):
        repo_parts = log_repo.get("remotePath", "").split("/")
        sub_dir = get_cluster_sub_dir(repo_parts[3])
        bind_source = os.path.join(RoachConstant.ROACH_LOG_FC_FILE_PATH, repo_parts[1], "data", sub_dir)
        if not mount_bind_path(bind_source, RoachConstant.ROACH_LOG_FILE_PATH):
            log.error("Mount bind log path failed")
            return ExecuteResultEnum.INTERNAL_ERROR
    return ExecuteResultEnum.SUCCESS


def if_fc_using(extend_info):
    """Return True when this agent's fibre-channel flag in *extend_info* is "true"."""
    ok, out = exec_cat_cmd("/etc/HostSN/HostSN")
    if not ok:
        log.warning(f"Fail to get agent id")
        return False
    agent_id = out.strip()
    fc_raw = extend_info.get("fibreChannel", "")
    if not fc_raw:
        log.warning(f"Fail to get FC")
        return False
    fc_map = json.loads(fc_raw)
    if not fc_map.get(agent_id, ""):
        log.warning(f"Fail to get FC")
        return False
    return fc_map.get(agent_id) == "true"


def get_repos_info(repositories_info):
    """Split the repository list into the first data repo and all meta repos.

    :return: (data_repo, meta_repos) — data_repo is {} when none is present
    """
    data_repo = next((rep for rep in repositories_info
                      if rep.get("repositoryType") == RepositoryDataTypeEnum.DATA_REPOSITORY), dict())
    meta_repos = [rep for rep in repositories_info
                  if rep.get("repositoryType") == RepositoryDataTypeEnum.META_REPOSITORY]
    return data_repo, meta_repos


def delete_copy_cluster_mount(cache_path, repositories_info, copy_type, extend_info):
    """Mount the data/meta (and, for log copies, log) repositories for a cluster delete.

    Writes Running/Failed/Success into the per-host mount progress file so that
    delete_progress() can report this sub-job's state.

    :param cache_path: cache repository path used for progress/flag files
    :param repositories_info: repository list taken from the copy being deleted
    :param copy_type: copy type; the log repo is only mounted for log copies
    :param extend_info: job extend info (fibre-channel flags)
    :return: ExecuteResultEnum.SUCCESS or ExecuteResultEnum.INTERNAL_ERROR
    """
    log.info(f"Job id: {JOB_ID} exec delete copy mount ")
    host_name = socket.gethostname()
    is_mount_file = os.path.join(cache_path, f"mount_file_{JOB_ID}_{host_name}")
    # Clear any bind mounts left behind by a previous run before mounting again.
    umount_bind_backup_path_for_delete(is_mount_file)
    progress_file = os.path.join(cache_path, f"delete_mount_progress_{JOB_ID}_{host_name}")
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    database_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    change_path_permission(cache_path, database_user)
    update_delete_mount_progress("Running", progress_file)
    if if_fc_using(extend_info):
        # FC setups also need their stale bind mounts removed before re-binding below.
        umount_fc_paths(repositories_info, copy_type)
    data_repo, meta_repos = get_repos_info(repositories_info)
    mount_path = get_mount_data_path(extend_info)
    if data_repo and not mount_file_system(data_repo, mount_path, extend_info, False, False):
        log.error("Mount data repo failed")
        update_delete_mount_progress("Failed", progress_file)
        return ExecuteResultEnum.INTERNAL_ERROR
    if if_fc_using(extend_info):
        # Bind the FC data path onto the standard delete path that roach expects.
        sub_dir = get_cluster_sub_dir(data_repo.get("remotePath", "").split("/")[2])
        if not mount_bind_path(os.path.join(RoachConstant.ROACH_DATA_FC_FILE_PATH,
                                            data_repo.get("remotePath", "").split("/")[1], sub_dir, "data"),
                               RoachConstant.ROACH_DATA_DEL_FILE_PATH):
            log.error("Mount bind data path failed")
            return ExecuteResultEnum.INTERNAL_ERROR
    for meta_repo in meta_repos:
        mount_path = get_mount_meta_path(extend_info)
        if not mount_file_system(meta_repo, mount_path, extend_info, True, False):
            log.error("Mount meta repo failed")
            update_delete_mount_progress("Failed", progress_file)
            return ExecuteResultEnum.INTERNAL_ERROR
        if if_fc_using(extend_info):
            sub_dir = get_cluster_sub_dir(meta_repo.get("remotePath", "").split("/")[3])
            # Agent log paths are tagged as meta repos; never bind-mount those.
            if "LogRepository" not in meta_repo.get("remotePath") and \
                    not mount_bind_path(os.path.join(RoachConstant.ROACH_META_FC_FILE_PATH,
                                                     meta_repo.get("remotePath", "").split("/")[1],
                                                     meta_repo.get("remotePath", "").split("/")[2], sub_dir, "meta"),
                                        RoachConstant.ROACH_META_DEL_FILE_PATH):
                log.error("Mount bind meta path failed")
                return ExecuteResultEnum.INTERNAL_ERROR
    if copy_type == CopyDataTypeEnum.LOG_COPY:
        if mount_log_info(repositories_info, cache_path, extend_info) != ExecuteResultEnum.SUCCESS:
            return ExecuteResultEnum.INTERNAL_ERROR
    update_delete_mount_progress("Success", progress_file)
    log.info(f"Job id: {JOB_ID} exec delete copy mount success")
    return ExecuteResultEnum.SUCCESS


def update_delete_mount_progress(status, delete_progress_file):
    """Write *status* into the mount progress file as the protect-env user.

    :param status: keyword consumed by get_delete_progress_mount (Running/Failed/Success)
    :param delete_progress_file: per-host progress file path
    :return: True on success, False when the echo command fails
    """
    Env.USER_NAME = "job_protectEnv_auth_authKey"
    database_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    cmd = f'su - {database_user} -c "echo {status} > {delete_progress_file}"'
    return_code, std_out, std_err = execute_cmd(cmd)
    if return_code == CMDResult.SUCCESS:
        return True
    # Append whatever the progress file captured to the error details.
    if os.path.exists(delete_progress_file):
        with open(delete_progress_file, "r", encoding='UTF-8') as progress_fd:
            cmd_err = progress_fd.read()
        std_out = f"{std_out}, cmd err: {cmd_err}"
    log.error(f"Fail to delete backup exec cmd, err: {std_out}, stderr: {std_err}")
    return False


def get_delete_progress_mount(delete_progress_file):
    """Map the keyword found in the mount progress file to a sub-job status value."""
    if not os.path.exists(delete_progress_file) or os.path.islink(delete_progress_file):
        log.error(f"Delete progress file not exit :{delete_progress_file}")
        return SubJobStatusEnum.FAILED.value
    with open(delete_progress_file, "r", encoding='UTF-8') as progress_fd:
        progress_info = progress_fd.read()
    # Keyword precedence matches the original chain: Failed, then Success, then Running.
    keyword_to_status = (
        ("Failed", SubJobStatusEnum.FAILED.value),
        ("Success", SubJobStatusEnum.COMPLETED.value),
        ("Running", SubJobStatusEnum.RUNNING.value),
    )
    for keyword, status in keyword_to_status:
        if keyword in progress_info:
            return status
    return SubJobStatusEnum.FAILED.value


def umount_fc_paths(repositories_info, copy_type):
    """Unbind the fibre-channel bind mounts for the data, meta and (log copies only) log repos.

    :param repositories_info: repository dict list from the job parameters
    :param copy_type: copy type; the log repo is only unbound for log copies
    :return: True when every umount succeeded, False on the first failure
    """
    # Consistency fix: reuse the shared repo classification instead of
    # re-implementing the data/meta scan inline as the original did.
    data_repo, meta_repos = get_repos_info(repositories_info)
    if not umount_bind_path(os.path.join(RoachConstant.ROACH_DATA_FC_FILE_PATH,
                                         data_repo.get("remotePath", "").split("/")[1])):
        log.warning("Umount bind data path failed")
        return False
    for meta_repo in meta_repos:
        # Agent log paths are tagged as meta repos; skip them to avoid a bogus umount.
        if "LogRepository" not in meta_repo.get("remotePath") and \
                not umount_bind_path(os.path.join(RoachConstant.ROACH_META_FC_FILE_PATH,
                                                  meta_repo.get("remotePath", "").split("/")[1])):
            log.warning("Umount bind meta path failed")
            return False
    if copy_type == CopyDataTypeEnum.LOG_COPY:
        log_repo = next((rep for rep in repositories_info
                         if rep.get("repositoryType") == RepositoryDataTypeEnum.LOG_REPOSITORY), dict())
        if not umount_bind_path(os.path.join(RoachConstant.ROACH_LOG_FC_FILE_PATH,
                                             log_repo.get("remotePath", "").split("/")[1])):
            log.warning("Umount bind log path failed")
            return False
    return True


def delete_copy_cluster_umount(cache_path, repositories_info, copy_type, job_extend_info):
    """Unmount everything a cluster delete mounted on this host.

    Progress is only reported when *cache_path* is set; the failure path of
    delete_copies_info() calls this with an empty cache_path for cleanup only.

    :return: ExecuteResultEnum.SUCCESS (cleanup is best-effort)
    """
    log.info(f"Job id: {JOB_ID} exec delete copy umount ")
    host_name = socket.gethostname()
    progress_file = os.path.join(cache_path, f"delete_umount_progress_{JOB_ID}_{host_name}")
    report_progress = cache_path != ""
    if report_progress:
        update_delete_mount_progress("Running", progress_file)
    umount_bind_backup_path_for_delete(os.path.join(cache_path, f"mount_file_{JOB_ID}_{host_name}"))
    if if_fc_using(job_extend_info):
        umount_fc_paths(repositories_info, copy_type)
    if report_progress:
        update_delete_mount_progress("Success", progress_file)
    log.info(f"Job id: {JOB_ID} exec delete copy umount success ")
    return ExecuteResultEnum.SUCCESS


def do_work(interface_type):
    """Dispatch one plugin interface call and write its result file.

    :param interface_type: interface name from the command line
    :return: ExecuteResultEnum used as the process exit code
    """
    simple_handlers = {
        "QueryJobPermission": query_job_permission,
        "AllowBackupInLocalNode": allow_backup_local_node,
        "DeleteCopy": delete_copies_info,
    }
    if interface_type in simple_handlers:
        result, out_info = simple_handlers[interface_type]()
    elif interface_type == "DeleteProgress":
        sub_job_id = sys.argv[4]
        out_info = SubJobDetails(taskId=JOB_ID, subTaskId=sub_job_id, taskStatus=SubJobStatusEnum.FAILED.value,
                                 progress=int(100))
        _, status = delete_progress()
        out_info.task_status = status
        # Progress queries always report success at the RPC level; the status
        # field carries the real state.
        result = ExecuteResultEnum.SUCCESS
        log.debug(f"Report Detail job: {out_info.dict(by_alias=True)}")
    else:
        result = ExecuteResultEnum.INTERNAL_ERROR
        out_info = BaseModel()
    output_result_file(JobData.PID, out_info.dict(by_alias=True))
    return result


if __name__ == "__main__":
    # argv layout: [1] interface type, [2] PID, [3] job id (absent for
    # QueryJobPermission), [4] sub-job id (DeleteProgress only, read in do_work).
    func_type = sys.argv[1]
    JobData.PID = sys.argv[2]
    if func_type != "QueryJobPermission":
        JOB_ID = sys.argv[3]
    # Sensitive data (e.g. credentials) arrives on stdin rather than argv.
    SysData.SYS_STDIN = sys.stdin.readline()
    sys.exit(do_work(func_type))
