#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import glob
import json
import os
import re
import socket
import sqlite3
import time
import uuid

from pathlib import Path

import psutil

from dws.commons.common import log, is_proxy_node, get_roach_ip_and_port, is_intrusive, \
    get_cur_agent_id_from_param, del_path_without_exception, get_result_path, get_process_cmd, \
    get_cur_host_all_ip, record_subtask_info_to_file, get_subtask_info_from_file, check_subtask_info, \
    dws_exec_cmd_by_type, get_dws_config, copy_file_into_sandbox, \
    open_non_secret, close_non_secret, mount_bind_sandbox_path, select_available_port, construct_roach_param, \
    read_result_file, delete_result_path, construct_dws_cmd, set_file_attribute, get_file_attribute, \
    save_business_config, convert_to_timestamp, split_line_in_table_list_file, compare_version
from common.util.cmd_utils import cmd_format
from common.util.exec_utils import su_exec_rm_cmd
from common.file_common import change_path_permission, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd
from common.util.common_utils import get_group_name_by_os_user
from common.common import execute_cmd, check_path_legal, output_result_file
from common.common_models import Copy, CopyInfoRepModel, SubJobDetails, LogDetail, ActionResult, SubJobModel
from common.const import SubJobStatusEnum, ExecuteResultEnum, BackupTypeEnum, DBLogLevel, PathConstant, \
    SubJobPolicyEnum
from common.util.scanner_utils import scan_dir_size
from common.job_const import ParamKeyConst
from common.number_const import NumberConst
from dws.backup.distribute_dws_nodes import DistributeDwsNodes
from common.env_common import get_install_head_path
from dws.commons.const import IntrusiveMode, TaskType, DwsRetEnum, BackupResType, CopyGenerateType, SubJobType, \
    RpcParamKey, JsonKey, DwsBackupLabel, PERMISSION_755, PERMISSION_700, \
    PERMISSION_600, PERMISSION_640, PERMISSION_644, DwsDeployType, DwsRoachPort, DwsBackupStatus, BusinessConfigType
from dws.commons.dws_exception import ErrCodeException
from dws.commons.dws_param_parse import CopyInfoParam
from dws.commons.error_code import DwsErrorCode
from dws.commons.function_tool import log_start, out_result, progress_notify, get_register_ip
from dws.commons.job_info import JobInfo
from dws.commons.progress_notify import get_metadata_path, get_progress_path, read_file, write_file, write_lines, \
    invoke_rpc_tool_interface, get_total_data_size_from_speed_file
from dws.database.db_models import DwsIndex
from dws.database.db_sqlite import DWSIndexFile
from dws.dws_resource import DwsResource
from dws.resource.dws_cluster import DwsCluster
from dws.resource.dws_common import DwsCommon
from dws.resource.query_interface import QueryRes

# Error-text fragments used by anglyze_err_info to classify roach command output.
NO_PERMISSION_IN_METADATA_PATH_STR = "mkdir: cannot create directory"
NO_TABLES_IN_DATABASE_STR = "No table present in current database"
# Any of these messages means another roach backup is already in progress.
ANOTHER_ROACH_BACKUP_IS_RUNNING_STR_LIST = [
    "Another instance of roach backup execution is in progress",
    "Another Roach Fined grained Python operation is already running",
    "Another Roach Python operation is already running"
]
# Flag value meaning a GaussDB roach command succeeded.
GAUSS_ROAH_CMD_SUCCESS = "1"
# Minimum cluster version that supports table.list based fine-grained indexing.
TABLE_LIST_SUPPORT_VERSION = "8.2.1"
# Batch sizes used when inserting index records into the sqlite index file.
SCHEMA_PAGE_SIZE = 100
TABLE_PAGE_SIZE = 5000


class ClusterBackup:

    def __init__(self, pid):
        """Initialize the backup context for one task.

        :param pid: task/request identifier handed in by the framework
        """
        self._pid = pid
        # Interpreter used for roach-side python invocations.
        self._python_version = "python2"
        self._concrete_object_db = "backupkey.db"  # temporary file holding the aggregated object data
        self._list_path = ""
        # Default degree of parallelism — presumably for roach workers; TODO confirm.
        self._process_count = 16
        self._db_url = None
        # DWSIndexFile handle, created lazily in save_index_info.
        self._db = None
        self.cluster_uuid = None
        # Caches mapping database / "db/schema" names to generated UUIDs while
        # building the copymetadata index.
        self.database_uuid_dict = {}
        self.schema_uuid_dict = {}

    @staticmethod
    @log_start()
    def create_path_white_sub_job(copy_id, cache_path, meta_path, host_ip):
        """Create the per-host directories used by the whitelist sub job.

        Creates meta/<copy_id>/objectmeta/<host_ip> in the meta repository and
        tmp/<copy_id>/speed/<host_ip> in the cache repository; cluster nodes do
        not get the host_ip level.  Both dirs become rdadmin with mode 700.

        :param copy_id: copy identifier
        :param cache_path: cache repository mount point
        :param meta_path: meta repository mount point
        :param host_ip: host key used as the per-node directory name
        :return: True
        """
        target_dirs = (
            os.path.join(meta_path, 'meta', copy_id, 'objectmeta', host_ip),
            os.path.join(cache_path, 'tmp', copy_id, 'speed', host_ip),
        )
        for target_dir in target_dirs:
            if not os.path.exists(target_dir):
                exec_mkdir_cmd(target_dir)
            change_path_permission(target_dir, user_name="rdadmin", mode=PERMISSION_700)
        return True

    @staticmethod
    @log_start()
    def get_index_file_path(job_info: JobInfo):
        """Return the path of copymetadata.sqlite inside the meta repository."""
        return os.path.join(job_info.meta_path, 'meta', job_info.copy_id, 'sqlite/copymetadata.sqlite')

    @staticmethod
    @log_start()
    def anglyze_err_info(job_info: JobInfo, err_info: str):
        """Map known roach error output to the matching error-code exception.

        Raises an ErrCodeException for the first recognized pattern; returns
        silently when no known pattern matches.
        """
        def _raise(detail_code, detail_params=None):
            # Only pass logDetailParam when the branch actually supplies one,
            # so the LogDetail payload stays identical to the historic one.
            detail_kwargs = dict(
                logInfo=DwsBackupLabel.BACKUP_SUBJOB_FAILED_LABEL,
                logDetail=detail_code,
                logTimestamp=int(time.time()),
                logLevel=DBLogLevel.ERROR)
            if detail_params is not None:
                detail_kwargs["logDetailParam"] = detail_params
            raise ErrCodeException(log_detail=LogDetail(**detail_kwargs))

        if NO_PERMISSION_IN_METADATA_PATH_STR in err_info:
            log.error(f"User [{job_info.usr}] has no permission in "
                      f"[{job_info.metadata_destination}], {job_info.log_format()}.")
            _raise(DwsErrorCode.NO_PERMISSION_IN_METADATA_PATH,
                   [job_info.usr, job_info.metadata_destination])
        if NO_TABLES_IN_DATABASE_STR in err_info:
            log.error(f"Database [{job_info.res_name}] has no tables, {job_info.log_format()}.")
            _raise(DwsErrorCode.NO_TABLES_IN_DATABASE, [job_info.res_name])
        if any(err_msg in err_info for err_msg in ANOTHER_ROACH_BACKUP_IS_RUNNING_STR_LIST):
            log.error(f"Another roach backup job is already running, {job_info.log_format()}.")
            _raise(DwsErrorCode.ANOTHER_ROACH_BACKUP_IS_RUNNING)
        if "ERROR" in err_info or "failed" in err_info:
            log.error(f"Sub job backup_failed , {job_info.log_format()}.")
            _raise(DwsErrorCode.SUB_JOB_BACKUP_FAILED)

    @staticmethod
    def _merge_host_key_db(db_file_list, object_cur, object_conn):
        for db_file in db_file_list:
            temp_object_conn = sqlite3.connect(db_file)
            temp_object_cur = temp_object_conn.cursor()
            for line in temp_object_cur.execute("select * from BsaObjTable").fetchall():
                cur_object_name = line[2]
                str_line = str(line).replace("None", "''")
                object_cur.execute(f"insert into BsaObjTable values {str_line}")
                query_sql = f"select * from BsaObjTable where objectName = '{cur_object_name}'"
                query_result = object_cur.execute(query_sql).fetchall()
                # 输出新旧两个文件的路径
                object_conn.commit()
            temp_object_cur.close()
            temp_object_conn.close()
        object_cur.close()
        object_conn.close()

    @staticmethod
    def get_all_roach_client_port_not_same_log_detail():
        """Build the LogDetail reported when the roach client ports differ between nodes."""
        port_mismatch_detail = LogDetail(
            logInfo=DwsBackupLabel.BACKUP_SUBJOB_FAILED_LABEL,
            logDetail=DwsErrorCode.ALL_ROACH_CLIENT_PORT_NOT_SAME.value,
            logTimestamp=int(time.time()),
            logLevel=DBLogLevel.ERROR)
        return port_mismatch_detail

    @staticmethod
    def update_progress(job_info: JobInfo, task_status, progress):
        """Persist the current sub-job progress into the progress file in the cache repo."""
        progress_path = get_progress_path(job_info.copy_id, job_info.sub_job_id, job_info.cache_path)
        progress_detail = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id,
                                        taskStatus=task_status, progress=progress)
        write_file(progress_path, json.dumps(progress_detail.dict(by_alias=True)))

    @classmethod
    @log_start()
    def clear_cache(cls, job_info: JobInfo):
        """Remove the tmp and meta directories from the cache repo when the task ends.

        :param job_info: task related information
        :return: True on success, False when the cache path is missing
        """
        log.info(f"Clear cache info, {job_info.log_format()}.")

        if not job_info.cache_path:
            log.error(f"cache_path path error, {job_info.log_format()}.")
            return False

        # Best effort: a failed delete is only logged, not treated as fatal.
        for sub_dir in ("tmp", "meta"):
            if not su_exec_rm_cmd(os.path.join(job_info.cache_path, sub_dir)):
                log.warning(f"Delete cache {sub_dir} dir, {job_info.log_format()}.")

        log.info(f"Del cache res suc. {job_info.log_format()}")
        return True

    @classmethod
    def get_repo_list(cls, job_info: JobInfo):
        """Parse the mapping between X8000 devices and file systems from the task params.

        Groups the task's data repositories by device ESN; each device entry
        carries its role and the list of file systems mounted from it.

        :param job_info: JobInfo task information
        :return: list of {"role", "deviceSN", "filesystems"} dicts
        """
        res_list = []
        for rep in job_info.data_reps:
            role = rep.get("role")
            # Defend against a missing extendInfo (the fsId lookup below already
            # did); the old rep.get("extendInfo").get(...) raised AttributeError.
            device_sn = rep.get("extendInfo", {}).get("esn")
            fs_dict = {
                "id": rep.get("extendInfo", {}).get("fsId", ""),
                "name": rep.get("remotePath", "").strip("/"),
                "sharePath": rep.get("remotePath", "").strip("/"),
                "mountPath": rep.get("path")
            }

            # Attach the file system to the X8000 it belongs to, if already seen.
            not_found = True
            for res in res_list:
                if res.get("deviceSN", "") == device_sn:
                    res.get("filesystems", []).append(fs_dict)
                    not_found = False
                    break

            if not_found:
                res_list.append({"role": role, "deviceSN": device_sn, "filesystems": [fs_dict]})

        return res_list

    @classmethod
    @log_start()
    def save_task_info_to_cache(cls, job_info: JobInfo, task_type, host_ip):
        """Write task info to tmp/<copy_id>/taskInfo_<hostKey>.txt in the cache repo.

        :param job_info: JobInfo task information
        :param task_type: task type stored in the file
        :param host_ip: host key used in the file name
        :return: True
        """
        task_path = os.path.join(job_info.cache_path, 'tmp', job_info.copy_id, f'taskInfo_{host_ip}.txt')
        # Persist file systems / logical ports into the agent's cache repo.
        write_file(task_path, json.dumps({
            "repositories": cls.get_repo_list(job_info),
            "taskType": task_type,
            "copyType": job_info.backup_type
        }))

        change_path_permission(task_path, user_name="rdadmin", mode=PERMISSION_600)

        log.info(f"Save file system info suc, {job_info.log_format()}.")
        return True

    @classmethod
    @log_start()
    def cluster_is_balanced(cls, usr, env_path):
        """Check whether the DWS cluster is balanced and may therefore be backed up.

        :param usr: OS user name
        :param env_path: environment variable file
        :return: True when the cluster is balanced, False on bad params or imbalance
        """
        if usr and env_path:
            return DwsCluster(usr, env_path).is_balanced()
        log.error('Param err')
        return False

    @classmethod
    @log_start()
    def create_data_rep_1(cls, job_info):
        """Create data and data/<copy_id> directories (root:root 755) in the pre job.

        :param job_info: JobInfo task information
        :return: True on success, False when a data repository has no path
        """
        for rep in job_info.data_reps:
            # "path" may be missing entirely (None); the old len(rep.get("path"))
            # raised TypeError in that case — treat it like an empty list.
            rep_paths = rep.get("path") or []
            if not rep_paths:
                log.error("Data rep info err.")
                return False

            # The same copy path was previously computed twice; compute once.
            data_root = os.path.join(rep_paths[0], 'data')
            data_path = os.path.join(data_root, job_info.copy_id)
            if not os.path.exists(data_path):
                exec_mkdir_cmd(data_path)
                log.info("Mkdir data path suc.")

            change_path_permission(data_root, user_name="root", mode=PERMISSION_755)
            change_path_permission(data_path, user_name="root", mode=PERMISSION_755)

            log.info("Chmod data path suc")

        return True

    @classmethod
    @log_start()
    def create_data_rep_2(cls, job_info, host_ip, cur_user):
        """Create data/<copy_id>/<hostKey> (mode 755) during the whitelist sub job.

        :param job_info: JobInfo task information
        :param host_ip: host key used as the per-node directory name
        :param cur_user: owner of the created data directory
        :return: True
        """
        for rep in job_info.data_reps:
            temp_path = rep.get("path", [])
            if not temp_path:
                # log.warn is a deprecated alias — use log.warning as elsewhere
                # in this file; the message itself is unchanged.
                log.warning("No data path.")
                continue
            mount_path = temp_path[0]

            # The mount point's parent belongs to the backup user (700); the
            # mount point itself stays root:755.
            change_path_permission(Path(mount_path).parent, cur_user, mode=PERMISSION_700)
            change_path_permission(mount_path, user_name="root", mode=PERMISSION_755)

            data_path = os.path.join(mount_path, 'data', job_info.copy_id, host_ip)
            if not os.path.exists(data_path):
                exec_mkdir_cmd(data_path)
                log.info("Mkdir data path suc.")

            change_path_permission(data_path, cur_user, mode=PERMISSION_755)
            log.info("Chmod data path suc")

        return True

    @classmethod
    @log_start()
    def pre_job_create_gds_res(cls, copy_id, cache_path, meta_path):
        """Create the GDS cache/meta directories for the pre job and fix their modes."""
        if not (copy_id and cache_path and meta_path):
            log.error(f"Param err. cache_path: {cache_path}, meta_path: {meta_path}, copy_id {copy_id}")
            return False

        # Cache repo: tmp/<copy_id>/gds.
        cache_path_gds = os.path.join(cache_path, 'tmp', copy_id, 'gds')
        if not os.path.exists(cache_path_gds):
            exec_mkdir_cmd(cache_path_gds)
            log.info(f"Cache path does not exist, create cache path {cache_path_gds}.")

        # tmp and tmp/<copy_id> stay 755; the gds dir itself is private (700).
        for target, mode in ((os.path.join(cache_path, 'tmp'), PERMISSION_755),
                             (os.path.join(cache_path, 'tmp', copy_id), PERMISSION_755),
                             (cache_path_gds, PERMISSION_700)):
            change_path_permission(target, user_name="root", mode=mode)

        # Meta repo: meta/<copy_id>/sqlite.
        meta_path_sqlite = os.path.join(meta_path, 'meta', copy_id, 'sqlite')
        if not os.path.exists(meta_path_sqlite):
            exec_mkdir_cmd(meta_path_sqlite)
            log.info(f"Meta path does not exist, create meta path {meta_path_sqlite}.")

        # meta and meta/<copy_id> stay 755; the sqlite dir itself is private (700).
        for target, mode in ((os.path.join(meta_path, 'meta'), PERMISSION_755),
                             (os.path.join(meta_path, 'meta', copy_id), PERMISSION_755),
                             (meta_path_sqlite, PERMISSION_700)):
            change_path_permission(target, user_name="root", mode=mode)
        log.info("Chmod meta path suc")

        return True

    @classmethod
    @log_start()
    def pre_job_create_res(cls, copy_id, cache_path, meta_path):
        """Create the cache-repo (and optionally meta-repo) directories for the pre job.

        :param copy_id: copy identifier
        :param cache_path: cache repository mount point (mandatory)
        :param meta_path: meta repository mount point (optional; skipped when empty)
        :return: True on success, False on bad parameters
        """
        # cache_path is used unconditionally below, so it is mandatory on its own.
        # The old guard "(not cache_path and not meta_path) or not copy_id" only
        # failed when BOTH paths were missing, letting an empty cache_path
        # silently produce relative paths.
        if not cache_path or not copy_id:
            log.error(f"Param err. cache_path: {cache_path}, meta_path: {meta_path}, copy_id {copy_id}")
            return False

        # Cache-repo directories.
        cache_path_meta_object = os.path.join(cache_path, 'meta', copy_id, 'objectmeta')
        cache_path_download = os.path.join(cache_path, 'meta', copy_id, 'download')
        cache_path_speed = os.path.join(cache_path, 'tmp', copy_id, 'speed')
        cache_path_client = os.path.join(cache_path, 'tmp', copy_id, 'roach_client')
        cache_path_relation = os.path.join(cache_path, 'meta', copy_id, 'replication')
        for new_dir in (cache_path_meta_object, cache_path_download, cache_path_speed,
                        cache_path_client, cache_path_relation):
            if not os.path.exists(new_dir):
                exec_mkdir_cmd(new_dir)

        # All dirs are root-owned; only roach_client is private (700).
        permission_list = (
            (os.path.join(cache_path, 'meta'), PERMISSION_755),
            (os.path.join(cache_path, 'meta', copy_id), PERMISSION_755),
            (cache_path_meta_object, PERMISSION_755),
            (cache_path_download, PERMISSION_755),
            (os.path.join(cache_path, 'tmp'), PERMISSION_755),
            (os.path.join(cache_path, 'tmp', copy_id), PERMISSION_755),
            (cache_path_speed, PERMISSION_755),
            (cache_path_client, PERMISSION_700),
            (cache_path_relation, PERMISSION_755),
        )
        for target, mode in permission_list:
            change_path_permission(target, user_name="root", mode=mode)

        # Meta repo is optional; nothing more to do when it is absent.
        if not meta_path:
            log.warning("No meta path.")
            return True

        return ClusterBackup.pre_job_create_meta_dir(copy_id, meta_path)

    @classmethod
    @log_start()
    def pre_job_create_meta_dir(cls, copy_id, meta_path):
        """Create meta/<copy_id>/sqlite and meta/<copy_id>/objectmeta and set their modes."""
        meta_path_sqlite = os.path.join(meta_path, 'meta', copy_id, 'sqlite')
        meta_path_object = os.path.join(meta_path, 'meta', copy_id, 'objectmeta')

        for meta_dir in (meta_path_sqlite, meta_path_object):
            if not os.path.exists(meta_dir):
                exec_mkdir_cmd(meta_dir)

        # root-owned throughout; only the sqlite dir is private (700).
        for target, mode in ((os.path.join(meta_path, 'meta'), PERMISSION_755),
                             (os.path.join(meta_path, 'meta', copy_id), PERMISSION_755),
                             (meta_path_sqlite, PERMISSION_700),
                             (meta_path_object, PERMISSION_755)):
            change_path_permission(target, user_name="root", mode=mode)

        return True

    @classmethod
    @log_start()
    def change_xbsa_mod(cls, usr):
        """Recursively chown the xbsa install dir to the given user (intrusive deploy).

        :param usr: OS user that should own the xbsa install directory
        :return: True on success, False on bad parameter or missing directory
        """
        if not usr:
            log.error(f"Param usr err. usr: {usr}")
            return False

        # Intrusive deployments need the xbsa directory owned by the DB user.
        xbsa_install_dir = f"{PathConstant.XBSA_PATH}/"
        if not Path(xbsa_install_dir).is_dir():
            log.error(f"Xbsa install dir is not exist. dir: {xbsa_install_dir}.")
            return False

        usr_group = get_group_name_by_os_user(usr)
        exec_lchown_dir_recursively(xbsa_install_dir, usr, usr_group)
        log.info("Chown xbsa install recursively suc.")
        return True

    @classmethod
    def check_roach_port(cls, ip_port_path, job_info: JobInfo):
        """Check that every agent reports the same roach client port.

        :param ip_port_path: directory containing one ip/port file per proxy node
        :param job_info: JobInfo task information
        :return: (True, ip_list, port) when all nodes agree, otherwise (False, [], "0")
        :raises ErrCodeException: when no ip/port file could be read at all
        """
        ip_info = [read_file(os.path.join(ip_port_path, name))
                   for name in os.listdir(ip_port_path)
                   if os.path.isfile(os.path.join(ip_port_path, name))]
        proxy_node_cnt = len(ip_info)
        if not proxy_node_cnt:
            log.error(f"Get ip info from file failed.")
            raise ErrCodeException(
                log_detail=LogDetail(
                    logDetail=DwsErrorCode.ROACH_CLIENT_PROCESS_NOT_EXISTS,
                    logDetailParam=[",".join(job_info.host_agents)],
                    logTimestamp=int(time.time()),
                    logLevel=DBLogLevel.ERROR))

        # Group the reported ips by port.
        port_to_ips = {}
        for item in ip_info:
            if 'port' in item and 'ip' in item:
                port_to_ips.setdefault(item.get('port'), []).append(item.get('ip'))

        # A port reported by every proxy node wins.
        for port, ip_list in port_to_ips.items():
            if len(ip_list) >= proxy_node_cnt:
                log.info('Check roach port suc.')
                return True, ip_list, port

        return False, [], "0"

    @classmethod
    @log_start()
    def get_total_data_size(cls, job_info: JobInfo):
        """Sum up the already backed-up data volume in MB."""
        if job_info.backup_tool_type != CopyGenerateType.GENERATE_BY_GDS:
            # Non-GDS copies record their sizes in the speed files.
            total_data_size = get_total_data_size_from_speed_file(
                job_info.cache_path, job_info.copy_id, job_info.job_id)
            log.info(f"Get total data size {total_data_size} MB, {job_info.log_format()}.")
            return total_data_size

        # GDS copies: flatten every X8000's file systems and scan them on disk.
        fs_list = []
        for x8000 in ClusterBackup.get_repo_list(job_info):
            for item in x8000.get("filesystems", []):
                item["x8000_esn"] = x8000.get("deviceSN")
                fs_list.append(item)
        total_data_size = ClusterBackup.get_gds_total_data_size(fs_list, job_info)
        log.info(f"GDS get total data size {total_data_size} MB, {job_info.log_format()}.")
        return total_data_size

    @classmethod
    @log_start()
    def get_gds_total_data_size(cls, fs_list, job_info):
        """Scan every GDS file system's data dir and return the combined size in MB."""
        total_kb = 0
        for file_system in fs_list:
            fs_data_path = os.path.join(file_system.get("mountPath")[0], "data", job_info.copy_id)
            scan_ok, dir_size = scan_dir_size(job_info.copy_id, fs_data_path)
            if scan_ok:
                log.debug(f"Get fs {fs_data_path} total size {dir_size} KB.")
                total_kb += dir_size
        # scan_dir_size reports KB; convert to MB.
        return int(total_kb / 1024)

    @classmethod
    def dws_exec_cmd(cls, sh_cmd: str, result_path: str):
        """Run a ``su - ...`` shell command, capturing its output via a result file.

        The command's output is redirected into ``result_path`` (csh-style
        ``>&`` combines stdout and stderr), which is read back and deleted.
        In sandbox deployments the chroot login banner is stripped from the
        captured output.

        :param sh_cmd: full shell command; must start with ``su - ``
        :param result_path: file receiving the redirected output
        :return: (return code, command output); code "2" on invalid params
        """
        error_ret = "2"
        out_info = ""
        if not sh_cmd.startswith("su - ") or not result_path:
            log.error("Invalid param.")
            return error_ret, out_info
        # Splice the redirection in just before the command's last character —
        # presumably the closing quote of the su -c argument; TODO confirm the
        # expected quoting of sh_cmd.
        real_cmd = sh_cmd[:-1] + f" >& {result_path}" + sh_cmd[-1]
        try:
            cmd_ret, _, _ = execute_cmd(real_cmd)
        except Exception:
            log.error("Execute shell cmd exception.")
            # Clean the result file up even when execution blew up.
            delete_result_path(result_path)
            return DwsRetEnum.FAILED, out_info
        read_ret, out_info = read_result_file(result_path)
        delete_result_path(result_path)
        if not read_ret:
            log.error("Read result file failed.")
            return cmd_ret, out_info
        dws_config = get_dws_config()
        deploy_type = int(dws_config.get("deployType", DwsDeployType.DEPLOY_IN_SERVER.value))
        if deploy_type == DwsDeployType.DEPLOY_IN_SANDBOX.value:
            # Drop everything up to and including the "Authorized users only"
            # banner line printed by the sandbox login shell.
            out_info = out_info.split("\n")
            auth_next_index = 0
            line_num = 0
            for each_line in out_info:
                if "Authorized users only" in each_line:
                    auth_next_index = line_num + 1
                    break
                line_num += 1
            out_info = out_info[auth_next_index:]
            out_info = "\n".join(out_info)
        log.info("DWS exec shell cmd suc.")
        return cmd_ret, out_info

    @classmethod
    def dws_exec_backup_cmd_by_type(cls, user: str, env_file: str, cmd: str):
        """Build the su command for the given user/env file and execute it.

        :return: (return code, output); code "2" when the command cannot be built
        """
        exec_cmd = construct_dws_cmd(user, cmd, env_file)
        if exec_cmd:
            return ClusterBackup.dws_exec_cmd(exec_cmd, get_result_path(user))
        log.error("Construct dws cmd failed")
        return "2", "Construct dws cmd failed"

    @classmethod
    def build_sub_job_list(cls, job_info: JobInfo, is_inc_dif_backup=False, node_id=None):
        """Build the ordered sub-job list: prepare, backup, post, queryCopy.

        Incremental/differential backups pin the backup step to node_id.
        """
        prepare_job = cls.build_sub_job(job_info.job_id, "prepare", 1,
                                        SubJobPolicyEnum.EVERY_NODE_ONE_TIME_SKIP_OFFLINE.value, True)
        if is_inc_dif_backup:
            backup_job = cls.build_sub_job(job_info.job_id, "backup", 2,
                                           SubJobPolicyEnum.FIXED_NODE.value, False, node_id)
        else:
            backup_job = cls.build_sub_job(job_info.job_id, "backup", 2,
                                           SubJobPolicyEnum.ANY_NODE.value, False)
        post_job = cls.build_sub_job(job_info.job_id, "post", 3,
                                     SubJobPolicyEnum.EVERY_NODE_ONE_TIME_SKIP_OFFLINE.value, True)
        query_copy_job = cls.build_sub_job(job_info.job_id, "queryCopy", 4,
                                           SubJobPolicyEnum.ANY_NODE.value, False)
        return [prepare_job, backup_job, post_job, query_copy_job]

    @classmethod
    def build_sub_job(cls, job_id, job_name, job_priority, policy, ignore_failed=False, exec_node_id=None):
        """Build one business sub-job dict (camelCase keys via by_alias)."""
        sub_job_model = SubJobModel(jobId=job_id,
                                    jobType=SubJobType.BUSINESS_SUB_JOB.value,
                                    jobName=job_name,
                                    jobPriority=job_priority,
                                    policy=policy,
                                    ignoreFailed=ignore_failed,
                                    execNodeId=exec_node_id)
        return sub_job_model.dict(by_alias=True)

    @classmethod
    def get_node_id_by_host_ip(cls, job_info: JobInfo, host_ip):
        """Find the node id whose address matches host_ip.

        On-prem DWS matches on endpoint; cloud DWS matches on subNetFixedIp.
        Returns "" when no node matches.
        """
        for node in job_info.nodes:
            endpoint_matches = node.get("endpoint", "") == host_ip
            fixed_ip_matches = node.get("extendInfo", {}).get("subNetFixedIp", "") == host_ip
            if not endpoint_matches and not fixed_ip_matches:
                continue
            node_id = node.get("id", "")
            if not node_id:
                log.error("node id is null")
            return node_id
        log.error("failed to find node id by host ip")
        return ""

    @log_start()
    def update_metadata(self, backup_key, job_info: JobInfo):
        """Record copy metadata into the metadata repository after a successful backup.

        :param backup_key: backup key produced by the backup
        :param job_info: task related information
        :return: True on success, False when the metadata file cannot be written
        """
        metadata = {}
        metadata[JsonKey.BACKUP_KEY] = backup_key
        metadata[JsonKey.HOST_NAME] = socket.gethostname()
        metadata[JsonKey.HOST_IP] = get_register_ip(job_info.cluster_agents, job_info.host_agents)
        metadata[JsonKey.CLUSTER_NODES] = list(
            DwsCluster.get_all_cluster_node_name(job_info.usr, job_info.env_path))
        metadata[JsonKey.STORAGE_ESN] = list(job_info.storage_esn)
        metadata[JsonKey.VERSION] = DwsCluster(job_info.usr, job_info.env_path).get_cluster_version()
        metadata[JsonKey.STORAGE_ID] = job_info.storage_id
        metadata[JsonKey.STORAGE_TYPE] = job_info.storage_type
        metadata[JsonKey.AGENT_ID] = get_cur_agent_id_from_param(job_info.nodes)
        metadata[JsonKey.BACKUP_TOOL_TYPE] = job_info.backup_tool_type
        metadata[JsonKey.METADATA_PATH] = job_info.metadata_destination
        metadata[JsonKey.COPY_ID] = job_info.copy_id
        metadata[JsonKey.TOTAL_DATA_SIZE] = ClusterBackup.get_total_data_size(job_info)
        metadata[JsonKey.BACKUP_TIME] = self._get_backup_time(job_info, backup_key)

        log.info(f"storage_type is {job_info.storage_type}")
        try:
            write_file(get_metadata_path(job_info.copy_id, job_info.meta_path), json.dumps(metadata))
        except Exception as err:
            log.error(f"Write metadata failed, err: {err}, {job_info.log_format()}")
            return False

        log.info(f"Write metadata suc, {job_info.log_format()}")
        return True

    @log_start()
    def save_index_info(self, job_info: JobInfo, backup_key=None):
        """Store the fine-grained index (cluster/db/schema/table) used for restore."""
        index_file_path = ClusterBackup.get_index_file_path(job_info)
        try:
            self._db = DWSIndexFile(f"sqlite:///{index_file_path}")
        except Exception as e:
            log.error(f'Err: {e}, job_info: {job_info.log_format()}')
            raise e

        # With a backup key the table.list file drives the index; otherwise
        # the cluster is queried directly.
        if backup_key:
            self.save_cluster_info_with_table_list(job_info, backup_key)
        else:
            self.save_cluster_info_with_query(job_info)

        # Restrict copymetadata.sqlite to root:root 600.
        change_path_permission(index_file_path, user_name="root", mode=PERMISSION_600)
        return True

    @log_start()
    def save_cluster_info_with_table_list(self, job_info: JobInfo, backup_key):
        """Insert the cluster root record, then index every table from table.list."""
        log.info("Save cluster info with table.list.")
        cluster = job_info.protect_env.get('name')
        self.cluster_uuid = str(uuid.uuid1())
        cluster_record = DwsIndex(UUID=self.cluster_uuid,
                                  NAME=cluster,
                                  TYPE=BackupResType.CLUSTER.value,
                                  PARENT_PATH='/',
                                  PARENT_UUID='')
        self._db.insert_record(cluster_record)

        self.save_table_info_with_table_list(cluster, backup_key, job_info.metadata_destination)

    @log_start()
    def save_table_info_with_table_list(self, cluster, backup_key, metadata_destination):
        """Read table.list and index every table (plus its database and schema).

        Records are flushed in batches of TABLE_PAGE_SIZE to keep memory bounded.

        :param cluster: cluster name (root of the index paths)
        :param backup_key: backup key; locates roach/<backup_key>/table.list
        :param metadata_destination: user-chosen metadata directory
        :raises Exception: when table.list does not exist
        """
        dws_config = get_dws_config()
        deploy_type = int(dws_config.get("deployType", DwsDeployType.DEPLOY_IN_SERVER.value))
        if deploy_type == DwsDeployType.DEPLOY_IN_SANDBOX.value:
            # Sandbox deployments see the metadata dir under the chroot prefix.
            sandbox_path = dws_config.get("sandboxPath", "/var/chroot").strip()
            table_list_path = f"{sandbox_path}/{metadata_destination}/roach/{backup_key}/table.list"
        else:
            table_list_path = f"{metadata_destination}/roach/{backup_key}/table.list"
        if not os.path.exists(table_list_path):
            log.error("Table.list does not exist.")
            raise Exception("Table.list does not exist.")
        tables = []
        with open(table_list_path, 'r', encoding='utf-8') as file:
            for line in file:
                items = split_line_in_table_list_file(line)
                if len(items) != 3:
                    log.error(f"Invalid line [{line}] in table.list")
                    continue
                database = items[0].replace("\"", "")
                schema = items[1].replace("\"", "")
                table = items[2].replace("\"", "")
                if database not in self.database_uuid_dict:
                    self.save_database_info_with_table_list(cluster, database)
                schema_key = f"{database}/{schema}"
                if schema_key not in self.schema_uuid_dict:
                    self.save_schema_info_with_table_list(cluster, database, schema)
                tables.append(DwsIndex(UUID=str(uuid.uuid1()),
                                       NAME=table,
                                       TYPE=BackupResType.TABLE.value,
                                       PARENT_PATH=f"/{cluster}/{database}/{schema}",
                                       PARENT_UUID=self.schema_uuid_dict[schema_key]))
                # Bug fix: flush by the number of pending records; the old
                # len(table) compared the table NAME's length and disabled batching.
                if len(tables) >= TABLE_PAGE_SIZE:
                    self._db.insert_records(tables)
                    tables = []
            if tables:
                self._db.insert_records(tables)

    @log_start()
    def save_database_info_with_table_list(self, cluster, database):
        """
        Persist one database index record under the cluster root and cache
        its UUID for later schema/table records.
        """
        new_uuid = str(uuid.uuid1())
        record = DwsIndex(UUID=new_uuid,
                          NAME=database,
                          TYPE=BackupResType.DATABASE.value,
                          PARENT_PATH=f"/{cluster}",
                          PARENT_UUID=self.cluster_uuid)
        self._db.insert_record(record)
        self.database_uuid_dict[database] = new_uuid

    @log_start()
    def save_schema_info_with_table_list(self, cluster, database, schema):
        """
        Persist one schema index record and cache its UUID keyed by
        'database/schema'.
        """
        new_uuid = str(uuid.uuid1())
        record = DwsIndex(UUID=new_uuid,
                          NAME=schema,
                          TYPE=BackupResType.SCHEMA.value,
                          PARENT_PATH=f"/{cluster}/{database}",
                          PARENT_UUID=self.database_uuid_dict[database])
        self._db.insert_record(record)
        self.schema_uuid_dict[f"{database}/{schema}"] = new_uuid

    def save_cluster_info_with_query(self, job_info: JobInfo):
        """
        Persist the cluster root index record, then index every database
        (and, transitively, schemas and tables) discovered via live queries.
        """
        log.info("Save cluster info with query.")
        root_uuid = str(uuid.uuid1())
        root_record = DwsIndex(UUID=root_uuid,
                               NAME=job_info.res_name,
                               TYPE=BackupResType.CLUSTER.value,
                               PARENT_PATH='/',
                               PARENT_UUID='')
        self._db.insert_record(root_record)
        self.save_database_info_with_query(job_info, root_uuid, '/' + job_info.res_name)

    @log_start()
    def save_database_info_with_query(self, job_info: JobInfo, parent_uuid: str, parent_path: str):
        """
        Query every database in the cluster, persist an index record for each
        one and recurse into its schemas.
        """
        databases = QueryRes(job_info.usr, job_info.env_path).query_all_database_in_cluster()
        for db_name in databases:
            db_uuid = str(uuid.uuid1())
            self._db.insert_record(DwsIndex(UUID=db_uuid,
                                            NAME=db_name,
                                            TYPE=BackupResType.DATABASE.value,
                                            PARENT_PATH=parent_path,
                                            PARENT_UUID=parent_uuid))
            self.save_schema_info_with_query(job_info, db_name, db_uuid, f'{parent_path}/{db_name}')

    @log_start()
    def save_schema_info_with_query(self, job_info: JobInfo, database_name: str, parent_uuid: str, parent_path: str):
        """
        Page through all schemas of a database; persist each schema's index
        record and index the tables it contains.
        """
        page_no = 0
        while True:
            page = QueryRes(job_info.usr, job_info.env_path).query_schema_in_database(database_name, page_no,
                                                                                      SCHEMA_PAGE_SIZE)
            for schema_name in page:
                new_uuid = str(uuid.uuid1())
                self._db.insert_record(DwsIndex(UUID=new_uuid,
                                                NAME=schema_name,
                                                TYPE=BackupResType.SCHEMA.value,
                                                PARENT_PATH=parent_path,
                                                PARENT_UUID=parent_uuid))
                self.save_table_info_with_query(
                    job_info, [database_name, schema_name, new_uuid, f'{parent_path}/{schema_name}'])

            # A short page means the last schema has been reached.
            if len(page) < SCHEMA_PAGE_SIZE:
                break
            page_no += 1

    @log_start()
    def save_table_info_with_query(self, job_info: JobInfo, param_list: list):
        """
        Page through all tables of a schema and persist their index records.

        :param param_list: [database_name, schema_name, parent_uuid, parent_path]
        :raises Exception: when database or schema name is empty
        """
        database_name = param_list[0]
        schema_name = param_list[1]
        parent_uuid = param_list[2]
        parent_path = param_list[3]

        if not database_name or not schema_name:
            raise Exception('Param of database_name or schema_name err')

        page_no = 0
        while True:
            page = QueryRes(job_info.usr, job_info.env_path).query_table_in_schema(database_name, schema_name,
                                                                                   page_no, TABLE_PAGE_SIZE)
            records = [DwsIndex(UUID=str(uuid.uuid1()),
                                NAME=table_name,
                                TYPE=BackupResType.TABLE.value,
                                PARENT_PATH=parent_path,
                                PARENT_UUID=parent_uuid) for table_name in page]
            self._db.insert_records(records)
            if len(page) < TABLE_PAGE_SIZE:
                break
            page_no += 1

    @log_start()
    def construct_cmd(self, job_info: JobInfo, intrusive_mode, port):
        """
        Build the GaussRoach backup command line.

        :param job_info: job related information
        :param intrusive_mode: whether the agent runs in intrusive mode
        :param port: roach client port, used in non-intrusive mode
        :return: the assembled backup command string
        :raises Exception: when no master port is available or the previous
                           backup key cannot be obtained
        """
        log.info(f"Cluster, {job_info.log_format()}")
        master_port = select_available_port(DwsRoachPort.ROACH_PORT_START, DwsRoachPort.ROACH_PORT_END)
        if not master_port:
            raise Exception(f"Master port {master_port} can not use")
        roach_param = construct_roach_param()
        cmd_parts = [
            f"{self._python_version} $GPHOME/script/GaussRoach.py -t backup --master-port {master_port} "
            f"--media-type NBU --media-destination nbu_policy {roach_param} "
            f"--metadata-destination {job_info.metadata_destination} --cbm-recycle-level 1 "
        ]

        if job_info.physical_fine_grained:
            cmd_parts.append(" --physical-fine-grained")

        if intrusive_mode == IntrusiveMode.NON_INTRUSIVE_MODE:
            cmd_parts.append(f" --nbu-on-remote --nbu-media-list /home/{job_info.usr}/media_list_file.txt "
                             f"--client-port {port}")

        if job_info.backup_type != BackupTypeEnum.FULL_BACKUP:
            # Incremental/differential backups chain to the previous copy.
            try:
                backup_key = CopyInfoParam.get_backup_key(self.get_last_copy_info(job_info))
            except Exception as e:
                log.error(f"Get last backup key err, err: {e}")
                raise e
            if not backup_key:
                raise Exception("Last backup key err.")
            log.info(f"Backup_key: {backup_key}")
            cmd_parts.append(f' --prior-backup-key {backup_key}')

        return "".join(cmd_parts)

    @log_start()
    def check_env_allow(self, job_info):
        """
        Verify that this node hosts a CN instance and that its environment
        passes the resource check.

        :return: True when the node may run the job, False otherwise
        :raises ErrCodeException: when no CN instance exists on this node
        """
        if not DwsCommon(job_info.usr, job_info.env_path).is_cur_node_cn():
            log.error(f"Current node not exists CN instance, {job_info.log_format()}")
            detail = LogDetail(logDetail=DwsErrorCode.NO_CN_INSTANCE_WHEN_BACKUP,
                               logTimestamp=int(time.time()),
                               logLevel=DBLogLevel.ERROR)
            raise ErrCodeException(log_detail=detail)

        if DwsResource.check_node_env(job_info.usr, job_info.env_path) != DwsErrorCode.NO_ERR:
            log.error(f"Judge failed. {job_info.log_format()}")
            return False

        log.info(f"Judge suc. {job_info.log_format()}")
        return True

    @log_start()
    def check_agent(self, job_info):
        """
        Decide whether this node may execute the incremental backup based on
        which agent produced the previous copy.

        :return: True when execution is allowed, False otherwise
        """
        cur_agent_id = get_cur_agent_id_from_param(job_info.nodes)
        last_agent_id = None
        try:
            last_agent_id = CopyInfoParam.get_agent_id(self.get_last_copy_info(job_info))
        except Exception as e:
            log.warn(f"Get last copy info failed, err: {e}.")

        # Allowed when: we are the previous agent, the previous agent already
        # failed, or the previous agent is unknown to this task at all.
        same_or_failed = (cur_agent_id == last_agent_id) or (last_agent_id in job_info.failed_agents)
        unknown_agent = (last_agent_id not in job_info.agents) and (last_agent_id not in job_info.failed_agents)
        if same_or_failed or unknown_agent:
            log.info(f'Check agent suc. {job_info.log_format()}.')
            return True

        log.info(f'Cur node not allow. {job_info.log_format()}.')
        return False

    @log_start()
    @out_result()
    def allow_running_in_local_node(self, job_info: JobInfo, is_main_job):
        """
        Check whether the current node may execute the job or sub-job.

        :param job_info: job related information
        :param is_main_job: True when checking the main job
        :return: True when this node may execute, otherwise False
        """
        log.info(f"Is main job：{is_main_job}, sub job type: {job_info.sub_job_type}, {job_info.log_format()}.")
        if not (is_main_job or job_info.sub_job_type in ["restore_subtask", "backup"]):
            # White-list, query-copy, post and clean sub-jobs run on any node.
            if job_info.sub_job_type in ["prepare", "queryCopy", "post", "clean"]:
                return True
            log.error(f'Unknown sub job. {job_info.log_format()}')
            return False

        # Main jobs, backup sub-jobs and restore sub-jobs share the same checks.
        dws_config = get_dws_config()
        deploy_type = int(dws_config.get("deployType", DwsDeployType.DEPLOY_IN_SERVER.value))
        if deploy_type == DwsDeployType.DEPLOY_IN_SANDBOX.value and not mount_bind_sandbox_path():
            log.error("Mount bind sandBox path failed")
            return False
        if is_proxy_node(job_info.nodes):
            return False
        if not self.check_env_allow(job_info):
            return False
        # Restore jobs and full backups need no agent-affinity check.
        if job_info.type != TaskType.BACKUP:
            return True
        if job_info.backup_type == BackupTypeEnum.FULL_BACKUP:
            return True
        # Incremental / differential backup: node must match the previous copy's agent.
        if self.check_agent(job_info):
            return True
        return False

    @log_start()
    @out_result()
    @progress_notify()
    def pre_backup(self, job_info: JobInfo):
        """
        Pre-task script for backup: creates resources/repositories, writes the
        initial progress file, checks cluster balance and, for incremental
        backups, merges the previous copy's backupkey.db.

        :param job_info: job related information
        :return: True on success, False on failure
        :raises ErrCodeException: when the cluster is not balanced
        """
        if job_info.backup_tool_type == CopyGenerateType.GENERATE_BY_GDS:
            if not type(self).pre_job_create_gds_res(job_info.copy_id, job_info.cache_path, job_info.meta_path):
                log.error(f"Create GDS res failed in pre job. {job_info.log_format()}")
                return False
        else:
            if not type(self).pre_job_create_res(job_info.copy_id, job_info.cache_path, job_info.meta_path):
                log.error(f"Create roach res failed in pre job. {job_info.log_format()}")
                return False

        # Write an initial RUNNING progress entry so progress queries succeed
        # before the backup itself starts.
        write_file(
            get_progress_path(job_info.copy_id, job_info.sub_job_id, job_info.cache_path),
            json.dumps(
                SubJobDetails(
                    taskId=job_info.job_id,
                    subTaskId=job_info.sub_job_id,
                    taskStatus=SubJobStatusEnum.RUNNING,
                    progress=0).dict(by_alias=True)))

        if not type(self).create_data_rep_1(job_info):
            log.error(f"Create data reps failed. {job_info.log_format()}")
            return False

        # Check the cluster state; an unbalanced cluster aborts the job.
        if not type(self).cluster_is_balanced(job_info.usr, job_info.env_path):
            log.error(f"Cluster balanced err. {job_info.log_format()}")
            raise ErrCodeException(
                log_detail=LogDetail(
                    logDetail=DwsErrorCode.CLUSTER_STATE_NOT_BALANCED,
                    logTimestamp=int(time.time()),
                    logLevel=DBLogLevel.ERROR))
        if job_info.backup_type == BackupTypeEnum.FULL_BACKUP:
            business_type = BusinessConfigType.FULL_BACKUP_TYPE
        else:
            business_type = BusinessConfigType.INCREMENT_BACKUP_TYPE
        save_business_config(business_type, job_info.cache_path, job_info.copy_id)
        # Incremental backup needs the previous copy's backupkey.db merged in.
        if job_info.backup_type != BackupTypeEnum.FULL_BACKUP:
            log.info(f"Begin to merge backupkey.db.")
            last_full_copy_id = self.get_last_copy_id(job_info)
            object_data_path = os.path.join(job_info.meta_path, "meta", last_full_copy_id, "objectmeta")
            target_path = os.path.join(job_info.cache_path, "meta", job_info.copy_id, "objectmeta")  # aggregated path
            if not self._aggregate_single_copy_object_data(target_path, object_data_path, job_info.job_id):
                log.error(f"Aggregate copy {job_info.copy_id} object data failed. main task:{job_info.job_id}")
                return False
            log.info(f"merge backupkey.db succ, job id: {job_info.job_id}.")
        log.info(f"Prepare res suc, copy_id: {job_info.copy_id}, {job_info.log_format()}")
        return True

    @log_start()
    def get_last_copy_id(self, job_info: JobInfo):
        """
        Return the id of the most recent copy, or '' when it is absent.
        """
        previous = self.get_last_copy_info(job_info)
        copy_id = previous.get("id", "")
        log.info(f"get last_copy_id: {copy_id}, last_copy_info{previous}.")
        return copy_id

    @log_start()
    def get_last_copy_info(self, job_info: JobInfo):
        """
        Query the previous copy via the RPC tool interface.

        Differential backups chain only to full copies; other backup types
        may chain to full or increment copies.
        """
        if job_info.backup_type == BackupTypeEnum.DIFF_BACKUP:
            wanted_types = [RpcParamKey.FULL_COPY]
        else:
            wanted_types = [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY]
        request = {
            RpcParamKey.APPLICATION: job_info.pro_obj,
            RpcParamKey.TYPES: wanted_types,
            RpcParamKey.COPY_ID: "",
            ParamKeyConst.JOB_ID: job_info.job_id
        }
        return invoke_rpc_tool_interface(job_info.job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, request)

    @log_start()
    def report_job_details(self, job_info: JobInfo, sub_job_details: dict):
        """
        Report sub-job progress details through the RPC tool.

        :return: True on success, False on RPC failure or non-zero result code
        """
        try:
            rsp = invoke_rpc_tool_interface(job_info.job_id, RpcParamKey.REPORT_JOB_DETAILS, sub_job_details)
        except Exception as err:
            log.error(f"Invoke rpc_tool interface exception, err: {err}.")
            return False

        code = rsp.get("code")
        if code == NumberConst.ZERO:
            return True
        log.error(f"Invoke rpc_tool interface failed, result code: {code}.")
        return False

    @log_start()
    def convert_full_backup(self, job_info: JobInfo, last_copy):
        """
        Decide whether the incremental backup must be converted to a full one.

        :param job_info: current job information
        :param last_copy: previous copy info queried from the framework
        :return: True when a conversion to full backup is required
        """
        log.info(f'{job_info.log_format()}')

        # Business-level changes: version, backup node or metadata path differ.
        business_changed = \
            self.check_version(CopyInfoParam.get_version(last_copy),
                               DwsCluster(job_info.usr, job_info.env_path).get_cluster_version()) or \
            self.check_backup_node(CopyInfoParam.get_host_ip(last_copy), get_cur_host_all_ip()) or \
            self.check_meta_path(CopyInfoParam.get_metadata_destination(last_copy),
                                 job_info.metadata_destination)
        copy_invalid = not CopyInfoParam.is_normal(last_copy) or business_changed

        # Deployment changes: cluster membership, storage id or storage esn set.
        deployment_changed = \
            self.check_node(set(CopyInfoParam.get_cluster_nodes(last_copy)),
                            DwsCluster.get_all_cluster_node_name(job_info.usr, job_info.env_path)) or \
            self.check_storage_id(CopyInfoParam.get_storage_id(last_copy), job_info.storage_id) or \
            self.check_storage_capacity(set(CopyInfoParam.get_storage_esn(last_copy)), job_info.storage_esn)

        return copy_invalid or deployment_changed

    @log_start()
    def gen_sub_job(self, job_info: JobInfo):
        """
        Generate the sub-job list.  Full backups run on any node; other
        backup types are pinned to the node that produced the previous copy.

        :return: True on success, False when the execution node cannot be resolved
        """
        if job_info.backup_type == BackupTypeEnum.FULL_BACKUP:
            output_result_file(self._pid, self.build_sub_job_list(job_info))
            return True

        last_copy_host_ip = CopyInfoParam.get_host_ip(self.get_last_copy_info(job_info))
        if not last_copy_host_ip:
            log.error("Failed to get last_copy_host_ip. Gen sub job failed.")
            return False
        node_id = self.get_node_id_by_host_ip(job_info, last_copy_host_ip)
        log.info(f"Last copy host ip: {last_copy_host_ip}. Exec node id: {node_id}.")
        if not node_id:
            log.error("Failed to get node_id. Gen sub job failed.")
            return False
        output_result_file(self._pid, self.build_sub_job_list(job_info, True, node_id))
        return True

    @log_start()
    def check_version(self, last_version, cur_version):
        """
        Return True when the cluster version changed since the last copy,
        which forces a conversion to full backup.

        :param last_version: version recorded in the previous copy info
        :param cur_version: version of the current cluster
        """
        log.debug(f"last_version: {last_version}, cur_version: {cur_version}")
        version_changed = last_version != cur_version
        return version_changed

    @log_start()
    def check_backup_node(self, last_host_ip, curl_all_ip):
        """
        Return True when the previous backup ran on a different node (its
        host ip is not among this host's ips), forcing a full backup.
        """
        log.debug(f"Last host ip: {last_host_ip}, curl all ip: {curl_all_ip}")
        node_changed = last_host_ip not in curl_all_ip
        return node_changed

    @log_start()
    def check_storage_id(self, last_storage_id, storage_id):
        """
        Return True when the backup storage id changed since the previous
        copy, forcing a full backup.
        """
        log.debug(f"last_storage_id: {last_storage_id}, storage_id: {storage_id}")
        storage_changed = last_storage_id != storage_id
        return storage_changed

    @log_start()
    def check_meta_path(self, last_meta_path, meta_path):
        """
        Return True when the metadata directory changed since the previous
        copy, forcing a full backup.
        """
        log.debug(f"last_meta_path: {last_meta_path}, meta_path: {meta_path}")
        path_changed = last_meta_path != meta_path
        return path_changed

    @log_start()
    def check_node(self, last_cluster_info, cur_cluster_info):
        """
        Return True on the first backup after a cluster scale-out/scale-in,
        i.e. when the node sets of the previous and current backup differ.
        """
        log.debug(f"last_cluster_info: {last_cluster_info}, cur_cluster_info: {cur_cluster_info}")
        nodes_changed = last_cluster_info != cur_cluster_info
        return nodes_changed

    @log_start()
    def check_storage_capacity(self, last_storage_esn: set, now_storage_esn):
        """
        Return True when the storage shrank: the previous backup's repository
        esn set is no longer a subset of the current one, forcing a full backup.
        """
        log.debug(f"last_storage_esn: {last_storage_esn}, now_storage_esn: {now_storage_esn}")
        still_covered = last_storage_esn.issubset(now_storage_esn)
        return not still_covered

    @log_start()
    def check_backup_type(self, job_info: JobInfo):
        """
        Decide whether the backup must be converted to full and write the
        decision into the result file.

        :param job_info: job information
        :return: always True; the result file carries the actual decision
        """
        log.info(f'{job_info.log_format()}')
        try:
            last_copy = self.get_last_copy_info(job_info)
        except Exception as exception:
            # No previous copy info available: force a conversion to full.
            log.warn(f'Get last copy info failed, {exception}.')
            output_result_file(job_info.pid,
                               ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value,
                                            bodyErr=0x5E02502D).dict(by_alias=True))
            return True

        need_full = False
        try:
            need_full = self.convert_full_backup(job_info, last_copy)
        except Exception as e:
            log.warn(f"Check_backup_type err: {e}, {job_info.log_format()}")

        if need_full:
            result = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value, bodyErr=0x5E02502D)
        else:
            result = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        output_result_file(job_info.pid, result.dict(by_alias=True))
        return True

    @log_start()
    @out_result()
    @progress_notify(fail_lab=DwsBackupLabel.WHITE_SUB_JOB_FAILED_LABEL)
    def white_sub_job(self, job_info: JobInfo, intrusive_mode):
        """
        White-list sub-job, executed on every node: saves task info, creates
        repositories, writes the cache info file and records this node's
        completion marker.

        :param job_info: job related parameters
        :param intrusive_mode: whether the agent runs in intrusive mode
        :return: True on success, False on failure
        """
        log.info(f"Enter. {job_info.log_format()}, intrusive: {intrusive_mode}.")
        host_ip = get_register_ip(job_info.cluster_agents, job_info.host_agents)
        if not host_ip:
            return False
        log.info(f"Type: {job_info.type}, host_ip: {host_ip}")
        if job_info.type == TaskType.BACKUP \
                and not type(self).save_task_info_to_cache(job_info, TaskType.BACKUP, host_ip):
            log.error(f"Save filesystem info failed, {job_info.log_format()}.")
            return False

        ClusterBackup.create_path_white_sub_job(job_info.copy_id, job_info.cache_path, job_info.meta_path, host_ip)

        # Non-intrusive deployments use 'rdadmin' for the repository setup.
        usr_name = job_info.usr if is_intrusive(job_info.nodes) == IntrusiveMode.INTRUSIVE_MODE else 'rdadmin'
        if not type(self).create_data_rep_2(job_info, host_ip, usr_name):
            log.error(f"Create data reps failed. {job_info.log_format()}")
            return False
        # Write the cache repository paths to the tmp directory; each node keeps its own file.
        cache_info = {
            "cacheRepoPath": job_info.cache_path,
            "metaRepoPath": job_info.meta_path,
            "copyId": job_info.copy_id,
            "taskId": job_info.job_id,
            "hostKey": host_ip
        }

        cache_info_path = os.path.join(f"{get_install_head_path()}/" \
                                       f"DataBackup/ProtectClient/ProtectClient-E/stmp/dws_cacheInfo.txt")
        write_file(cache_info_path, json.dumps(cache_info))

        change_path_permission(cache_info_path, user_name="rdadmin", mode=PERMISSION_640)

        if not DistributeDwsNodes(job_info).check_source_delete_cfg(host_ip):
            return False

        dir_path = os.path.join(job_info.cache_path, "tmp", job_info.copy_id)
        if is_proxy_node(job_info.nodes):
            # Proxy nodes register the local roach client's ip/port for later use.
            ip, port = get_roach_ip_and_port(get_process_cmd('roach_client'))
            if not ip or not port:
                log.warn("Roach client not started.")
                return record_subtask_info_to_file(host_ip, "white_sub_job", dir_path)
            # NOTE(review): host_ip was already fetched above; this re-fetch looks redundant — confirm.
            host_ip = get_register_ip(job_info.cluster_agents, job_info.host_agents)
            if not host_ip:
                log.error("Fail to get register ip.")
                return False
            write_file(os.path.join(job_info.cache_path, 'tmp', job_info.copy_id, 'roach_client',
                                    f'{host_ip}.txt'), json.dumps({'ip': ip, 'port': port}))
        else:
            if not type(self).change_xbsa_mod(job_info.usr):
                log.error(f"Change xbsa mod failed, {job_info.log_format()}")
                return False

        return record_subtask_info_to_file(host_ip, "white_sub_job", dir_path)

    @log_start()
    def create_roach_client_ip_list_file(self, job_info: JobInfo, ip_list: list):
        """
        Write the roach client ip list under the user's home directory and
        copy it into the sandbox.

        :return: True on success, False otherwise
        """
        media_path = os.path.join('/home', job_info.usr, 'media_list_file.txt')
        try:
            write_lines(media_path, "\n".join(ip_list))
        except Exception as err:
            log.exception(f"Write ip list file err: {err}.")
            return False
        change_path_permission(media_path, user_name=job_info.usr, mode=PERMISSION_640)
        return bool(copy_file_into_sandbox(job_info.usr, media_path, os.path.join('/home', job_info.usr)))

    @log_start()
    def check_last_subtask_status(self, job_info: JobInfo):
        """
        Verify that every required node recorded success for the preceding
        white-list sub-job.
        """
        record_dir = os.path.join(job_info.cache_path, "tmp", job_info.copy_id)
        passed_nodes = get_subtask_info_from_file("white_sub_job", record_dir)
        return check_subtask_info(job_info.cluster_agents, job_info.host_agents, passed_nodes)

    @log_start()
    @progress_notify(DwsBackupLabel.BACKUP_SUBJOB_SUC_LABEL, DwsBackupLabel.BACKUP_SUBJOB_FAILED_LABEL)
    def sub_job_backup(self, job_info: JobInfo, intrusive_mode):
        """
        Backup sub-job: distribute filesystems to DWS nodes, build and run the
        GaussRoach backup command, then save index info and update metadata.

        :param job_info: job information
        :param intrusive_mode: whether the agent runs in intrusive mode
        :return: True on success, False on failure
        :raises ErrCodeException: when roach client ports are inconsistent
        :raises Exception: when updating metadata fails
        """
        if not self.check_last_subtask_status(job_info):
            return False

        # Distribute file systems to the DWS nodes.
        distribute_nodes_obj = DistributeDwsNodes(job_info)
        if not distribute_nodes_obj.distribute_dws_nodes():
            log.error(f"Distribute failed, {job_info.log_format()}")
            return False

        self.update_progress(job_info, SubJobStatusEnum.RUNNING.value, 0)

        port = 55555
        if intrusive_mode == IntrusiveMode.NON_INTRUSIVE_MODE:
            ret, ip_list, port = type(self).check_roach_port(
                os.path.join(job_info.cache_path, 'tmp', job_info.copy_id, 'roach_client'), job_info)
            if not ret:
                log.error(f"Check roach port failed, {job_info.log_format()}.")
                raise ErrCodeException(log_detail=self.get_all_roach_client_port_not_same_log_detail())

            if not self.create_roach_client_ip_list_file(job_info, ip_list):
                log.error(f"Create roach client ip list file failed, {job_info.log_format()}.")
                return False

        cluster_version = DwsCluster(job_info.usr, job_info.env_path).get_cluster_version()
        if "8.0." not in cluster_version:
            self._python_version = "python3"

        try:
            backup_cmd = self.construct_cmd(job_info, intrusive_mode, port)
        except Exception as err:
            log.error(f"Construct cmd failed: err: {err}, backup_type: {job_info.backup_type}")
            raise err

        if job_info.physical_fine_grained and compare_version(cluster_version, TABLE_LIST_SUPPORT_VERSION) < 0 \
                and not self.sub_save_index_info(job_info):
            return False

        # Enable password-free access for the backup tool.
        if not open_non_secret(job_info.usr, job_info.env_path):
            return False
        try:
            ret, out_info = ClusterBackup.dws_exec_backup_cmd_by_type(job_info.usr, job_info.env_path, backup_cmd)
        finally:
            self._delete_tmp_files(job_info)
            # Always disable password-free access, even when the backup
            # command raised (the original code skipped this on exceptions,
            # leaving password-free mode enabled).
            close_non_secret(job_info.usr, job_info.env_path)
        if "Backup key: " not in out_info or ret not in [DwsRetEnum.SUCCESS, GAUSS_ROAH_CMD_SUCCESS]:
            log.error(f"Backup failed, ret: {ret}, out_info: {out_info}, {job_info.log_format()}.")
            ClusterBackup.anglyze_err_info(job_info, out_info)
            return False

        backup_key = re.search(r'Backup key: ([0-9_.]*)', out_info).group(1)
        if not self._check_backup_status(job_info, backup_key):
            log.error(f"Backup failed, ret: {ret}, out_info: {out_info}, {job_info.log_format()}.")
            ClusterBackup.anglyze_err_info(job_info, out_info)
            return False

        if job_info.physical_fine_grained and compare_version(cluster_version, TABLE_LIST_SUPPORT_VERSION) >= 0 \
                and not self.sub_save_index_info(job_info, backup_key):
            return False

        if not self.update_metadata(backup_key, job_info):
            raise Exception(f"Update metadata failed, {job_info.log_format()}")

        log.info(f"Exec backup cmd suc, {job_info.log_format()}, dws_backup_id: {backup_key}")
        return True

    def sub_save_index_info(self, job_info: JobInfo, backup_key=None):
        """
        Save fine-grained index info, converting any exception into a boolean.

        :return: True on success, False when saving raised
        """
        succeeded = True
        try:
            self.save_index_info(job_info, backup_key)
        except Exception as e:
            log.error(f'Index file err: {e}')
            succeeded = False
        return succeeded

    @log_start()
    @out_result()
    @progress_notify()
    def post_sub_job(self, job_info: JobInfo, intrusive_mode):
        """
        Post sub-job of backup.  Currently a no-op that always succeeds;
        the decorators handle logging, result output and progress reporting.

        :param job_info: job related parameters
        :param intrusive_mode: whether the agent runs in intrusive mode
        :return: True
        """
        return True

    @log_start()
    @out_result()
    def abort_backup(self, job_info: JobInfo):
        """
        Abort a running backup by asking GaussRoach to stop; when the stop
        command fails, fall back to killing the job's worker processes.

        :param job_info: job information
        :return: always True
        """
        cluster_version = DwsCluster(job_info.usr, job_info.env_path).get_cluster_version()
        if "8.0." not in cluster_version:
            self._python_version = "python3"
        stop_cmd = f"{self._python_version} $GPHOME/script/GaussRoach.py -t stop"
        log.info(f"Stop cmd: {stop_cmd}, {job_info.log_format()}.")
        ret, std_out = dws_exec_cmd_by_type(job_info.usr, job_info.env_path, stop_cmd)
        log.info(f"Stop finished, ret: {ret}, out: {std_out}, {job_info.log_format()}.")
        if not ret:
            log.warn(f"Stop backup failed, ret: {ret}, std_err:{std_out}, {job_info.log_format()}.")
            self.kill_work_process(job_info)

        return True

    @log_start()
    def kill_work_process(self, job_info: JobInfo):
        """
        Kill every process that shares this job id, except the aborting
        process itself.  Failures for individual pids are logged and skipped.
        """
        for work_pid in DwsCommon.filter_pids(job_info.job_id, job_info.pid):
            try:
                psutil.Process(work_pid).kill()
            except Exception as err:
                log.warn(f"Get process err: {err}.")
                continue
            log.debug(f"kill process: {work_pid}, {job_info.log_format()}.")

    @log_start()
    def get_progress(self, job_info: JobInfo, is_post_job, is_sub_job_backup):
        """
        Read the progress file and report sub-job progress to the framework.

        :param job_info: detailed job information
        :param is_post_job: True when querying progress of the post job
        :param is_sub_job_backup: True when querying the backup sub-job
        :return: True on success, False on failure
        """
        log.info(f'{job_info.log_format()}, sub_backup: {is_sub_job_backup}')
        if not job_info.cache_path:
            log.error(f"Result path error, cache: {job_info.cache_path}, {job_info.log_format()}")
            return False

        try:
            result = read_file(get_progress_path(job_info.copy_id, job_info.sub_job_id, job_info.cache_path))
        except Exception as err:
            log.error(f'Read result failed, err {err}, {job_info.log_format()}.')
            return False
        total_data_size = 0
        log_detail = result.get("logDetail")
        if job_info.backup_tool_type == CopyGenerateType.GENERATE_BY_ROACH and is_sub_job_backup:
            # Roach backup sub-job: report the scanned data size in the log detail.
            total_data_size = ClusterBackup.get_total_data_size(job_info)
            if SubJobStatusEnum.RUNNING.value == result.get("taskStatus"):
                log_detail = [
                    LogDetail(logInfo=DwsBackupLabel.DWS_BACKUP_SUBJOB_ROACH_LABEL,
                              logInfoParam=[get_register_ip(job_info.cluster_agents, job_info.host_agents),
                                            str(total_data_size) + 'MB'],
                              logLevel=DBLogLevel.INFO.value)
                ]
        elif job_info.backup_tool_type == CopyGenerateType.GENERATE_BY_GDS and is_sub_job_backup:
            total_data_size = ClusterBackup.get_total_data_size(job_info)

        # dataSize is scaled by 1024; presumably MB -> KB for the framework — confirm against consumer.
        sub_job_detail = SubJobDetails(
            taskId=result.get("taskId", ""),
            subTaskId=result.get("subTaskId", ""),
            progress=result.get("progress", 50),
            speed=result.get("speed", 0),
            dataSize=total_data_size * 1024,
            logDetail=log_detail,
            taskStatus=result.get("taskStatus", SubJobStatusEnum.COMPLETED.value))
        output_result_file(self._pid, sub_job_detail.dict(by_alias=True))
        # Once the post job reaches a terminal status, clear the cached data.
        if is_post_job and sub_job_detail.task_status >= SubJobStatusEnum.COMPLETED:
            type(self).clear_cache(job_info)

        return True

    @log_start()
    def query_backup_copy(self, job_info: JobInfo):
        """
        Build the copy information (data and metadata repositories plus the
        saved copy metadata) and write it to the result file.

        :param job_info: job related information
        :return: True on success, False when the copy metadata cannot be read
        """
        log.info(f"Enter. {job_info.log_format()}")

        try:
            result = read_file(get_metadata_path(job_info.copy_id, job_info.meta_path))
        except Exception as err:
            log.error(f'Err: {err}, {job_info.log_format()}')
            return False

        data_rep_rsp = []
        # Data repositories
        for item in job_info.data_reps:
            data_rep_rsp.append(
                CopyInfoRepModel(id=item.get('id'),
                                 repositoryType=item.get("repositoryType"),
                                 isLocal=item.get("isLocal"),
                                 protocol="NFS",
                                 remotePath=f"{item.get('remotePath')}/data/{job_info.copy_id}",
                                 remoteHost=item.get("remoteHost"),
                                 extendInfo={
                                     "fsId": item.get("extendInfo", {}).get("fsId")
                                 }).dict(by_alias=True))

        log.debug(f"Construct meta: {job_info.log_format()}")
        # Metadata repository
        data_rep_rsp.append(
            CopyInfoRepModel(id=job_info.meta_rep.get('id'),
                             repositoryType=job_info.meta_rep.get("repositoryType"),
                             isLocal=job_info.meta_rep.get("isLocal"),
                             protocol="NFS",
                             remotePath=f"{job_info.meta_rep.get('remotePath')}/meta/{job_info.copy_id}",
                             remoteHost=job_info.meta_rep.get("remoteHost"),
                             extendInfo={
                                 "fsId": job_info.meta_rep.get("extendInfo", {}).get("fsId")
                             }).dict(by_alias=True))

        copy_info = Copy(repositories=data_rep_rsp, extendInfo=result).dict(by_alias=True)

        log.info(f"copy_info {copy_info}")
        log.info(f"Query copy suc. {job_info.log_format()}")
        output_result_file(self._pid, copy_info)
        return True

    @log_start()
    @out_result()
    @progress_notify()
    def post_backup(self, job_info: JobInfo):
        """
        Post-processing after a backup job.

        When the backup failed, removes the copy's metadata directory and the
        per-repository data directories; temporary files are cleaned up in
        every case.

        :param job_info: JobInfo, task-related information
        :return: True always
        """
        if job_info.backup_result != 0:
            # Backup failed: discard the partially written copy directories
            # (one meta directory plus one data directory per repository).
            doomed = [os.path.join(job_info.meta_path, 'meta', job_info.copy_id)]
            doomed.extend(
                os.path.join(rep.get("path")[0], 'data', job_info.copy_id)
                for rep in job_info.data_reps
            )
            for path in doomed:
                del_path_without_exception(path)

        # Temporary working files are removed regardless of the backup result.
        self._delete_tmp_files(job_info)

        log.info(f"Clear res suc, {job_info.log_format()}.")
        return True

    @log_start()
    def _delete_tmp_files(self, job_info: JobInfo):
        """
        Remove temporary files produced during backup/restore.

        Deletes the table-list file, the media-list file and the temporary
        schema-list file from the host; for sandbox deployments the latter two
        are additionally removed from inside the sandbox via the dws command
        wrapper.
        """
        media_list = os.path.join('/home', job_info.usr, 'media_list_file.txt')
        # Files under /home must pass the path-legality check before removal.
        for checked_file in (self._list_path, media_list):
            if os.path.isfile(checked_file) and check_path_legal(checked_file, "/home/"):
                if not su_exec_rm_cmd(checked_file):
                    log.warn(f"Fail to remove {checked_file}.")
        schema_list_temp_file = os.path.realpath(os.path.join("/home", job_info.usr, "schema_list_temp.txt"))
        # Clean the temporary table file left over from table-level restore.
        if os.path.isfile(schema_list_temp_file):
            if not su_exec_rm_cmd(schema_list_temp_file):
                log.warn(f"Fail to remove {schema_list_temp_file}.")
        dws_config = get_dws_config()
        deploy_type = int(dws_config.get("deployType", DwsDeployType.DEPLOY_IN_SERVER.value))
        if deploy_type != DwsDeployType.DEPLOY_IN_SANDBOX.value:
            return
        # Sandbox deployment: the files also exist inside the sandbox and must
        # be removed there with the dws command wrapper.
        ret, output = dws_exec_cmd_by_type(job_info.usr, job_info.env_path,
                                           cmd_format("rm {}", media_list))
        if not ret:
            log.warning(f"Fail to remove media_list_file for {output}")
        ret, output = dws_exec_cmd_by_type(job_info.usr, job_info.env_path,
                                           cmd_format("rm {}", schema_list_temp_file))
        if not ret:
            log.warning(f"Fail to remove {schema_list_temp_file} for {output}")

    @log_start()
    def _check_backup_status(self, job_info: JobInfo, backup_key: str):
        """
        Check whether the backup reached a successful state, retrying up to
        three times with a 10-second pause between attempts.

        A backup counts as successful when its status is one of the states
        configured under "backupStatus" (defaults: UNVERIFIED, VALIDATED).

        :param job_info: JobInfo, task-related information
        :param backup_key: roach backup key of the copy to check
        :return: True if a successful status was observed, False otherwise
        """
        dws_config = get_dws_config()
        backup_status_success = dws_config.get("backupStatus", [DwsBackupStatus.UNVERIFIED, DwsBackupStatus.VALIDATED])
        backup_success = False
        check_times = 3
        for i in range(check_times):
            current_backup_status = self._get_current_backup_status(job_info, backup_key)
            if current_backup_status in backup_status_success:
                backup_success = True
                break
            log.error(f"Backup failed, check {i + 1} time, current backup status is [{current_backup_status}].")
            # Sleep only between attempts: the previous version also slept
            # after the final failed check, wasting 10 seconds for nothing.
            if i < check_times - 1:
                time.sleep(10)
        log.info(f"Check backup status, current backup status is [{current_backup_status}].")
        return backup_success

    @log_start()
    def _get_current_backup_status(self, job_info: JobInfo, backup_key: str):
        """
        Query the current backup status via "GaussRoach.py -t show".

        Parses the command output for a line of the form "STATUS : <value>".

        :param job_info: JobInfo, task-related information
        :param backup_key: roach backup key of the copy to check
        :return: the status string, or DwsBackupStatus.UNKNOWN on failure
        """
        sh_cmd = f"{self._python_version} $GPHOME/script/GaussRoach.py -t show " \
                 f"--metadata-destination {job_info.metadata_destination} --backup-key {backup_key}"
        ret, out_info = dws_exec_cmd_by_type(job_info.usr, job_info.env_path, sh_cmd)
        if not ret or "STATUS" not in out_info:
            log.error(f"Exec show backup status cmd failed, ret: {ret}, out_info: {out_info}.")
            return DwsBackupStatus.UNKNOWN

        for per_line in out_info.split('\n'):
            # maxsplit=1 keeps any ':' inside the value intact, consistent
            # with the parsing in _get_backup_time.
            temp_arr = per_line.split(":", maxsplit=1)
            if len(temp_arr) < 2:
                continue
            if temp_arr[0].strip() == "STATUS":
                log.debug(f"Successfully get backup status {temp_arr[1].strip()}.")
                return temp_arr[1].strip()

        return DwsBackupStatus.UNKNOWN

    @log_start()
    def _get_backup_time(self, job_info: JobInfo, backup_key: str):
        """
        Query the backup end time via "GaussRoach.py -t show".

        Looks for a line of the form "END TIME : <time>" in the command
        output and converts it to a timestamp.

        :param job_info: JobInfo, task-related information
        :param backup_key: roach backup key of the copy to check
        :return: end time as a timestamp, or 0 on any failure
        """
        sh_cmd = f"{self._python_version} $GPHOME/script/GaussRoach.py -t show " \
                 f"--metadata-destination {job_info.metadata_destination} --backup-key {backup_key}"
        ret, out_info = dws_exec_cmd_by_type(job_info.usr, job_info.env_path, sh_cmd)
        if not ret or "END TIME" not in out_info:
            log.error(f"Exec show backup time cmd failed, ret: {ret}, out_info: {out_info}.")
            return 0

        for line in out_info.split('\n'):
            # partition splits on the first ':' only, so times keep their colons.
            key, sep, value = line.partition(":")
            if sep and key.strip() == "END TIME":
                return convert_to_timestamp(value.strip())
        log.error("Failed to get backup time.")
        return 0

    @log_start()
    def _aggregate_single_copy_object_data(self, cache_path, object_data_path, job_id):
        """
        Aggregate all per-host object-data db files of one copy into a single
        sqlite database under cache_path.

        :param cache_path: directory where the merged db file is created
        :param object_data_path: meta repository path holding the per-host db files
        :param job_id: main task id, used for logging
        :return: True on success, False otherwise
        """
        log.info(f"Start to merge db in {object_data_path} to {cache_path}")
        merged_db_path = os.path.join(cache_path, self._concrete_object_db)
        tmp_path = os.path.realpath(merged_db_path)
        # If the merged db was replaced by a symlink, remove its resolved
        # target before sqlite opens the path.
        if os.path.islink(merged_db_path):
            if not su_exec_rm_cmd(tmp_path):
                log.warn(f"Fail to remove {tmp_path}.")

        try:
            object_conn = sqlite3.connect(merged_db_path)
        except Exception as e:
            log.error(f"Connect sqlite {self._concrete_object_db} failed for {e}.main task:{job_id}")
            return False
        try:
            if not self._merge_into_object_db(object_conn, object_data_path, job_id):
                return False
        finally:
            # Fix: the previous version leaked the connection on every exit
            # path. NOTE(review): assumes _merge_host_key_db commits its own
            # changes — it must, since the connection was never closed here
            # before either; confirm against its implementation.
            object_conn.close()

        if os.path.isfile(merged_db_path):
            change_path_permission(merged_db_path, mode=PERMISSION_644)

        # Restore ownership of the copy directory tree to the original owner.
        user_id, group_id = get_file_attribute(object_data_path, job_id)
        if not set_file_attribute(os.path.join(cache_path, "..", ".."), user_id, group_id, job_id):
            return False
        return True

    def _merge_into_object_db(self, object_conn, object_data_path, job_id):
        """
        Ensure BsaObjTable exists in object_conn, then merge every per-host
        db file found under object_data_path into it.

        :param object_conn: open sqlite3 connection to the merged db
        :param object_data_path: meta repository path holding the per-host db files
        :param job_id: main task id, used for logging
        :return: True on success, False otherwise
        """
        object_cur = object_conn.cursor()
        if not object_cur:
            log.error(f"Connect sqlite {self._concrete_object_db} failed.main task:{job_id}")
            return False
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            log.info("Table does not exist, create table.")
            object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                               "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,"
                               "[bsaObjectOwner] VARCHAR(64),"
                               "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
                               "[estimatedSize] VARCHAR(100) NOT NULL,"
                               "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
                               "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
                               "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
                               "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
        # Re-query to verify the CREATE TABLE actually took effect.
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            log.error(f"Create dws table failed. main task:{job_id}")
            return False

        db_file_list = self._get_all_db_files(object_data_path, job_id)
        if not db_file_list:
            log.error("No db file")
            return False
        log.info(f"db file list:{db_file_list}")
        self._merge_host_key_db(db_file_list, object_cur, object_conn)
        return True

    def _get_all_db_files(self, object_data_path, job_id):
        """
        获取所有要被聚合的db文件
        """
        db_file_list = []
        for host_key_path in os.listdir(object_data_path):
            db_path = os.path.join(object_data_path, host_key_path)
            if not os.path.isdir(db_path) or len(glob.glob(os.path.join(db_path, "*.db"))) == 0:
                log.warn(f"There is no object data in metadata path {db_path}. main task:{job_id}")
                continue

            for db in os.listdir(db_path):
                if db == self._concrete_object_db or not db.endswith(".db"):
                    continue
                db_file_list.append(os.path.join(db_path, db))
        return db_file_list
