#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import locale
import os
import platform
import re
import time

import pexpect
import psutil

from common.cleaner import clear
from common.common import output_result_file, get_previous_copy_info, read_tmp_json_file, invoke_rpc_tool_interface, \
    read_result_file, retry_when_exception, execute_cmd, touch_file, output_execution_result, report_job_details
from common.common_models import ActionResult, SubJobDetails, LogDetail, RepositoryPath, ScanRepositories
from common.const import ExecuteResultEnum, BackupTypeEnum, SubJobPriorityEnum, SubJobStatusEnum, RpcParamKey, \
    CopyDataTypeEnum, SubJobPolicyEnum, ReportDBLabel, DBLogLevel, CMDResult, RepositoryDataTypeEnum, BackupJobResult, \
    RpcToolInterface
from common.file_common import delete_file, create_dir_recursive
from common.lock_common import lock_backup_job_resource
from common.parse_parafile import ParamFileUtil, get_env_variable
from common.util.exec_utils import su_exec_touch_cmd, exec_mkdir_cmd
from goldendb.schemas.glodendb_schemas import ActionResponse
from oracle import logger
from oracle.common.backup_common import read_additional_db_file, exist_addition_file
from oracle.common.common import (check_database_status, query_archive_mode, exist_first_backup, parse_backup_path,
                                  send_params_to_pexpect, write_tmp_json_file, add_file_content, clear_repository_dir,
                                  chown_file_path_owner,
                                  check_database_id_same, mount_bind_path, umount_bind_path, convert_error_code,
                                  write_tmp_file, get_data_file_set,
                                  call_rc_tool_mount_cmd, query_backup_copy_size, check_database_is_primary,
                                  get_current_agent_uuid, check_cdb_is_open, list_pdbs,
                                  list_user, list_table_by_owner)
from oracle.common.constants import ErrorCode, PluginPathConstant, RootCommand, OracleProgress, OracleDataBaseType, \
    BackupSubJobName, LogMetaFileName, OracleTruncateLogMap, ORACLEJsonConstant, PMAuthTypeEnum, AuthTypeMapToStr, \
    AuthTypeMap, ShellAuthTypeEnum, BackupTransportMode, OracleReportLabel, MetaDataConstant, FileTypeMap, \
    ScriptExitCode, TaskType, PageParamConst, ListTableByOwnerParam, Platform
from oracle.common.linux_common import execute_linux_rman_cmd
from oracle.common.metadata_sqlite_service import MetaDataSqliteService
from oracle.common.sql_common import check_database_is_dg
from oracle.common.sql_common import get_pdb_open_cluster_inst, get_table_is_has_foreign_key_dict, \
    check_database_block_change_tracking, list_temporary_objects, get_current_scn
from oracle.common.user_env_common import update_oracle_env, get_user_shell_type
from oracle.common.windows_common import execute_windows_rman_cmd
from oracle.schemas.oracle_schemas import PermissionInfo, SubJob, BackupProgressInfo
from oracle.services.backup.oracle_pdb_backup_service import OraclePdbBackupService
from oracle.services.backup.oracle_storage_backup_service import OracleStorageBackupService
from oracle.services.backup.oracle_windows_backup_service import OracleWindowsBackupService
from oracle.services.restore.windows_sbin_func import unix_timestamp_to_date

if platform.system().lower() != "windows":
    from common.util.check_user_utils import check_os_user


class OracleBackupService(object):
    """
    oracle备份服务
    """

    def __init__(self, pid, job_id, sub_job_id=None):
        """Parse the task parameter file and initialize all backup task state.

        :param pid: task request id, used to locate and parse the parameter file
        :param job_id: main job id
        :param sub_job_id: sub job id (None for the main job)
        """
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        # Raw parameter dict parsed from the pid parameter file (platform aware).
        self._param_dict = {}
        self.get_param_dict(pid)
        self._job_dict = self._param_dict.get("job", {})
        self._job_extend_info = self._job_dict.get("extendInfo", {})
        self._job_param = self._job_dict.get("jobParam", {})
        self._backup_type = int(self._job_param.get("backupType", BackupTypeEnum.FULL_BACKUP.value))
        self._protect_obj = self._job_dict.get("protectObject", {})
        self._protect_obj_extend_info = self._protect_obj.get("extendInfo", {})
        self._protect_obj_auth_extend_info = self._protect_obj.get("auth", {}).get("extendInfo", {})
        self._application = self._param_dict.get("application", {})
        self._db_uuid = self._protect_obj.get("id", "")
        self._db_instance_name = ""
        self._asm_instance_name = ""
        self._oracle_home = self._protect_obj_extend_info.get("oracle_home", "")
        self._oracle_base = self._protect_obj_extend_info.get("oracle_base", "")
        self._ip_port_info = []
        self._node_number = 1
        # Defaults to oracle / oinstall when not provided.
        self._install_user_name = self._protect_obj_extend_info.get("installUsername", "oracle")
        get_user_shell_type(self._install_user_name)
        # Used for environments where the Oracle env variables are not configured.
        self.access_oracle_base = self._protect_obj_extend_info.get("accessOracleBase", "")
        self.access_oracle_home = self._protect_obj_extend_info.get("accessOracleHome", "")
        update_oracle_env(oracle_base=self.access_oracle_base, oracle_home=self.access_oracle_home)
        self._oracle_group = self._protect_obj_extend_info.get("oracle_group", "oinstall")
        self.select_agents = []
        self.mount_backup_data_path = []
        self.enc_algo = ""
        self.enc_key = ""  # plaintext encryption key
        self.encrypted_key = ""  # encrypted encryption key
        self.block_change_tracking = True  # whether block change tracking is enabled on the backup instance
        self.pdb_list = json.loads(self._protect_obj_extend_info.get('pdb', '[]'))
        self._cluster_nodes = []
        self.sub_type = self._protect_obj.get("subType", "")
        self.parent_sub_type = self._protect_obj_extend_info.get("parentSubType", "")
        self.get_oracle_info_by_sub_type()

        # ASM credentials come from the protect object's auth extend info.
        asm_info = json.loads(self._protect_obj_auth_extend_info.get("asmInfo", "{}"))
        self._asm_install_user_name = asm_info.get("installUsername", "grid")
        self._asm_user_name = asm_info.get("authKey", "")
        self._asm_pass_word = asm_info.get("authPwd", "")
        self._backup_task_sla = json.loads(self._job_dict.get("extendInfo", {}).get("backupTask_sla", "{}"))
        self._delete_archived_log = self._job_dict.get("extendInfo", {}).get("delete_archived_log", "false")
        self._delete_before_time = self._job_dict.get("extendInfo", {}).get("delete_before_time", '')
        self._delete_before_time_unit = self._job_dict.get("extendInfo", {}).get("delete_before_time_unit", '')
        self._action_result = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        self._init_backup_param()

    @staticmethod
    def get_windows_last_file_set(file_list):
        last_file_set = set()
        last_file_path_set = set()
        for file_name in file_list:
            if '.DBF' in file_name:
                data_file_name = file_name.split('\\')[1]
                last_file_path = file_name.split(':')[0]
                if data_file_name != ".snapshot":
                    last_file_set.add(data_file_name)
                    last_file_path_set.add(last_file_path)
        return last_file_set, last_file_path_set

    @staticmethod
    def get_last_file_set(file_list):
        last_file_set = set()
        last_file_path_set = set()
        for file_name in file_list:
            if '.dbf' in file_name:
                data_file_name = file_name.split('/')[1]
                last_file_path = file_name.split(':')[0]
                if data_file_name != ".snapshot":
                    last_file_set.add(data_file_name)
                    last_file_path_set.add(last_file_path)
        return last_file_set, last_file_path_set

    @staticmethod
    def report_job_details_linux_windows(pid, job_id, sub_job_id, log_detail, task_status):
        """Report sub-job progress through the platform-appropriate channel.

        Windows pushes the detail via the RPC tool interface; every other
        platform uses the common report_job_details helper.

        :param pid: task request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        :param log_detail: LogDetail instance to attach to the report
        :param task_status: SubJobStatusEnum value for the report
        """
        detail = SubJobDetails(taskId=job_id, subTaskId=sub_job_id, progress=100,
                               logDetail=[log_detail], taskStatus=task_status)
        if platform.system().lower() != Platform.WINDOWS:
            report_job_details(pid, detail)
            return
        invoke_rpc_tool_interface(job_id, RpcToolInterface.REPORT_JOB_DETAIL,
                                  detail.dict(by_alias=True))

    def get_param_dict(self, pid):
        """Parse the task parameter file for *pid* into self._param_dict (platform aware)."""
        parser = (ParamFileUtil.parse_param_windows_file
                  if platform.system().lower() == "windows"
                  else ParamFileUtil.parse_param_file)
        self._param_dict = parser(pid)

    def get_oracle_info_by_sub_type(self):
        """Initialize instance/node information according to the resource type.

        Cluster resources load the node list and (optionally) the balanced
        agent list; single-instance resources resolve the DB and ASM instance
        names from the protect object extend info.
        """
        if not self.is_cluster_type():
            self._db_instance_name = self._protect_obj_extend_info.get("inst_name", "")
            asm_info = json.loads(self._protect_obj_extend_info.get("asm_info", '{}'))
            self._asm_instance_name = asm_info.get("instName", "")
            return
        extend_info = self._job_dict.get("extendInfo", {})
        self._cluster_nodes = json.loads(extend_info.get("nodes", "{}"))
        # balance_agents is null when protection was modified without selecting hosts.
        balance_agents = extend_info.get("balance_agents", "")
        if balance_agents:
            self.select_agents = balance_agents.split(";")

    def get_repository(self):
        """Resolve data/log/meta/cache repository paths from the job parameters.

        Leaves the attributes untouched when no repository info can be parsed.
        """
        repositories_info = parse_backup_path(self._job_dict.get("repositories", []))
        if not repositories_info:
            return
        self._data_area = repositories_info.get("data_repository", [""])
        self._log_area = repositories_info.get("log_repository", [""])[0]
        self._meta_area = repositories_info.get("meta_repository", [""])[0]
        self.get_cache_area(repositories_info)

    def get_cache_area(self, repositories_info):
        """Resolve the cache repository path into self._cache_area.

        For storage-snapshot backups on Windows the cache path is
        \\ip\resource_id\jobid while Linux uses \\ip\jobid; the directory is
        shared between both environments in snapshot mode, so the resource id
        segment is stripped and the directory is created if missing.

        :param repositories_info: parsed repository dict from parse_backup_path
        """
        cache_area = repositories_info.get("cache_repository", [""])[0]
        if self.is_storage_snapshot_backup() and platform.system().lower() == "windows":
            if cache_area and cache_area.split('\\')[-2] == self._protect_obj.get("id"):
                cache_area = cache_area.replace(self._protect_obj.get("id") + '\\', '')
                if not os.path.exists(cache_area):
                    os.mkdir(cache_area)
        # Bug fix: previously self._cache_area was left unset on the Windows
        # branch when the path did not end with the resource id, which caused
        # an AttributeError on any later access (e.g. backup_prerequisite).
        self._cache_area = cache_area

    def set_action_result(self, code, body_err, message, error_params=None):
        """
        设置结果
        :param code: 状态码
        :param body_err: 错误码
        :param message: 错误信息
        :param error_params: 错误信息参数
        """
        self._action_result.code = code
        self._action_result.body_err = body_err
        self._action_result.message = message
        self._action_result.body_err_params = error_params

    def get_action_result(self):
        return self._action_result

    def check_every_node_can_backup(self, db_instance_name, install_user_name):
        """
        Check whether this node can run the backup.
        :param db_instance_name: database instance name
        :param install_user_name: database install user name
        :return: True when the node can back up, False otherwise
        """
        # Check the database status (the check is primary/standby aware).
        primary_flag = check_database_is_primary(self._pid, db_instance_name, install_user_name)
        if not check_database_status(self._pid, db_instance_name, install_user_name, primary_flag):
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value,
                                   ErrorCode.DATABASE_OFFLINE_ERR.value,
                                   "Database system is shut down.")
            logger.error(f"Database system is shut down, pid: {self._pid}, job_id: {self._job_id}.")
            return False

        logger.info(f"Check database status success, pid: {self._pid}, job_id: {self._job_id}.")

        auth_type = int(get_env_variable(f"job_protectObject_auth_authType_{self._pid}"))
        # In ADG mode the standby database must use database authentication.
        logger.info(f"Database auth type is: {auth_type}, pid: {self._pid}, job_id: {self._job_id}.")
        # NOTE(review): comparing the raw int against PMAuthTypeEnum.OS_AUTH
        # assumes the enum compares equal to plain ints (IntEnum) -- confirm.
        if not primary_flag and \
                auth_type == PMAuthTypeEnum.OS_AUTH:
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value,
                                   ErrorCode.ERROR_STANDBY_DATABASE_NEED_DB_AUTH.value,
                                   "Standby database need database auth",
                                   [
                                       AuthTypeMapToStr.get(AuthTypeMap.get(int(auth_type))),
                                       AuthTypeMapToStr.get(ShellAuthTypeEnum.DATABASE_AUTH)
                                   ])

            logger.error(f"Standby database need database auth, pid: {self._pid}, job_id: {self._job_id}.")
            return False
        logger.info(f"Check database auth type is success, pid: {self._pid}, job_id: {self._job_id}.")

        # Check whether the database is running in archive-log mode.
        if not query_archive_mode(self._pid, db_instance_name, install_user_name):
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value,
                                   ErrorCode.ARCHIVE_MODE_NOT_ENABLED.value,
                                   "Archive mode is off.")
            logger.error(f"Archive mode is off, pid: {self._pid}, job_id: {self._job_id}.")
            return False

        logger.info(f"The node can backup, database instance name is: {db_instance_name}, "
                    f"pid: {self._pid}, job_id: {self._job_id}.")
        return True

    def get_cluster_nodes_info(self, cluster_nodes):
        """
        Assemble per-node info for a cluster resource and collect the
        instance/ip/port entries of the selected (balanced) agents into
        self._ip_port_info; also updates self._node_number.
        :param cluster_nodes: raw cluster node list from the job parameters
        :return: assembled cluster node info list
        """
        cluster_nodes_info = []
        for nodes in cluster_nodes:
            cluster_node = {
                'node_inst_name': nodes.get("inst_name", ""),
                'host_id': nodes.get('hostId', ""),
                'oracle_home': nodes.get("oracle_home", ""),
                'oracle_base': nodes.get("oracle_base", ""),
                'node_install_user_name': nodes.get("installUsername", "oracle")
            }
            # Collect the ASM related info for the node.
            node_asm_info = nodes.get("asm_info", "{}")
            oracle_ip_info = json.loads(nodes.get("oracle_ip_infos", '[]'))
            cluster_node['node_asm_inst_name'] = json.loads(node_asm_info).get("instName", "")
            cluster_nodes_info.append(cluster_node)

            agent_id = nodes.get('hostId', "")
            # NOTE(review): oracle_ip_info[0] raises IndexError when
            # "oracle_ip_infos" is empty for a selected agent -- confirm the
            # upstream parameters always provide at least one entry here.
            if agent_id in self.select_agents:
                instance_node_info = f"{nodes.get('inst_name', '')}," \
                                     f"{oracle_ip_info[0].get('ip', '')},{oracle_ip_info[0].get('port', '')}"
                self._ip_port_info.append(instance_node_info)

        self._node_number = len(self._ip_port_info)
        logger.info(f"Get cluster nodes info is: {cluster_nodes_info}, select agents is: {self.select_agents}, "
                    f"ip_port_info: {self._ip_port_info}, node_number: {self._node_number}, "
                    f"job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")

        return cluster_nodes_info

    def allow_backup_in_local_node(self):
        """Decide whether this node may execute the backup job.

        Sub-jobs skip the check (the prerequisite task already ran it). For
        storage-snapshot backups, agents outside the protected set are
        rejected; otherwise the per-node database check decides. The action
        result is always written to the result file for the framework.
        :return: True when this node is allowed to back up, False otherwise
        """
        # The prerequisite task already validated sub-jobs -- answer directly.
        if self._param_dict.get("subJob", {}):
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return True
        if self.is_storage_snapshot_backup() and not self.need_check_node_can_backup():
            logger.info(f"need_check_node_can_backup false")
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value,
                                   "", "no need exec allow_backup_in_local_node")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return False
        if self.is_cluster_type():
            # Only the first node that passes the check answers True; the
            # remaining cluster nodes answer False.
            self.cluster_get_oracle_instance_name()
            can_backup = self.check_every_node_can_backup(self._db_instance_name, self._install_user_name)
            if not can_backup:
                logger.info(f"This node can not back up, pid: {self._pid}, job_id: {self._job_id}.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return can_backup
        can_backup = self.check_every_node_can_backup(self._db_instance_name, self._install_user_name)
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
        return can_backup

    def need_check_node_can_backup(self):
        """Return True when the current agent belongs to the protected agents.

        Cluster resources collect the host id of every protected instance;
        single-instance resources use the protect environment's id.
        """
        current_agent = get_current_agent_uuid(self._job_extend_info.get('agents', []))
        if self.is_cluster_type():
            # Bug fix: the default must be the JSON string "[]" -- json.loads
            # raises TypeError on a list default when the key is missing.
            instances = json.loads(self._protect_obj_extend_info.get("instances", "[]"))
            protect_agents = [instance.get('hostId', '') for instance in instances]
        else:
            protect_agents = [self._job_dict.get("protectEnv", {}).get('id')]
        logger.info(f"need_check_node_can_backup, current_agent {current_agent}, protect_agents"
                    f" {protect_agents}")
        return current_agent in protect_agents

    def query_job_permission(self):
        """Set the file-system permission info for the main task.

        Returns the oracle install user and group so the framework can apply
        them to the repositories; the response is also written to the result
        file. On Windows an empty PermissionInfo is returned unchanged.
        :return: PermissionInfo with user/group filled on non-Windows hosts
        """
        response = PermissionInfo()
        # This function needs no implementation on Windows.
        if platform.system().lower() == "windows":
            return response

        # Unlike the other interfaces, this one reads its parameters from
        # the "application" section of the param file.
        app_extend_info = self._param_dict.get("application", {}).get("extendInfo", {})
        install_user_name = app_extend_info.get("install_user_name", "oracle")
        oracle_group = app_extend_info.get("oracle_group", "oinstall")
        logger.info(f"Query job permission, install_user_name: {install_user_name}, "
                    f"oracle_group: {oracle_group}, pid: {self._pid}.")

        response.user = install_user_name
        response.group = oracle_group
        output_result_file(self._pid, response.dict(by_alias=True))
        return response

    def check_backup_job_type(self):
        """
        Check the backup job type and decide whether an incremental/diff/log
        backup must be converted to a full backup.
        :return: True when the job must be converted to full, False otherwise
        """
        if self._backup_type == BackupTypeEnum.FULL_BACKUP.value:
            logger.info(
                f"This type backup job no need to change, backup_type: {self._backup_type}, job_id: {self._job_id}.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return False

        # Cluster resources need to resolve the instance name of this node first.
        if self.is_cluster_type():
            self.cluster_get_oracle_instance_name()
        logger.info(f"Function check backup job type, instance name is: {self._db_instance_name}.")

        # For PDB-set backups: query the latest full copy; if its pdb list
        # differs from the current one, convert to full backup.
        # NOTE(review): previous pdb_list defaults to a string; set() of a
        # string yields single characters -- confirm the stored format matches
        # the elements of self.pdb_list.
        previous_copy = get_previous_copy_info(self._protect_obj, [RpcParamKey.FULL_COPY], self._job_id)
        previous_pdb_list = previous_copy.get("extendInfo", {}).get("pdb_list", "")
        if not set(self.pdb_list) == set(previous_pdb_list) and self.sub_type == OracleDataBaseType.ORACLE_PDB:
            logger.info(f"pdb list has changed, job_id: {self._job_id}.")
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value, ErrorCode.INC_TO_FULL_ERR.value,
                                   "pdb list has changed.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return True
        # If the latest full copy was migrated from the old framework,
        # convert to full backup.
        oracle_migrate = previous_copy.get("extendInfo", {}).get("migrate", "")
        if oracle_migrate:
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value, ErrorCode.INC_TO_FULL_ERR.value,
                                   "Can not apply this type backup job.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return True

        # Log backup converts to full when new data files were added.
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            return self.check_log_convert_to_full()

        # Incomplete backup data forces a full backup.
        if not self.is_storage_snapshot_backup() and self.check_incre_to_full_rman_backup():
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value,
                                   ErrorCode.INC_TO_FULL_ERR.value,
                                   "backup data is not full.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return True

        # Incremental/diff copies convert to full when the transport mode
        # (storage snapshot or not) differs from the previous backup.
        # NOTE(review): this comparison omits .value unlike the checks above --
        # it only works if BackupTypeEnum is an IntEnum; confirm.
        if (self._backup_type == BackupTypeEnum.INCRE_BACKUP or self._backup_type == BackupTypeEnum.DIFF_BACKUP) \
                and self.is_need_full_backup_in_storage_snapshot():
            logger.info(f"backup transport mode not same, job_id: {self._job_id}.")
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value, ErrorCode.INC_TO_FULL_ERR.value,
                                   "Can not apply this type backup job.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return True
        # Storage-snapshot incremental converts to full when the storage
        # device differs from the one used last time.
        if self.is_storage_snapshot_backup():
            return self.check_incre_to_full_storage_backup()
        logger.info(
            f"Backup job no need to change full backup, backup_type: {self._backup_type}, job_id: {self._job_id}.")
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
        return False

    def check_incre_to_full_rman_backup(self):
        """Return True when an RMAN incremental backup must be converted to full.

        Conversion is required when the addition files are incomplete, when no
        first (full) backup exists in the data repository, or when the database
        id differs from the one recorded by the previous backup.
        """
        if not self.check_addition_file_exist():
            # Incomplete backup data -> full backup required.
            logger.info(f'job: {self._job_id} addition file not full')
            return True
        if not exist_first_backup(self._data_area[0]):
            logger.info(f"This type backup job need to change full_back_up, job_id: {self._job_id}.")
            return True
        if not check_database_id_same(self._data_area[0], self._pid, self._job_id, self._db_instance_name,
                                      self._install_user_name):
            logger.info(f"This type backup job need to change full_back_up, job_id: {self._job_id}.")
            return True
        return False

    def check_incre_to_full_storage_backup(self):
        """
        Decide whether a storage-snapshot incremental backup must be converted
        to full. Reports progress/failure details and writes the action result
        file as a side effect.
        :return: True when the job must convert to full, False otherwise
          (False is also returned when no storage device is usable at all)
        """
        logger.info(f"check_incre_to_full_storage_backup")
        params = self.get_backup_params()
        ret, access_storage_index = OracleStorageBackupService(self._pid, self._job_id, self._param_dict, params) \
            .check_last_backup_storage_is_same()
        if not ret:
            if access_storage_index == -1:
                # No storage device is available: fail the task.
                logger.error(f"access_storage is null.")
                log_detail = LogDetail(logInfo=OracleReportLabel.PLUGIN_STORAGE_AUTH_INFO_FAILED_LABEL,
                                       logInfoParam=[],
                                       logLevel=DBLogLevel.ERROR)
                self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id, log_detail,
                                                      SubJobStatusEnum.FAILED.value)
                response = ActionResponse(code=ExecuteResultEnum.INTERNAL_ERROR,
                                          bodyErr=ErrorCode.ERROR_STORAGE_AUTH_INFO_FAILED.value,
                                          message="Can not apply this type backup job")
                output_result_file(self._pid, response.dict(by_alias=True))
                return False
            # The available storage differs from the one used for the last
            # backup: convert to a full storage backup.
            logger.warn(f"Available storage not equal last backup, to full storage backup.")
            log_detail = LogDetail(logInfo=OracleReportLabel.AVAILABLE_STORAGE_NOT_EQUAL_LAST_BACKUP_LABEL,
                                   logInfoParam=[],
                                   logLevel=DBLogLevel.WARN)
            self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id, log_detail,
                                                  SubJobStatusEnum.RUNNING.value)
            self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value, ErrorCode.INC_TO_FULL_ERR.value,
                                   "storage not equal last backup.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return True
        # Check that the LUN count and sizes of this backup match the previous
        # backup; a mismatch forces a full backup.
        ret = OracleStorageBackupService(self._pid, self._job_id, self._param_dict, params) \
            .check_backup_lun_is_same_with_pre()
        if not ret:
            logger.info(f"check_backup_lun_is_same_with_pre False.")
            self.covert_to_full_backup_with_label(OracleReportLabel.PLUGIN_LUN_NOT_SAME_WITH_PRE_LABEL,
                                                  ErrorCode.INC_TO_FULL_ERR.value)
            return True
        logger.info(f"incre no need to change full backup")
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
        return False

    def check_log_convert_to_full(self):
        """
        Decide whether a log backup must be converted to a full backup.
        :return: True when the job converts to full; False when no conversion
          is needed OR when conversion was required but failed (lock failure /
          LUN mismatch) -- in the failure cases the failure has already been
          reported and written to the result file.
        """
        if self.is_log_need_convert_to_full():
            # Take the job resource lock; the task fails when the lock
            # cannot be acquired.
            if not self.try_lock_backup_job_resource():
                logger.error("try_lock_backup_job_resource failed")
                return False
            else:
                logger.info(f"log change to full backup")
                self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value,
                                       ErrorCode.LOG_TO_FULL_ERR.value,
                                       "Can not apply this type backup job.")
                output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
                return True

        if self.is_storage_snapshot_backup():
            # Check that the LUN count and sizes of this backup match the
            # previous backup; a mismatch fails the log backup.
            params = self.get_backup_params()
            ret = OracleStorageBackupService(self._pid, self._job_id, self._param_dict, params) \
                .check_backup_lun_is_same_with_pre()
            if not ret:
                logger.info(f"check_backup_lun_is_same_with_pre False.")
                log_detail = LogDetail(logInfo=OracleReportLabel.plugin_log_backup_lun_not_same_with_pre_label,
                                       logInfoParam=[],
                                       logLevel=DBLogLevel.ERROR)
                self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id, log_detail,
                                                      SubJobStatusEnum.FAILED.value)
                response = ActionResponse(code=ExecuteResultEnum.INTERNAL_ERROR,
                                          bodyErr=ErrorCode.INTERNAL_ERROR.value,
                                          message="Can not apply this type backup job")
                output_result_file(self._pid, response.dict(by_alias=True))
                return False
        logger.info(f"log no need to change full backup")
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
        return False

    def try_lock_backup_job_resource(self):
        """Try to lock the backup job resource.

        When the lock cannot be taken, the failure is reported as a failed
        sub-job detail and an error response is written to the result file.
        :return: True when the lock is acquired, False otherwise
        """
        if lock_backup_job_resource(self._db_uuid, self._job_id, invoke_rpc_tool_interface):
            return True
        logger.error(f"fail to change backup_type log to full")
        detail = LogDetail(logInfo=OracleReportLabel.LOG_CONVERT_TO_FULL_FAILED,
                           logInfoParam=[], logLevel=DBLogLevel.ERROR)
        self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id,
                                              detail, SubJobStatusEnum.FAILED.value)
        response = ActionResponse(code=ExecuteResultEnum.INTERNAL_ERROR,
                                  bodyErr=ErrorCode.INTERNAL_ERROR.value,
                                  message="Can not apply this type backup job")
        output_result_file(self._pid, response.dict(by_alias=True))
        return False

    def covert_to_full_backup_with_label(self, label, error_code):
        """Report *label* as a running-task warning and record the convert-to-full result.

        NOTE: the name keeps the historical "covert" spelling because external
        callers depend on it.

        :param label: report label to attach to the progress detail
        :param error_code: error code stored in the action result
        """
        detail = LogDetail(logInfo=label, logInfoParam=[], logLevel=DBLogLevel.WARN)
        self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id,
                                              detail, SubJobStatusEnum.RUNNING.value)
        self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value, error_code,
                               "storage not equal last backup.")
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def is_need_full_backup_in_storage_snapshot(self):
        """Return True when the snapshot mode differs from the previous data copy.

        A full backup is needed when the previous copy used storage-layer
        transport but this job is not a snapshot backup, or when the previous
        copy recorded no transport mode while this job is a snapshot backup.
        """
        previous_data_copy = get_previous_copy_info(
            self._protect_obj, [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY], self._job_id)
        previous_mode = previous_data_copy.get("extendInfo", {}) \
            .get(ORACLEJsonConstant.COPY_TRANSPORT_MODE, "")
        snapshot_now = self.is_storage_snapshot_backup()
        was_storage_layer = previous_mode == BackupTransportMode.STORAGE_LAYER
        if was_storage_layer and not snapshot_now:
            return True
        return not previous_mode and snapshot_now

    def is_storage_snapshot_backup(self):
        """Return True when the job is flagged as a storage-snapshot backup."""
        flag = self._job_extend_info.get(ORACLEJsonConstant.STORAGE_SNAPSHOT_FLAG, '')
        return flag.lower() == 'true'

    def is_log_need_convert_to_full(self):
        """
        Check whether a log backup must be converted to a full backup:
        when no previous data copy exists, when the snapshot mode changed
        since the previous data copy, or (non-snapshot mode) when the data
        file set changed since the previous copy.
        :return: True when conversion is needed, False otherwise
        """
        logger.info(f'Try to check if log need convert to full.')
        previous_data_copy = get_previous_copy_info(self._protect_obj,
                                                    [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY,
                                                     RpcParamKey.DIFF_COPY], self._job_id)
        if not previous_data_copy:
            logger.info(f"Previous data copy not exist, log backup job need convert to full backup, "
                        f"backup_type: {self._backup_type}, job_id: {self._job_id}.")
            return True

        pre_copy_trans_mode = previous_data_copy.get("extendInfo", {}).get(ORACLEJsonConstant.COPY_TRANSPORT_MODE, "")
        storage_snap_flag = self.is_storage_snapshot_backup()
        pre_is_storage_layer = pre_copy_trans_mode == BackupTransportMode.STORAGE_LAYER
        # XOR: modes differ between this job and the previous data copy.
        is_need_convert = storage_snap_flag ^ pre_is_storage_layer
        if is_need_convert:
            # Current mode is non-snapshot while the previous data copy is a
            # snapshot copy, or current mode is snapshot while the previous
            # data copy is not: the log backup must convert to full.
            logger.info(f"cur storage_snap_flag is {storage_snap_flag}, pre_copy_trans_mode is {pre_copy_trans_mode}, "
                        f"cur storage_snap_flag not match pre_copy_trans_mode, job_id: {self._job_id}")
            return True

        # NOTE(review): "filelistParams" defaults to {} here but the helpers
        # iterate it as a list of path strings -- confirm the stored type.
        file_list = previous_data_copy.get("extendInfo", {}).get("filelistParams", {})
        if platform.system().lower() == "windows":
            last_file_set, _ = self.get_windows_last_file_set(file_list)
        else:
            last_file_set, _ = self.get_last_file_set(file_list)
        try:
            if self.sub_type == OracleDataBaseType.ORACLE_PDB:
                params = self.get_backup_params()
                data_file_set = OraclePdbBackupService(self._pid, self._job_id, params).get_pdb_data_file_set()
            else:
                data_file_set = get_data_file_set(self._pid, self._db_instance_name, self._install_user_name)
        except Exception as exception:
            logger.error(f'Get data file set failed, {exception}', exc_info=True)
            return False
        logger.info(f"data_file_set:{data_file_set}, last_file_set:{last_file_set},"
                    f"backup_type: {self._backup_type}, job_id: {self._job_id}.")
        if not self.is_storage_snapshot_backup() and data_file_set != last_file_set:
            logger.info(f"Log backup job need convert to full backup"
                        f"backup_type: {self._backup_type}, job_id: {self._job_id}.")
            return True
        else:
            logger.info(f"Log backup job no need convert to full backup, backup_type: {self._backup_type}, "
                        f"job_id: {self._job_id}.")
            return False

    @retry_when_exception(retry_times=3, delay=3)
    def backup_prerequisite_change_file_owner(self, backup_path):
        """
        Change the owner of the mount point to the Oracle install user.

        :param backup_path: mount-point path whose ownership is adjusted
        :raises Exception: when the install user does not exist or chown fails
        """
        if not check_os_user(self._install_user_name):
            logger.error(f"Not exist {self._install_user_name} os username.")
            raise Exception(f"Not exist install username.")
        chown_cmd = f"chown -h {self._install_user_name}:{self._oracle_group} {backup_path}"
        code, _, err_msg = execute_cmd(chown_cmd)
        if code == CMDResult.SUCCESS.value:
            logger.info(f'Run command successfully: {chown_cmd}, job_id:{self._job_id}.')
            return
        logger.error(f"Change owner failed, cmd:{chown_cmd}, return_code:{code}, err_str:{err_msg}, "
                     f"job_id:{self._job_id}.")
        raise Exception(f"Change owner failed.")

    def backup_prerequisite(self):
        """
        Backup prerequisite task.

        Clears the data repository for full backups, triggers the storage
        snapshot in storage-snapshot mode, then writes the result both to a
        progress file in the cache repository (polled by
        backup_prerequisite_progress) and to the RPC result file.
        """
        # Write the backup progress file into the cache repository.
        logger.info(f"backup_prerequisite cache_area: {self._cache_area}.")
        # Cluster storage-snapshot backups need this node's instance name first.
        if self.is_cluster_type() and self.is_storage_snapshot_backup():
            self.cluster_get_oracle_instance_name()
        if self._backup_type == BackupTypeEnum.FULL_BACKUP.value:
            backup_path = self._data_area[0]
            # A full backup starts from an empty data repository.
            clear_repository_dir(backup_path, self._job_id)
            logger.info(f"Succeed to clean backup storage: {backup_path}, pid: {self._pid}, job_id: {self._job_id}.")
        # Storage-snapshot mode (non-log backup): snapshot the storage now.
        if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
            params = self.get_backup_params()
            ret, error_code, error_msg = OracleStorageBackupService(self._pid, self._job_id, self._param_dict,
                                                                    params).backup_prerequisite()
            if not ret:
                logger.error(f"backup_prerequisite failed: {error_code}, {error_msg}")
                if not error_msg:
                    error_msg = 'backup_prerequisite failed'
                self.set_action_result(ExecuteResultEnum.INTERNAL_ERROR.value, error_code, error_msg)
        result_file = os.path.join(self._cache_area, "BackupPrerequisiteProgress")
        output_execution_result(result_file, self.get_action_result().dict(by_alias=True))
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def backup_prerequisite_progress(self):
        """
        Query the prerequisite task progress.

        Progress is inferred from the BackupPrerequisiteProgress file in the
        cache repository: missing -> still RUNNING; present -> COMPLETED,
        unless the stored result records an internal error, in which case a
        FAILED detail is reported.

        :return: (progress, job_status); also written to the RPC result file
        """
        logger.info(f"Execute backup prerequisite progress, pid: {self._pid}, job_id: {self._job_id}.")
        # Existence of the BackupPrerequisiteProgress file marks completion.
        progress = OracleProgress.PROGRESS_ONE_HUNDRED
        job_status = SubJobStatusEnum.COMPLETED.value
        file_path = os.path.join(self._cache_area, "BackupPrerequisiteProgress")

        if not os.path.exists(file_path):
            job_status = SubJobStatusEnum.RUNNING.value
            # NOTE(review): logged at error level even though RUNNING is a normal
            # in-progress state -- confirm the log level is intended.
            logger.error(
                f"Failed to query progress, job status: {job_status}, pid: {self._pid}, job_id: {self._job_id}.")
        else:
            logger.info(f"Reading prerequisite result file, path={file_path}, job_id: {self._job_id}.")
            with open(file_path, 'r', encoding='utf-8') as file_read:
                prerequisite_result = json.load(file_read)
                if prerequisite_result.get("code", "") == ExecuteResultEnum.INTERNAL_ERROR.value:
                    logger.error(f"Backup prerequisite job failed, job_id: {self._job_id}")
                    job_status = SubJobStatusEnum.FAILED.value
                    error_code = prerequisite_result.get("bodyErr", ErrorCode.BACKUP_FAILED.value)
                    contents = prerequisite_result.get("message", "")
                    log_detail = LogDetail(logInfo=ReportDBLabel.PRE_REQUISIT_FAILED, logInfoParam=[self._sub_job_id],
                                           logLevel=DBLogLevel.ERROR.value, logDetail=error_code,
                                           logDetailInfo=[contents])
                    output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, taskStatus=job_status,
                                           progress=progress, logDetail=[log_detail])
                    output_result_file(self._pid, output.dict(by_alias=True))
                    return progress, job_status

        logger.info(f"Succeed to query progress, job status: {job_status}, pid: {self._pid}, job_id: {self._job_id}.")
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress)
        output_result_file(self._pid, output.dict(by_alias=True))

        return progress, job_status

    def gen_sub_job(self):
        """
        Generate backup sub jobs and write them to the RPC result file.

        Cluster jobs are split by priority into mount-bind / backup
        (/ secret / check) sub jobs; storage-snapshot backups delegate the
        split (per LUN) to OracleStorageBackupService. Single-node jobs get a
        backup sub job plus, for non-log backups, a data-check sub job.
        """
        logger.info(f"Enter gen_sub_job function, pid: {self._pid}, job_id:{self._job_id}.")
        if self.is_cluster_type():
            logger.info(
                f'Oracle-cluster start to generate_sub_job, job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.')
            if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
                response = [
                    # CONSISTENCY FIX: pass the enum's .value like every other policy here.
                    SubJob(policy=SubJobPolicyEnum.EVERY_NODE_ONE_TIME.value, jobName=BackupSubJobName.MOUNT_BIND,
                           jobId=self._job_id, jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value).dict(by_alias=True),
                    SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.ANY_NODE.value,
                           jobName=BackupSubJobName.BACK_TASK,
                           jobPriority=SubJobPriorityEnum.JOB_PRIORITY_2.value).dict(by_alias=True),
                ]
            else:
                # Storage-snapshot mode: split sub jobs by LUN count.
                if self.is_storage_snapshot_backup():
                    response = OracleStorageBackupService(self._pid, self._job_id, self._param_dict).gen_sub_job()
                else:
                    back_up_sub_job = self.get_cluster_back_up_sub_job()
                    response = [
                        # CONSISTENCY FIX: pass the enum's .value like every other policy here.
                        SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.EVERY_NODE_ONE_TIME.value,
                               jobName=BackupSubJobName.MOUNT_BIND,
                               jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value).dict(by_alias=True),
                        back_up_sub_job,
                        SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.EVERY_NODE_ONE_TIME.value,
                               jobName=BackupSubJobName.SECRET_TASK,
                               jobPriority=SubJobPriorityEnum.JOB_PRIORITY_3.value).dict(by_alias=True),
                        SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.ANY_NODE.value,
                               jobName=BackupSubJobName.CHECK_TASK,
                               jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4.value).dict(by_alias=True)
                    ]
        else:
            logger.info(
                f'Oracle-single start to generate_sub_job, job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.')
            # Storage-snapshot mode: split sub jobs by LUN count.
            if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
                response = OracleStorageBackupService(self._pid, self._job_id, self._param_dict).gen_sub_job()
            else:
                if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
                    response = [
                        SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.ANY_NODE.value, jobName='backup',
                               jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value).dict(by_alias=True)
                    ]
                else:
                    response = [
                        SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.ANY_NODE.value, jobName='backup',
                               jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value).dict(by_alias=True),
                        SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.ANY_NODE.value,
                               jobName=BackupSubJobName.CHECK_TASK,
                               jobPriority=SubJobPriorityEnum.JOB_PRIORITY_2.value).dict(by_alias=True)
                    ]
        output_result_file(self._pid, response)

    def get_cluster_back_up_sub_job(self):
        """
        Build the cluster backup sub job.

        When the CDB is open, the backup sub job is pinned (FIXED_NODE) to the
        cluster node whose instance currently has the PDB open; otherwise the
        pre-set policy/node are used (presumably defaults set in __init__ --
        not visible here, TODO confirm).

        :return: SubJob dict (by_alias) with priority 2
        """
        self.cluster_get_oracle_instance_name()
        login_params = self.build_login_param()
        cbd_is_open = check_cdb_is_open(self._pid, login_params)
        if cbd_is_open:
            logger.info(f"Start distributed cluster back sub job, pid is {self._pid}")
            cluster_nodes = self.get_cluster_nodes_info(self._cluster_nodes)
            pdb_open_instance_name = get_pdb_open_cluster_inst(self._pid, login_params)
            # Find the host whose instance has the PDB open.
            for node in cluster_nodes:
                if node.get('node_inst_name', '') == pdb_open_instance_name:
                    self.exec_cluster_back_job_host_id = node.get('host_id', '')
                    break
            if pdb_open_instance_name and self.exec_cluster_back_job_host_id:
                logger.info(f"Execute the backup subtask on node: {self.exec_cluster_back_job_host_id},"
                            f" instance_name: {pdb_open_instance_name}")
                self.exec_cluster_back_job_policy = SubJobPolicyEnum.FIXED_NODE.value
        return SubJob(jobId=self._job_id, policy=self.exec_cluster_back_job_policy,
                      jobName=BackupSubJobName.BACK_TASK,
                      execNodeId=self.exec_cluster_back_job_host_id,
                      jobPriority=SubJobPriorityEnum.JOB_PRIORITY_2.value).dict(by_alias=True)

    def write_backup_progress_file(self, status):
        """
        Write the backup progress file into the cache repository.

        :param status: backup sub-task progress status (SubJobStatusEnum value)
        """
        content = BackupProgressInfo()
        # Take a single timestamp so start time and change time are guaranteed
        # identical on creation (two time.time() calls could straddle a second).
        now = int(time.time())
        content.s_time = now
        content.c_time = now
        content.status = status
        logger.info(f'Write backup info file, status is: {content.status}, pid: {self._pid}, job_id:{self._job_id}, '
                    f'sub_job_id:{self._sub_job_id}.')
        file_path = os.path.join(self._cache_area, f"BackupProgress_{self._sub_job_id}")
        write_tmp_json_file(file_path, self._job_id, content.dict(by_alias=True))

    def set_backup_result(self, status, error_code, message, log_detail_param=None):
        """
        Set the backup task result in the cached progress file.

        :param status: task status (SubJobStatusEnum value)
        :param error_code: error code
        :param message: task message
        :param log_detail_param: error-code parameters for the reported log detail
        """
        if log_detail_param is None:
            log_detail_param = []
        file_path = os.path.realpath(os.path.join(self._cache_area, f"BackupProgress_{self._sub_job_id}"))
        task_info = read_tmp_json_file(file_path)
        # Only update when the file already has the expected shape; otherwise
        # the existing content is written back unchanged.
        if ("status" in task_info) and ("error_code" in task_info) and ("message" in task_info):
            task_info["status"] = status
            task_info["error_code"] = error_code
            task_info["message"] = message
            task_info["log_detail_param"] = log_detail_param
        logger.info(
            f"Set backup result, status: {status}, message: {message}, log_detail_param: {log_detail_param}, "
            f"pid:{self._pid}, job_id: {self._job_id}, sub_job_id:{self._sub_job_id}.")
        write_tmp_json_file(file_path, self._job_id, task_info)

    def get_backup_params(self):
        """
        Assemble the parameter dict handed to the backup services/scripts.

        :return: dict of connection, path, channel/QoS, encryption and
                 log-backup (SCN / resetlogs id) parameters
        """
        # Assemble backup parameters.
        db_user_name = get_env_variable(f"job_protectObject_auth_authKey_{self._pid}")
        db_pass_word = get_env_variable(f"job_protectObject_auth_authPwd_{self._pid}")
        pdb_tag = self.pdb_list[0] if self.pdb_list else ''
        backup_scn = ""
        reset_logs_id = ""
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            backup_scn, reset_logs_id = self.get_log_backup_params()
            self.backup_scn = backup_scn

        backup_data_paths = self.get_backup_data_paths()
        logger.info(f"Mount backup data path={backup_data_paths},job_id={self._job_id},sbu_job_id={self._sub_job_id}.")

        # Windows log backups write to a dedicated local log path instead of the log area.
        log_path = self._log_area
        if platform.system().lower() == "windows" and self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            log_path = PluginPathConstant.WINDOWS_ORACLE_LOG_FILE_PATH.value + self._db_uuid

        return {
            'AppName': self._protect_obj.get("name", ""),
            'DBUUID': self._db_uuid,
            'InstanceName': self._db_instance_name,
            'UserName': db_user_name,
            'Password': db_pass_word,
            'OracleHome': self._oracle_home,
            'OracleBase': self._oracle_base,
            # Previously unused by the shell scripts; now used when no env vars are configured.
            'AccessOracleHome': self.access_oracle_home,
            'AccessOracleBase': self.access_oracle_base,
            'ASMUserName': self._asm_user_name,
            'ASMPassword': self._asm_pass_word,
            'ASMInstanceName': self._asm_instance_name,
            'Channel': self._job_dict.get('extendInfo', {}).get('channel_number', 1),
            'Qos': self._job_param.get("qos", {}).get('bandwidth', 0),
            # Presumably maps the backup type enum to an RMAN incremental level -- TODO confirm.
            'Level': self._backup_type - 1,
            'truncateLog': OracleTruncateLogMap.get(self._delete_archived_log),
            'EncAlgo': self.enc_algo,
            'EncKey': self.enc_key,
            'LogPath': (PluginPathConstant.ORACLE_LOG_FILE_PATH.value + self._db_uuid
                        if platform.system() == 'AIX' else log_path),
            'DataPath': backup_data_paths,
            'LastBackupScn': backup_scn,
            'LastBackupResetLogsId': reset_logs_id,
            'IpPortInfo': ';'.join(self._ip_port_info),
            'NodeNumber': self._node_number,
            'OracleInstallUser': self._install_user_name,
            'GridInstallUser': self._asm_install_user_name,
            'OracleGroup': self._oracle_group,
            'PdbNames': json.loads(self._protect_obj_extend_info.get('pdb', '[]')),
            'PdbTag': pdb_tag,
            'ParentName': self._protect_obj.get("parentName", ""),
            'sub_type': self.sub_type
        }

    def get_backup_data_paths(self):
        """
        Return the backup data paths as a ';'-joined string.

        For non-snapshot, non-log cluster backups the paths written by the
        mount-bind sub task (temp file keyed by job id) take precedence over
        this node's locally mounted paths; the temp file is deleted afterwards.
        """
        backup_data_paths = ';'.join(self.mount_backup_data_path)
        if not self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value and \
                self.is_cluster_type():
            if platform.system().lower() == "windows":
                file_path = os.path.join(PluginPathConstant.WINDOWS_TMP_PATH,
                                         PluginPathConstant.INPUT_TMP_FILE_PREFIX + f"{self._job_id}")
                # Windows temp files are read with the platform's default locale encoding.
                encoding = locale.getdefaultlocale()[1]
                if os.path.exists(file_path):
                    backup_data_paths = read_result_file(file_path, encoding=encoding)
            else:
                file_path = os.path.join(PluginPathConstant.TMP_PATH,
                                         PluginPathConstant.INPUT_TMP_FILE_PREFIX + f"{self._job_id}")
                if os.path.exists(file_path):
                    backup_data_paths = read_result_file(file_path)
            # Delete the temp file produced by the mount_bind sub task.
            delete_file(file_path)
        return backup_data_paths

    def mount_bind_backup_path(self):
        """
        Bind the delivered backup paths to local directories (non-AIX).

        :return: True when all binds succeed, False on the first failure
        """

        # Windows log backups additionally need the log repository mount-bound.
        if platform.system().lower() == "windows" and self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            mount_log_backup_path = PluginPathConstant.WINDOWS_ORACLE_LOG_FILE_PATH.value + self._db_uuid
            if not mount_bind_path(self._log_area, mount_log_backup_path):
                logger.error(f"Mount bind log path failed, job_id:{self._job_id}.")
                return False

        mount_bind_path_prefix = PluginPathConstant.ORACLE_DATA_FILE_PATH.value
        if platform.system().lower() == "windows":
            mount_bind_path_prefix = PluginPathConstant.WINDOWS_ORACLE_DATA_FILE_PATH.value

        if self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
            for data_area_path in self._data_area:
                # The last path component is presumably the repository's logical IP -- TODO confirm.
                if platform.system().lower() == "windows":
                    logic_ip_path = data_area_path.split('\\')[-1]
                else:
                    logic_ip_path = data_area_path.split('/')[-1]
                mount_data_backup_path = mount_bind_path_prefix + logic_ip_path + self._db_uuid
                if not mount_bind_path(data_area_path, mount_data_backup_path):
                    logger.error(f"Mount bind data path failed, job_id:{self._job_id}.")
                    return False

                self.mount_backup_data_path.append(mount_data_backup_path)

        if self.is_cluster_type():
            # Save the bound data-repository paths to a file so the backup
            # task can pick them up as parameters.
            write_tmp_file(';'.join(self.mount_backup_data_path),
                           PluginPathConstant.INPUT_TMP_FILE_PREFIX + f"{self._job_id}")

        logger.info("Mount bind backup path success.")
        return True

    def check_backup_params(self):
        """Validate that the repository required by this backup type is present."""
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            # Log backups only require the log area.
            return bool(self._log_area)
        # All other backup types require the data area.
        return bool(self._data_area)

    def cluster_get_oracle_instance_name(self):
        """
        Resolve this cluster node's Oracle instance settings.

        Reads the local host SN file, finds the matching node in the cluster
        node list and copies its install user, instance names and Oracle
        home/base onto this object.
        """
        if platform.system().lower() == "windows":
            encoding = locale.getdefaultlocale()[1]
            host_id = read_result_file(PluginPathConstant.WINDOWS_HOST_SN_FILE_PATH, encoding=encoding)
        else:
            host_id = read_result_file(PluginPathConstant.LINUX_HOST_SN_FILE_PATH)
        logger.info(f"Read host_id is: {host_id}.")
        cluster_nodes = self.get_cluster_nodes_info(self._cluster_nodes)
        # Copy the matching node's settings; no match leaves the attributes unchanged.
        for cluster_node in cluster_nodes:
            if cluster_node.get("host_id", "") == host_id.strip():
                self._install_user_name = cluster_node.get("node_install_user_name", "")
                self._db_instance_name = cluster_node.get("node_inst_name", "")
                self._asm_instance_name = cluster_node.get("node_asm_inst_name", "")
                self._oracle_home = cluster_node.get("oracle_home", "")
                self._oracle_base = cluster_node.get("oracle_base", "")
                break

        logger.info(f"Backup node install_user_name: {self._install_user_name}, "
                    f"db_instance_name: {self._db_instance_name}, job_id: {self._job_id}, "
                    f"sub_job_id: {self._sub_job_id}.")

    def create_mount_bind_file(self):
        """Create the local mount-bind directories for the data and log repositories."""
        # Data repository: one local directory per data-area path.
        data_prefix = PluginPathConstant.ORACLE_DATA_FILE_PATH.value
        for area_path in self._data_area:
            target_dir = data_prefix + area_path.split('/')[-1] + self._db_uuid
            create_dir_recursive(target_dir)
            # Non-snapshot backups hand the directory over to the Oracle install user.
            if not self.is_storage_snapshot_backup():
                self.backup_prerequisite_change_file_owner(target_dir)

        # Log repository: a single local directory, created only if missing.
        log_dir = PluginPathConstant.ORACLE_LOG_FILE_PATH.value + self._db_uuid
        if not os.path.exists(log_dir):
            create_dir_recursive(log_dir)

    def get_aix_data_repository(self):
        """Return the first DATA repository from the job parameters, or {} if absent."""
        wanted_type = RepositoryDataTypeEnum.DATA_REPOSITORY.value
        return next((repo for repo in self._job_dict.get("repositories", [])
                     if repo.get('repositoryType') == wanted_type), {})

    def get_aix_log_repository(self):
        """Return the first LOG repository from the job parameters, or {} if absent."""
        wanted_type = RepositoryDataTypeEnum.LOG_REPOSITORY.value
        return next((repo for repo in self._job_dict.get("repositories", [])
                     if repo.get('repositoryType') == wanted_type), {})

    def mount_aix_bind_backup_path(self):
        """
        Mount the backup repositories on AIX via the RC tool plugin.

        Log backups try each remote host of the log repository until one
        mounts; data backups mount one local path per data area, matched to
        the remote host whose ip equals the path's last component.

        :return: True on success, False when mounting fails
        """

        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            repository = self.get_aix_log_repository()
        else:
            repository = self.get_aix_data_repository()

        logger.info(f'Aix mount repository is : {repository}, pid: {self._pid}, job_id: {self._job_id}.')
        remote_host_array = repository.get(ORACLEJsonConstant.REMOTE_HOST, [])
        remote_path = repository.get(ORACLEJsonConstant.REMOTE_PATH, "")
        repository[ORACLEJsonConstant.REMOTE_HOST] = []

        params = {
            'install_user_name': self._install_user_name,
            'oracle_group': self._oracle_group,
            'file_mode': 'O750',
            'sub_type': self._protect_obj.get("subType", ""),
            'fibre_channel': self._job_dict.get("extendInfo", {}).get('fibreChannel', ""),
            'data_repository': [],
            'cmd': 'MountRepositoryByPlugin'
        }

        # mount_bind the AIX log repository.
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            # Call the mount interface per remote host; the first success wins.
            for remote_host in remote_host_array:
                repository[ORACLEJsonConstant.REMOTE_HOST] = [remote_host]
                mount_log_backup_path = PluginPathConstant.ORACLE_LOG_FILE_PATH.value + self._db_uuid
                repository[ORACLEJsonConstant.PATH] = [mount_log_backup_path]
                repository[ORACLEJsonConstant.REMOTE_PATH] = os.path.join(remote_path, self._job_id)  # mount the job_id level
                params['repository'] = repository
                if call_rc_tool_mount_cmd(self._job_id, params):
                    logger.info("Mount bind aix log backup path success.")
                    return True

            logger.info("Mount bind aix log backup path failed.")
            return False

        for data_area_path in self._data_area:
            logic_ip_path = data_area_path.split('/')[-1]
            mount_data_backup_path = PluginPathConstant.ORACLE_DATA_FILE_PATH.value + logic_ip_path + self._db_uuid
            for remote_host in remote_host_array:
                if remote_host.get('ip') == logic_ip_path:
                    repository[ORACLEJsonConstant.REMOTE_HOST] = [remote_host]
                    repository[ORACLEJsonConstant.PATH] = [mount_data_backup_path]
                    params['repository'] = repository
                    break
            # NOTE(review): if no remote host matches, the mount is attempted with the
            # repository state left over from the previous iteration -- confirm intended.
            # Call the mount interface.
            if not call_rc_tool_mount_cmd(self._job_id, params):
                return False
            self.mount_backup_data_path.append(mount_data_backup_path)

        if self.is_cluster_type():
            # Save the bound data-repository paths to a file so the backup
            # task can pick them up as parameters.
            write_tmp_file(';'.join(self.mount_backup_data_path),
                           PluginPathConstant.INPUT_TMP_FILE_PREFIX + f"{self._job_id}")

        logger.info("Mount bind aix data backup path success.")
        return True

    def mount_bind_data_path(self):
        """Bind the backup repositories locally; report failure and, for clusters, completion."""
        on_aix = platform.system() == 'AIX'
        mounted = self.mount_aix_bind_backup_path() if on_aix else self.mount_bind_backup_path()
        if not mounted:
            # A failed mount ends the sub task with a FAILED result.
            aix_tag = "aix " if on_aix else ""
            logger.error(f"Failed to mount {aix_tag}bind backup path, pid: {self._pid}, job_id: {self._job_id}.")
            self.set_backup_result(SubJobStatusEnum.FAILED.value, ErrorCode.PLUGIN_CANNOT_BACKUP_ERR.value,
                                   f"Failed to mount {aix_tag}bind path.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return

        # Clusters must end the mount-bind sub task and report completion --
        # except storage-snapshot (non-log) backups, which report elsewhere.
        if self.is_cluster_type():
            if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
                return
            self.set_backup_result(SubJobStatusEnum.COMPLETED.value, "", "")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def solve_mount_bind_backup_sub_task(self):
        """Handle the mount-bind sub task: clean leftovers, prepare local dirs, bind paths."""
        logger.info(f'job id: {self._job_id}, solve_mount_bind_backup_sub_task')
        # Unmount leftovers first -- a previous run may never have reached the
        # post-task unmount, so stale binds from the last job are cleared here.
        self.umount_bind_backup_path()
        # Create the mount points and adjust their ownership (not needed on Windows).
        if platform.system().lower() != "windows":
            self.create_mount_bind_file()
        # Bind the backup paths.
        self.mount_bind_data_path()

    def solve_backup_sub_task(self):
        """
        Run the backup sub task on this node.

        Resolves the cluster instance name if needed, loads encryption
        settings from the SLA, checks block change tracking, then dispatches
        to the log-backup or data-backup path.
        """

        if self.is_cluster_type():
            # Clusters must resolve this node's instance name first.
            self.cluster_get_oracle_instance_name()
        else:
            logger.info(f"Enter Oracle single backup Task, pid: {self._pid}. job_id: {self._job_id}.")

        logger.info(
            f"Backup node install_user_name: {self._install_user_name}, db_instance_name: {self._db_instance_name}.")
        self.enc_algo = self._backup_task_sla.get("policy_list", [])[0].get("ext_parameters", {}).get(
            "encryption_algorithm", "")
        if self.enc_algo:
            self.enc_key = get_env_variable(f"job_protectObject_extendInfo_EncKey_{self._pid}")
            self.encrypted_key = get_env_variable(f"job_protectObject_extendInfo_Encrypted_{self._pid}")

        logger.info(f"Enter backup Task, backup_type:{self._backup_type}, enc_algo: {self.enc_algo}, pid: {self._pid}, "
                    f"job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")
        login_params = self.build_login_param()
        self.block_change_tracking = check_database_block_change_tracking(self._pid, login_params)

        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            self.backup_log()
        else:
            # Initialize the metadata database (non-snapshot backups only).
            if not self.is_storage_snapshot_backup():
                MetaDataSqliteService.init_database(self._meta_area)
            if self.sub_type != OracleDataBaseType.ORACLE_PDB:
                self.save_copy_table_tree()
            self.backup_data()

    def copy_oracle_secret_task(self):
        """
        Copy the Oracle password file (orapw<SID>) into the copy's additional area.

        Only needed for Oracle 11 and older; newer major versions complete
        immediately. The sub-task result and the RPC result file are written
        in all cases.
        """
        # Clusters: resolve this node's instance name / Oracle home first.
        self.cluster_get_oracle_instance_name()

        # BUGFIX: default must be a string -- the previous {} default would make
        # oracle_version.split(".") raise AttributeError when "version" is missing.
        oracle_version = self._protect_obj_extend_info.get("version", "")
        oracle_major_version = oracle_version.split(".")[0]
        logger.info(f"Copy secret task running, oracle_home: {self._oracle_home}, oracle_version: {oracle_version}, "
                    f"oracle_major_version: {oracle_major_version}, job_id: {self._job_id}")
        if int(oracle_major_version) > 11:
            # 12c+ does not require copying the password file.
            self.set_backup_result(SubJobStatusEnum.COMPLETED.value, "", "")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return

        additional_path = os.path.join(self._data_area[0], "additional")

        # cp -d -r -p preserves links and permissions so the restored file is usable as-is.
        cmd = f"cp -d -r -p {self._oracle_home}/dbs/orapw{self._db_instance_name} " \
              f"{additional_path}/dbs/orapw{self._db_instance_name}"
        logger.info(f"Additional path is: {additional_path}, cmd is: {cmd}, job_id: {self._job_id}")

        return_code, output, err_str = execute_cmd(cmd)
        if return_code != CMDResult.SUCCESS.value:
            logger.error(f"Backup secret failed, cmd:{cmd}, return_code:{return_code}, err_str:{err_str}, "
                         f"job_id:{self._job_id}.")
            self.set_backup_result(SubJobStatusEnum.FAILED.value, ErrorCode.INTERNAL_ERROR.value,
                                   "Copy secret failed.")
        else:
            logger.info(f'Backup secret successfully, job_id:{self._job_id}.')
            self.set_backup_result(SubJobStatusEnum.COMPLETED.value, "", "")

        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def solve_check_backup_data_task(self):
        """Verify the backed-up data files are complete and record the sub-task result."""
        logger.info(f'job id: {self._job_id}, start solve check backup data task')
        # Clusters need this node's instance name before checking.
        if self.is_cluster_type():
            self.cluster_get_oracle_instance_name()

        if self.sub_type == OracleDataBaseType.ORACLE_PDB:
            pdb_service = OraclePdbBackupService(self._pid, self._job_id, self.get_backup_params())
            consistent = pdb_service.check_datafile_consistency(self._data_area[0])
        else:
            consistent = self.check_datafile_consistency()

        if not consistent:
            logger.error(f'job id: {self._job_id}, solve check backup data task failed')
            self.set_backup_result(SubJobStatusEnum.FAILED.value, ErrorCode.DATA_FILE_IN_COMPLETE.value,
                                   "data file is not complete.")
            return
        logger.info(f'job id: {self._job_id}, solve check backup data task success')
        self.set_backup_result(SubJobStatusEnum.COMPLETED.value, "", "")

    def backup(self):
        """
        Oracle backup task dispatcher.

        Validates parameters, records RUNNING progress, routes the sub task by
        its job name (mount-bind / secret / check), handles the single-node
        bind step, short-circuits already-finished tasks via cache-repository
        marker files, and finally runs the backup itself.
        """
        # Check the backup parameters before doing anything else.
        if not self.check_backup_params():
            self.set_backup_result(SubJobStatusEnum.FAILED.value, ErrorCode.INTERNAL_ERROR.value,
                                   "backup repository is invalid.")
            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return

        # Start writing the backup progress file.
        self.write_backup_progress_file(SubJobStatusEnum.RUNNING.value)

        # Hoist the sub-job name once instead of re-reading the param dict per branch.
        job_name = self._param_dict.get("subJob", {}).get("jobName", "")

        # Clusters: mount-bind sub task.
        if job_name == BackupSubJobName.MOUNT_BIND:
            self.solve_mount_bind_backup_sub_task()
            return

        # Clusters: secret (password-file) sub task.
        if job_name == BackupSubJobName.SECRET_TASK:
            self.copy_oracle_secret_task()
            return
        # Data-check sub task.
        if job_name == BackupSubJobName.CHECK_TASK:
            self.solve_check_backup_data_task()
            return
        # Single node: do the bind step inline (no dedicated sub task).
        if not self.is_cluster_type():
            if not self.is_storage_snapshot_backup() or self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
                self.solve_mount_bind_backup_sub_task()

        # Marker files in the cache repository mean the backup already finished:
        # just (re)report the copy and/or the completed result.
        backup_task_result_file = os.path.join(self._cache_area, self._sub_job_id + "_backup_task_success")
        report_copy_result_file = os.path.join(self._cache_area, self._sub_job_id + "_report_backup_copy_success")
        if os.path.exists(backup_task_result_file):
            if os.path.exists(report_copy_result_file):
                self.set_backup_result(SubJobStatusEnum.COMPLETED.value, "", "")
            else:
                self.call_report_backup_copy()

            output_result_file(self._pid, self.get_action_result().dict(by_alias=True))
            return

        # Run the backup sub task proper.
        self.solve_backup_sub_task()

    def query_scan_repositories(self):
        """
        E6000 adaptation: report the repositories the scanner should walk.

        Writes (via the RPC result file) the data path, the meta/copy path and
        the save path for either the log repository (log backup) or the
        data/meta repositories (data backup), creating the meta path when it
        does not exist yet.
        """
        logger.info(f"query_scan_repositories, req_id: {self._pid}, job_id: {self._job_id}, "
                    f"sub_id: {self._sub_job_id}.")

        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            logger.info(f"query_scan_repositories for log backup, job_id: {self._job_id}, "
                        f"sub_id: {self._sub_job_id}.")
            # Log repo meta area: /Database_{resource_id}_LogRepository_su{num}/{ip}/meta/{job_id}
            meta_copy_path = os.path.join(os.path.dirname(self._log_area), 'meta', self._job_id)
            # Log repo data area: /Database_{resource_id}_LogRepository_su{num}/{ip}/{job_id}
            data_path = self._log_area
            # /Database_{resource_id}_LogRepository_su{num}/{ip}
            save_path = os.path.dirname(self._log_area)
        else:
            logger.info(f"query_scan_repositories for data backup, job_id: {self._job_id}, "
                        f"sub_id: {self._sub_job_id}.")

            # meta/Database_{resource_id}_InnerDirectory_su{num}/source_policy_{job_id}/Context_Global_MD/{ip}
            meta_copy_path = self._meta_area
            # data/Database_{resource_id}_InnerDirectory_su{num}/source_policy_{job_id}/Context/{ip}
            data_path = self._data_area[0]
            # meta/Database_{resource_id}_InnerDirectory_su{num}/source_policy_{job_id}/Context_Global_MD/{ip}
            save_path = self._meta_area
        if not os.path.exists(meta_copy_path):
            if platform.system().lower() == "windows":
                create_dir_recursive(meta_copy_path)
                resetlogs_id_map_path = os.path.join(meta_copy_path, LogMetaFileName.RESETLOGS_ID_MAP)
                if not os.path.exists(resetlogs_id_map_path):
                    touch_file(resetlogs_id_map_path)
            else:
                # BUGFIX: mode was 0x777 (hexadecimal == 0o3567); file permission
                # bits are octal, so rwxrwxrwx is 0o777.
                exec_mkdir_cmd(meta_copy_path, mode=0o777)
        logger.info(f"query_scan_repositories, meta_copy_path: {meta_copy_path}, "
                    f"data_path: {data_path}, save_path: {save_path}.")
        log_meta_copy_repo = RepositoryPath(repositoryType=RepositoryDataTypeEnum.META_REPOSITORY.value,
                                            scanPath=meta_copy_path)
        log_data_repo = RepositoryPath(repositoryType=RepositoryDataTypeEnum.DATA_REPOSITORY.value, scanPath=data_path)
        scan_repos = ScanRepositories(scanRepoList=[log_data_repo, log_meta_copy_repo], savePath=save_path)
        logger.info(f"query_scan_repositories success, req_id: {self._pid}, job_id: {self._job_id}, "
                    f"sub_id: {self._sub_job_id}.")
        output_result_file(self._pid, scan_repos.dict(by_alias=True))


    def get_log_backup_params(self):
        """Pick the base copy for a log backup and derive its start SCN.

        Returns a tuple ``(backup_scn, reset_logs_id)``. Also caches the chosen
        copy's pfile parameters on ``self.p_file_param`` as a side effect.
        """
        # First try the most recent log copy; also fetch the most recent copy of any type.
        previous_copy = get_previous_copy_info(self._protect_obj, [RpcParamKey.LOG_COPY], self._job_id)
        last_copy = get_previous_copy_info(
            self._protect_obj, [RpcParamKey.FULL_COPY, RpcParamKey.DIFF_COPY,
                                RpcParamKey.INCREMENT_COPY, RpcParamKey.LOG_COPY],
            self._job_id)
        logger.info(f"get_log_backup_params, previous_copy: {json.dumps(previous_copy)},"
                    f" last_copy: {json.dumps(last_copy)}")
        if not previous_copy:
            previous_copy = last_copy
        else:
            # If resetlogs ids differ, the database was restored in between; use the latest copy.
            if last_copy and str(previous_copy.get('extendInfo', {}).get('resetlogs_id', "")).strip() != str(
                    last_copy.get('extendInfo', {}).get('resetlogs_id', "")).strip():
                previous_copy = last_copy
            # If the previous log copy's transport mode (storage snapshot or not)
            # differs from the current backup's mode, use the latest copy.
            elif last_copy and self.is_need_get_recent_copy_by_storage_snapshot(previous_copy):
                previous_copy = last_copy
            # If the previous log copy's endSCN is not behind the database's current SCN, use the latest copy.
            elif last_copy and not self.end_scn_check(previous_copy):
                previous_copy = last_copy

        # NOTE(review): compares the copy "type" string against CopyDataTypeEnum.LOG_COPY;
        # confirm that constant is a plain string (elsewhere RpcParamKey.LOG_COPY is used).
        if previous_copy.get("type", "") == CopyDataTypeEnum.LOG_COPY:
            backup_scn = previous_copy.get('extendInfo', {}).get("endSCN", "")
            file_path = os.path.join(self._meta_area, LogMetaFileName.LOG_LOST_FROM_SCN)
            result_info = read_tmp_json_file(file_path)
            log_lost_from_scn = result_info.get(str(backup_scn), '')
            logger.info(f"get backup_scn {backup_scn}, result_info {result_info}, {log_lost_from_scn}")
            if log_lost_from_scn:
                # Logs were lost starting from the last log copy's SCN; restart the
                # log chain from the most recent data copy instead.
                previous_copy = last_copy
                backup_scn = previous_copy.get('extendInfo', {}).get("last_backup_scn", "")
                logger.info(f"get last_backup_scn {backup_scn}")
        else:
            backup_scn = previous_copy.get('extendInfo', {}).get("last_backup_scn", "")

        reset_logs_id = previous_copy.get('extendInfo', {}).get('resetlogs_id', "")
        self.p_file_param = previous_copy.get('extendInfo', {}).get('pfileParams', "")
        logger.info(f"Got param backup_scn:{backup_scn}, reset_logs_id:{reset_logs_id}, "
                    f"pid: {self._pid}, job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")
        return backup_scn, reset_logs_id

    def end_scn_check(self, previous_log_copy):
        """Return True when the database's current SCN is strictly greater than
        the endSCN recorded on the previous log copy, False otherwise."""
        last_end_scn = previous_log_copy.get('extendInfo', {}).get("endSCN", "")
        logger.info(f"last log copy backup scn:{last_end_scn}")
        scn_now = get_current_scn(self._pid, self.build_login_param())
        logger.info(f"current_scn:{scn_now},backup_scn:{last_end_scn}")
        return bool(scn_now) and int(scn_now) > int(last_end_scn)

    def build_login_param(self):
        """Assemble the login parameters for the protected instance.

        The auth_pwd / auth_key entries are lookup names keyed by the current
        request id, not the secrets themselves.
        """
        params = dict(
            instance_name=self._db_instance_name,
            db_install_user=self._install_user_name,
        )
        params['auth_pwd'] = f"job_protectObject_auth_authPwd_{self._pid}"
        params['auth_key'] = f"job_protectObject_auth_authKey_{self._pid}"
        return params

    def is_need_get_recent_copy_by_storage_snapshot(self, previous_log_copy):
        """Return True when the previous log copy's transport mode disagrees with
        the current backup's storage-snapshot mode (i.e. the mode has flipped)."""
        extend_info = previous_log_copy.get("extendInfo", {})
        copy_transport_mode = extend_info.get(ORACLEJsonConstant.COPY_TRANSPORT_MODE, "")
        storage_snap_flag = self.is_storage_snapshot_backup()
        logger.info(f"copy_transport_mode:{copy_transport_mode},storage_snap_flag:{storage_snap_flag}")
        if storage_snap_flag:
            # Snapshot backup now, but the previous copy carried no transport mode.
            return not copy_transport_mode
        # Normal backup now, but the previous copy was a storage-layer one.
        return copy_transport_mode == BackupTransportMode.STORAGE_LAYER

    def backup_log(self):
        """
        Run an archive-log backup via the platform service (Windows) or the root
        archive-backup shell script, record the sub-job result, and on success
        report the copy.
        """
        params = self.get_backup_params()
        if platform.system().lower() == "windows":
            exit_status = OracleWindowsBackupService(self._pid, self._job_id, params).backup_log_windows()
        else:
            # The script receives its parameters via stdin (pexpect), not argv,
            # so secrets never appear on the command line.
            child = pexpect.spawn(
                f"sh {PluginPathConstant.SCRIPTS_PATH.value}/{RootCommand.ROOT_ORACLE_NATIVE_ARCHIVE_BACKUP.value} "
                f"{PluginPathConstant.SCRIPTS_PATH.value} {self._job_id} {len(params)}", encoding='utf-8', timeout=None)
            send_params_to_pexpect(params, child)
            child.expect(pexpect.EOF)
            child.close()
            exit_status = child.exitstatus
            # Scrub in-memory password copies as soon as the script has finished.
            clear(params.get('Password'))
            clear(params.get('ASMPassword'))
        if exit_status != ExecuteResultEnum.SUCCESS.value:
            # Shell script exit codes must be converted to OMRP error codes so the
            # UI can display the right error message.
            log_detail_param = []
            if exit_status == ScriptExitCode.ERROR_ORACLE_HAS_LOG_LOST:
                # Backup failed because archive logs are missing: record the
                # from-SCN in log_lost_from_scn.json so the next log backup can
                # restart the chain from the most recent data copy.
                logger.info(f"backup_log ERROR_ORACLE_HAS_LOG_LOST :{self.backup_scn}")
                file_path = os.path.join(self._meta_area, LogMetaFileName.LOG_LOST_FROM_SCN)
                reset_info = read_tmp_json_file(file_path)
                reset_info[self.backup_scn] = '1'
                write_tmp_json_file(file_path, self._job_id, reset_info)
                first_change = self.get_archivelog_first_change_from_file()
                log_detail_param = [f"{self.backup_scn}", f"{first_change}"]
            error_code, contents = convert_error_code(self._job_id, self._job_id, exit_status)
            self.set_backup_result(SubJobStatusEnum.FAILED.value, error_code, contents, log_detail_param)
            logger.error(f"Execute archive backup script failed, exit_status: {exit_status}, pid: {self._pid}, "
                         f"job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")
        else:
            # Log backup succeeded: clear the log_lost_from_scn.json record.
            log_lost_from_scn = os.path.join(self._meta_area, LogMetaFileName.LOG_LOST_FROM_SCN)
            if os.path.exists(log_lost_from_scn):
                logger.info(f"backup_log clear log_lost_from_scn")
                os.remove(log_lost_from_scn)
            # Leave a job-id marker in the cache repository so a re-dispatched sub
            # job (agent retry after a fault) does not run the backup script again.
            result_file = os.path.join(self._cache_area, self._sub_job_id + "_backup_task_success")
            touch_file(result_file)
            self.call_report_backup_copy()

        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def get_archivelog_first_change_from_file(self):
        """Read the archive-log "first change" value left by the check script in
        its temp result file and return it stripped, or '' when the file is
        absent. The result file is deleted after a successful read."""
        is_windows = platform.system().lower() == "windows"
        if is_windows:
            # CONSISTENCY FIX: format the enum member via .value — the same
            # constants are formatted with .value elsewhere in this file
            # (see backup_post_job), so the bare member would yield a wrong path.
            result_file_name = f"{PluginPathConstant.WINDOWS_TMP_PATH.value}/" \
                               f"check_archive_log_first_change_{self._job_id}.txt"
        else:
            result_file_name = f"{PluginPathConstant.STMP_PATH.value}/check_archive_log_first_change_{self._job_id}.txt"
        if not os.path.exists(result_file_name):
            logger.warn(f"get_archivelog_first_change_from_file {result_file_name} not exists,")
            return ''
        if is_windows:
            # Windows result files are written in the local ANSI code page.
            encoding = locale.getdefaultlocale()[1]
            first_change = read_result_file(result_file_name, encoding=encoding)
        else:
            first_change = read_result_file(result_file_name)
        delete_file(result_file_name)
        logger.info(f"get_archivelog_first_change_from_file: {first_change}")
        return first_change.strip()

    def backup_data(self):
        """
        Run a data backup — via storage snapshot when enabled, otherwise via the
        backup script/service — then record the sub-job result.
        """
        backup_params = self.get_backup_params()
        if not self.is_storage_snapshot_backup():
            exit_status = self.exec_backup_data(backup_params)
        else:
            logger.info("start to backup by snapshot")
            snapshot_service = OracleStorageBackupService(self._pid, self._job_id, self._param_dict, backup_params)
            exit_status = snapshot_service.storage_snapshot_backup(self._sub_job_id)
        self.deal_backup_exit_status(exit_status)
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def exec_backup_data(self, params):
        """Execute the data backup via the PDB service, the Windows service, or
        the root RMAN backup shell script.

        :param params: backup parameters (includes Password/ASMPassword secrets)
        :return: the script/service exit status code
        """
        if self.sub_type == OracleDataBaseType.ORACLE_PDB:
            try:
                exit_status = OraclePdbBackupService(self._pid, self._job_id, params).backup_data_pdb()
            except Exception as err_info:
                logger.error(f"backup_data_pdb err_info {err_info}.")
                return ScriptExitCode.ERROR_PARAM_INVALID
        else:
            if platform.system().lower() == "windows":
                exit_status = OracleWindowsBackupService(self._pid, self._job_id, params).backup_data_windows()
            else:
                # The script receives its parameters via stdin (pexpect), not argv,
                # so secrets never appear on the command line.
                child = pexpect.spawn(
                    f"sh {PluginPathConstant.SCRIPTS_PATH.value}/"
                    f"{RootCommand.ROOT_COMMAND_ORACLE_NATIVE_BACKUP.value} "
                    f"{PluginPathConstant.SCRIPTS_PATH.value} {self._job_id} {len(params)}", encoding='utf-8',
                    timeout=None)
                send_params_to_pexpect(params, child)
                child.expect(pexpect.EOF)
                child.close()
                exit_status = child.exitstatus
                # Scrub in-memory password copies as soon as the script has finished.
                clear(params.get('Password'))
                clear(params.get('ASMPassword'))
        return exit_status

    def deal_backup_exit_status(self, exit_status):
        """Translate the backup exit status into the sub-job result; on success
        report the copy and any user-facing log labels.

        :param exit_status: exit code returned by the backup script/service
        """
        if exit_status != ExecuteResultEnum.SUCCESS.value:
            # Shell script exit codes must be converted to OMRP error codes so the
            # UI can display the right error message.
            # NOTE(review): mirrors backup_log's call and passes job_id twice;
            # confirm whether the first argument should be the request pid.
            error_code, contents = convert_error_code(self._job_id, self._job_id, exit_status)
            self.set_backup_result(SubJobStatusEnum.FAILED.value, error_code, contents)
            logger.error(f"Execute Data backup script failed, exit_status: {exit_status}, pid: {self._pid}, "
                         f"job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")
        else:
            # Leave a job-id marker in the cache repository so a re-dispatched sub
            # job (agent retry after a fault) does not run the backup again.
            result_file = os.path.join(self._cache_area, self._sub_job_id + "_backup_task_success")
            touch_file(result_file)
            self.call_report_backup_copy()
            if self.check_archive_log_lost:
                # CONSISTENCY FIX: pass the enum's .value, matching every other
                # LogDetail construction in this file (e.g. backup_progress).
                log_detail = LogDetail(logInfo=OracleReportLabel.PLUGIN_CHECK_BACKUP_ARCHIVELOG_LOST_LABEL,
                                       logLevel=DBLogLevel.WARN.value)
                self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id, log_detail,
                                                      SubJobStatusEnum.RUNNING.value)
            if not self.check_archive_log_lost and self.backup_time:
                log_detail = LogDetail(logInfo=OracleReportLabel.PLUGIN_FULL_BACKUP_RESTORE_TIMESTAMP_LABEL,
                                       logInfoParam=[str(self.backup_time)],
                                       logLevel=DBLogLevel.INFO.value)
                self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id, log_detail,
                                                      SubJobStatusEnum.RUNNING.value)

    def call_report_backup_copy(self):
        """Report the backup copy to the framework and translate the outcome into
        the sub-job result (COMPLETED on success, FAILED otherwise)."""
        is_log_backup = self._backup_type == BackupTypeEnum.LOG_BACKUP.value
        kind = "Archive" if is_log_backup else "Data"
        if not self.report_backup_copy():
            logger.info(f"{kind} back up task is failed. pid: {self._pid}, job_id: {self._job_id}, "
                        f"sub_job_id: {self._sub_job_id}.")
            self.set_backup_result(SubJobStatusEnum.FAILED.value, ErrorCode.PLUGIN_CANNOT_BACKUP_ERR.value,
                                   f"Report {kind.lower()} backup copy failed.")
            return
        # Leave a marker in the cache repository so a fault-mode retry can detect
        # that the copy has already been reported over RPC.
        result_file = os.path.join(self._cache_area, self._sub_job_id + "_report_backup_copy_success")
        touch_file(result_file)

        self.set_backup_result(SubJobStatusEnum.COMPLETED.value, "", "")
        logger.info(f"{kind} back up task is success. pid: {self._pid}, job_id: {self._job_id}, "
                    f"sub_job_id: {self._sub_job_id}.")

    def backup_progress(self):
        """Report the backup sub-job's progress from the BackupProgress file
        written by the backup process, including copy size and log details."""
        file_path = os.path.join(self._cache_area, f"BackupProgress_{self._sub_job_id}")
        progress_info = read_tmp_json_file(file_path)
        logger.info(f"Backup progress info is: {progress_info}, pid: {self._pid}, job_id: {self._job_id}.")
        status = progress_info.get("status", SubJobStatusEnum.RUNNING.value)
        data_size = 0
        if status == SubJobStatusEnum.COMPLETED.value:
            progress = OracleProgress.PROGRESS_ONE_HUNDRED
            # Cluster backups must resolve the instance name before size queries.
            if self.is_cluster_type():
                self.cluster_get_oracle_instance_name()

            if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
                # Storage-snapshot backups read the copy size from the LUN record file.
                data_size = OracleStorageBackupService(self._pid, self._job_id,
                                                       self._param_dict).query_backup_copy_size()
            elif self._param_dict.get("subJob", {}).get("jobName", "") == BackupSubJobName.BACK_TASK:
                # Compute the copy size for the backup sub task.
                data_size = query_backup_copy_size(self._pid, self._db_instance_name, self._install_user_name,
                                                   self._backup_type)
            logger.info(f"Success get backup copy size: {data_size}.")
        else:
            progress = OracleProgress.PROGRESS_FIFTY
        progress_record = os.path.realpath(os.path.join(self._cache_area, f"progress.done_{self._sub_job_id}"))

        if status == SubJobStatusEnum.FAILED.value:
            error_code = progress_info.get("error_code", ErrorCode.BACKUP_FAILED.value)
            contents = progress_info.get("message", "")
            log_detail = LogDetail(logInfo=ReportDBLabel.BACKUP_SUB_FAILED, logInfoParam=[self._sub_job_id],
                                   logLevel=DBLogLevel.ERROR.value, logDetail=error_code,
                                   logDetailParam=progress_info.get("log_detail_param", []), logDetailInfo=[contents])
            output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, taskStatus=status,
                                   progress=progress, logDetail=[log_detail])
            if self.is_storage_snapshot_backup():
                # Remove the temporary LUN progress file of the failed sub job.
                OracleStorageBackupService(self._pid, self._job_id, self._param_dict). \
                    delete_lun_sub_job_progress_file_by_sub_job_id(self._sub_job_id)
        # If this sub job's start labels were already reported (marker file
        # exists), do not report them again.
        elif not os.path.exists(progress_record):
            touch_file(progress_record)
            if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
                # Storage-snapshot backups use dedicated labels, distinct from normal mode.
                log_detail = LogDetail(logInfo=OracleReportLabel.STORAGE_SNAPSHOT_BACKUP_SUB_START_COPY,
                                       logInfoParam=[self._sub_job_id, self.get_snapshot_wwn_list()],
                                       logLevel=DBLogLevel.INFO.value)
                log_detail1 = LogDetail(logInfo=OracleReportLabel.STORAGE_SNAPSHOT_BACKUP_TIPS, logInfoParam=[],
                                        logLevel=DBLogLevel.INFO.value)
                output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, taskStatus=status,
                                       progress=progress, logDetail=[log_detail, log_detail1], dataSize=data_size)
            else:
                log_detail = LogDetail(logInfo=ReportDBLabel.BACKUP_SUB_START_COPY, logInfoParam=[self._sub_job_id],
                                       logLevel=DBLogLevel.INFO.value)
                output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, taskStatus=status,
                                       progress=progress, logDetail=[log_detail], dataSize=data_size)
        else:
            output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, taskStatus=status,
                                   progress=progress, dataSize=data_size)
        output_result_file(self._pid, output.dict(by_alias=True))
        logger.info(f"SubJobDetails: {output.dict(by_alias=True)}, pid: {self._pid}, job_id: {self._job_id}.")

    def get_snapshot_wwn_list(self):
        """Return a ';'-joined list of parent LUN WWNs for the snapshot indexes
        carried in this sub job's jobInfo field."""
        snapshots = read_tmp_json_file(os.path.join(self._cache_area, f'snapshots_{self._job_id}'))
        raw_indexes = self._param_dict.get('subJob', {}).get('jobInfo', '')
        # jobInfo looks like "[0,1,...]"; strip the brackets and split on commas.
        index_list = raw_indexes.strip(']').strip('[').split(',')
        logger.info(f"get_snapshot_wwn_list: {index_list}.")
        return ';'.join(snapshots[int(index)].get('parent_wwn') for index in index_list)

    def umount_bind_backup_path(self):
        """
        Unbind the mounted backup paths: the log repository for log backup,
        otherwise every bound data-repository path.
        """
        # Hoisted out of the loop: the platform cannot change between iterations
        # (the original re-queried platform.system() for every data area and
        # computed a dead logic_ip_path on the Windows path).
        is_windows = platform.system().lower() == "windows"
        # Log backup: unmount the log repository bind path.
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            if is_windows:
                umount_bind_path(PluginPathConstant.WINDOWS_ORACLE_LOG_FILE_PATH.value + self._db_uuid)
            else:
                umount_bind_path(PluginPathConstant.ORACLE_LOG_FILE_PATH.value + self._db_uuid)
            return
        # Data backup: unmount one bind path per data-area repository; the last
        # path component is the logic-ip directory used when the bind was created.
        for data_area_path in self._data_area:
            if is_windows:
                logic_ip_path = data_area_path.split("\\")[-1]
                umount_bind_path(PluginPathConstant.WINDOWS_ORACLE_DATA_FILE_PATH.value + logic_ip_path + self._db_uuid)
            else:
                logic_ip_path = data_area_path.split('/')[-1]
                umount_bind_path(PluginPathConstant.ORACLE_DATA_FILE_PATH.value + logic_ip_path + self._db_uuid)

    def backup_post_job(self):
        """Post-processing after backup: clean repositories on failure/abort,
        manage storage-snapshot LUNs, unmount bind paths and drop temp files."""
        logger.info(
            f"Execute backup post job function, pid: {self._pid}, job_id: {self._job_id}.")
        file_path = os.path.join(self._cache_area, f"BackupProgress_{self._sub_job_id}")
        if os.path.exists(file_path):
            backup_result = read_tmp_json_file(file_path)
            if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
                backup_path = self._cache_area
            else:
                backup_path = self._data_area[0]
            if backup_result.get("status", SubJobStatusEnum.INITIALIZING.value) != SubJobStatusEnum.COMPLETED.value \
                    or os.path.exists(os.path.join(self._cache_area, "abort.done")):
                logger.info(f"Task failed or Backup Task is stop, pid: {self._pid}, job_id: {self._job_id}")
                # On failure or abort, clean the backup data written for this job.
                clear_repository_dir(backup_path, self._job_id)
                logger.info(
                    f"Succeed to clean backup storage: {backup_path}, pid: {self._pid}, job_id: {self._job_id}.")
        snapshot = {}
        if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
            backup_job_result = self._param_dict.get("backupJobResult", -1)
            if backup_job_result == BackupJobResult.SUCCESS:
                # Storage-snapshot backup succeeded: delete the previous snapshot.
                logger.info("start delete pre lun")
                OracleStorageBackupService(self._pid, self._job_id,
                                           self._param_dict).delete_pre_lun_in_backup_post_job()
            elif backup_job_result > BackupJobResult.SUCCESS and backup_job_result != 2:
                # Backup failed (non-abort): delete the current LUN snapshot.
                # NOTE(review): 2 appears to be the "aborted" job result — confirm
                # against BackupJobResult and replace the magic number.
                logger.info("start delete current lun")
                OracleStorageBackupService(self._pid, self._job_id,
                                           self._param_dict).delete_current_lun_in_backup_post_job()
            elif backup_job_result == 2:
                # Abort: the repositories must be unmounted and the tool stopped
                # before deletion, so only capture the snapshot info here and
                # perform the deletion at the end.
                snapshot = OracleStorageBackupService(self._pid, self._job_id,
                                                      self._param_dict).get_current_lun_snapshot()
        result_file = os.path.join(self._cache_area, f"BackupPostJobProgress_{self._sub_job_id}")
        touch_file(result_file)
        self.umount_bind_backup_path()
        OracleStorageBackupService(self._pid, self._job_id,
                                   self._param_dict).delete_current_lun_in_backup_post_job_by_abort_job(snapshot)
        # The cache repository is still needed when storage-snapshot backup fails,
        # so it is cleaned last.
        if os.path.exists(self._cache_area):
            logger.info(f"Start clean cache repository, pid: {self._pid}, job_id: {self._job_id}.")
            clear_repository_dir(self._cache_area, self._job_id)
        # Remove the result_tmp file in the post job.
        if platform.system().lower() == "windows":
            stmp_file_path = PluginPathConstant.WINDOWS_TMP_PATH.value
        else:
            stmp_file_path = PluginPathConstant.STMP_PATH.value
        result_file_name = f"{stmp_file_path}/{PluginPathConstant.RESULT_TMP_FILE_PREFIX.value}{self._job_id}"
        delete_file(result_file_name)
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def backup_post_job_progress(self):
        """Report the post-job progress based on the presence of the
        BackupPostJobProgress marker file created by backup_post_job."""
        logger.info(f"Execute backup_post_job_progress, pid: {self._pid}, job_id: {self._job_id}.")

        # Post-job progress is binary: the marker file exists (done) or not (failed).
        progress = OracleProgress.PROGRESS_ONE_HUNDRED
        job_status = SubJobStatusEnum.COMPLETED.value
        file_path = os.path.join(self._cache_area, f"BackupPostJobProgress_{self._sub_job_id}")

        if not os.path.exists(file_path):
            job_status = SubJobStatusEnum.FAILED.value
            logger.error(
                f"Failed to query progress, job status: {job_status}, pid: {self._pid}, job_id: {self._job_id}.")
        else:
            # BUGFIX: the success line used to be logged unconditionally, right
            # after the failure line; only log success on the success path.
            logger.info(
                f"Succeed to query progress, job status: {job_status}, pid: {self._pid}, job_id: {self._job_id}.")
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress)

        output_result_file(self._pid, output.dict(by_alias=True))

    def get_report_copy_content(self, result, stmp_file_path):
        """
        Build the extendInfo content reported with the copy.

        :param result: text of the result file produced by the backup script
        :param stmp_file_path: directory holding auxiliary result files
        :return: dict of copy extend info
        """
        extend_content = {}
        rows = result.splitlines()
        if self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
            # Data backup: each row is a key;value pair folded in one at a time.
            for row in rows:
                extend_content = self.get_extend_content_from_columns(row.split(";"), extend_content, stmp_file_path)
            return extend_content
        # Log backup: rows carry positional fields (SCN/time window, resetlogs id).
        for row in rows:
            fields = row.split(";")
            extend_content['beginSCN'] = int(fields[1])
            extend_content['endSCN'] = int(fields[3])
            extend_content['beginTime'] = int(fields[2])
            extend_content['endTime'] = int(fields[4])
            extend_content['resetlogs_id'] = int(fields[5])
            self.backup_resetlogs_id = str(extend_content['resetlogs_id'])
            extend_content['backupTime'] = int(fields[2])
            extend_content['pfileParams'] = self.p_file_param
        return extend_content

    def get_extend_content_from_columns(self, columns, extend_content, stmp_file_path):
        """Fold one `key;value` row of the script result file into extend_content.

        Side effects: sets self.backup_time, self.backup_resetlogs_id and
        self.check_archive_log_lost from the matching rows.

        :param columns: the row already split on ';' (columns[0] is the key)
        :param extend_content: dict being accumulated; returned after update
        :param stmp_file_path: directory holding auxiliary files (pfile/filelist)
        :return: the updated extend_content
        """
        # Deliberately independent `if`s (not elif): a "last_backup_scn" row
        # stores the int under 'scn' here AND the raw string under its own key
        # in the membership check below.
        if columns[0] == "last_backup_scn":
            extend_content['scn'] = int(columns[1])
        if columns[0] == "timeStamp":
            extend_content['backupTime'] = int(columns[1])
            self.backup_time = unix_timestamp_to_date(str(columns[1]))
        if columns[0] == "resetlogs_id":
            self.backup_resetlogs_id = str(columns[1])
        # These keys are also reported verbatim as strings.
        if columns[0] in ["last_backup_scn", "timeStamp", "resetlogs_id", "BackupLevel"]:
            extend_content[columns[0]] = columns[1]
        # Report the pfile and filelist contents read from their side files.
        if columns[0] == "pfile" and self.sub_type != OracleDataBaseType.ORACLE_PDB:
            extend_content["pfileParams"] = {}
            file_name = f"{stmp_file_path}/{columns[1]}"
            add_file_content(extend_content["pfileParams"], file_name, "=")
        if columns[0] == "filelist":
            extend_content["filelistParams"] = {}
            file_name = f"{stmp_file_path}/{columns[1]}"
            add_file_content(extend_content["filelistParams"], file_name, ";")
        if columns[0] == "check_archive_log_lost":
            if columns[1] == "true":
                self.check_archive_log_lost = True
                # A copy with lost archive logs cannot be restored.
                extend_content['canRestore'] = False
        return extend_content

    def report_backup_copy(self):
        """
        Report the copy info to the framework over the RPC tool.

        :return: True when the copy was reported successfully, False otherwise
        """
        logger.info(f"Start execute report copy function, enc_algo: {self.enc_algo}, pid: {self._pid}, "
                    f"job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")
        if not self.is_storage_snapshot_backup():
            ret, extend_content = self.write_extend_content()
            extend_content[ORACLEJsonConstant.COPY_TRANSPORT_MODE] = ""
            if not ret:
                return False
        else:
            extend_content = self.write_snapshot_extend_content()
            extend_content[ORACLEJsonConstant.COPY_TRANSPORT_MODE] = BackupTransportMode.STORAGE_LAYER.value

        # Report the encryption algorithm/key and the archived-log retention knobs.
        extend_content['backup_algo'] = self.enc_algo
        extend_content['backup_algo_value'] = self.encrypted_key
        extend_content['delete_archived_log'] = self._delete_archived_log
        extend_content['delete_before_time'] = self._delete_before_time
        extend_content['delete_before_time_unit'] = self._delete_before_time_unit
        extend_content['can_table_restore'] = self.can_table_restore
        extend_content['block_change_tracking'] = self.block_change_tracking
        extend_content['pdb_list'] = json.loads(self._protect_obj_extend_info.get('pdb', '[]'))
        logger.info(f"delete_archived_log {self._delete_archived_log}, before_time {self._delete_before_time},"
                    f"time_unit {self._delete_before_time_unit}")
        # Assemble the copy info payload.
        json_copy = {}
        json_copy['extendInfo'] = extend_content
        json_copy["repositories"] = self._log_area \
            if self._backup_type == BackupTypeEnum.LOG_BACKUP.value else self._data_area[0]

        copy_info = {"copy": json_copy, "jobId": self._job_id}
        try:
            invoke_rpc_tool_interface(self._sub_job_id, RpcParamKey.REPORT_COPY_INFO, copy_info)
        except Exception as err_info:
            logger.error(f"Report copy info fail, err_info: {err_info}, pid: {self._pid}, job_id: {self._job_id}, "
                         f"sub_job_id: {self._sub_job_id}.")
            return False
        if self.sub_type != OracleDataBaseType.ORACLE_PDB:
            self.save_file_info()
        logger.info(f"Report backup copy info success.")
        return True

    def abort_job(self):
        """
        Abort the backup task: create the abort.ing marker, kill the python
        process running this job id (never this abort task itself), stop the LUN
        tool when storage-snapshot backup is active, then flip the marker to
        abort.done.
        """
        # Create the in-progress marker first so progress queries report ABORTING.
        abort_file = os.path.join(self._cache_area, "abort.ing")
        touch_file(abort_file)
        pid_list = psutil.pids()

        if platform.system().lower() == "windows":
            for pid in pid_list:
                process = psutil.Process(pid)
                try:
                    cmd = process.cmdline()
                except Exception as e:
                    logger.error(e, exc_info=True)
                    continue
                # Must not kill the abort_job task itself.
                if not cmd:
                    continue
                if 'python' == cmd[0] and self._pid in cmd:
                    continue
                if 'python' == cmd[0] and self._job_id in cmd:
                    process.kill()
                    logger.info(f"The backup task has been terminated, job_id: {self._job_id}.")
                    break
        else:
            for pid in pid_list:
                # BUGFIX: a process may disappear (or deny access) between
                # psutil.pids() and cmdline(); guard like the Windows branch does
                # instead of letting NoSuchProcess abort the whole abort job.
                try:
                    process = psutil.Process(pid)
                    cmd = process.cmdline()
                except Exception as e:
                    logger.error(e, exc_info=True)
                    continue
                # Must not kill the abort_job task itself.
                if 'python3' in cmd and self._pid in cmd:
                    continue
                if 'python3' in cmd and self._job_id in cmd:
                    process.kill()
                    logger.info(f"The backup task has been terminated, job_id: {self._job_id}.")
                    break
            if self.is_storage_snapshot_backup() and self._backup_type != BackupTypeEnum.LOG_BACKUP.value:
                # Create a temp file that tells the LUN tool to stop.
                # NOTE(review): other PluginPathConstant uses in this file go
                # through .value; confirm LUN_TOOL_STOP_FILE formats to the
                # intended path here.
                stop_file = f"{PluginPathConstant.LUN_TOOL_STOP_FILE}_{self._job_id}"
                logger.info(f"{stop_file}")
                su_exec_touch_cmd(stop_file, 'root')
                # Delete the LUN backup progress temp files.
                logger.info("delete lun progress file")

                OracleStorageBackupService(self._pid, self._job_id,
                                           self._param_dict).delete_lun_sub_job_progress_file_by_job_id()

        os.rename(abort_file, os.path.join(self._cache_area, "abort.done"))
        logger.info(f"Succeed to abort backup job, job_id: {self._job_id}.")
        output_result_file(self._pid, self.get_action_result().dict(by_alias=True))

    def report_abort_job_progress(self):
        """
        Report the abort task's progress, derived from which abort marker file
        (abort.ing / abort.done) currently exists in the cache repository.
        """
        status = SubJobStatusEnum.ABORTED_FAILED.value
        # Check markers in order; the first one present decides the status.
        for marker, marker_status in (("abort.ing", SubJobStatusEnum.ABORTING.value),
                                      ("abort.done", SubJobStatusEnum.ABORTED.value)):
            if os.path.exists(os.path.join(self._cache_area, marker)):
                status = marker_status
                break
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               progress=OracleProgress.PROGRESS_ONE_HUNDRED,
                               taskStatus=status)
        logger.info(f"Query abort job progress: {output.dict(by_alias=True)}, job_id: {self._job_id}.")
        output_result_file(self._pid, output.dict(by_alias=True))

    def check_datafile_consistency(self):
        """Verify the datafiles known to the database match those recorded at backup time,
        and that every recorded datafile still exists on the file system.

        :return: True when consistent and present, False otherwise.
        """
        logger.info(f'job id: {self._job_id}, start to check datafile consistency')
        # Datafiles as reported by the live database.
        db_files = get_data_file_set(self._pid, self._db_instance_name, self._install_user_name)
        # Datafiles recorded in the addition file during backup.
        logger.info(f'job id: {self._job_id}, start to read additional db file')
        dbfiles_path = os.path.join(self._data_area[0], "additional", "dbfiles")
        recorded_files, _ = read_additional_db_file(dbfiles_path, ';')
        logger.info(f'datafile_set: {db_files}, addition_datafile_set: {recorded_files}')
        if db_files != recorded_files:
            logger.error(f'datafile is not same')
            return False
        # Finally confirm each recorded datafile exists in the file system.
        return exist_addition_file(self._job_id, self._data_area[0], recorded_files)

    def check_addition_file_exist(self):
        """Check that the datafiles listed in the addition file exist on the file system.

        :return: False when the addition file is empty or a file is missing, True otherwise.
        """
        # Read the datafile list recorded during backup from additional/dbfiles.
        dbfiles_path = os.path.join(self._data_area[0], "additional", "dbfiles")
        recorded_files, _ = read_additional_db_file(dbfiles_path, ';')
        if not recorded_files:
            logger.warning(f'job id: {self._job_id}, addition is empty')
            return False
        return exist_addition_file(self._job_id, self._data_area[0], recorded_files)

    def write_extend_content(self):
        """Read the temporary backup result file and build the copy extend content.

        :return: (True, extend_content) on success, (False, {}) when the result file is
                 empty or parsing the copy content raises.
        """
        # Resolve the temp directory once; the original checked the platform twice.
        is_windows = platform.system().lower() == "windows"
        if is_windows:
            stmp_file_path = PluginPathConstant.WINDOWS_TMP_PATH.value
        else:
            stmp_file_path = PluginPathConstant.STMP_PATH.value
        result_file_name = f"{stmp_file_path}/{PluginPathConstant.RESULT_TMP_FILE_PREFIX.value}{self._job_id}"

        # On Windows the result file is encoded with the locale's default encoding.
        if is_windows:
            result = read_result_file(result_file_name, encoding=locale.getdefaultlocale()[1])
        else:
            result = read_result_file(result_file_name)
        if not result:
            logger.info(f"Read result file is empty, pid: {self._pid}, job_id: {self._job_id}, "
                        f"sub_job_id: {self._sub_job_id}.")
            return False, {}

        logger.info(f"Read Backup Result File, result_file_name is: {result_file_name}, content is: {result}, "
                    f"pid: {self._pid}, job_id: {self._job_id}, sub_job_id: {self._sub_job_id}.")
        try:
            extend_content = self.get_report_copy_content(result, stmp_file_path)
        except Exception as ex:
            logger.exception(ex)
            logger.error(f"get_report_copy_content exception {str(ex)}.")
            return False, {}
        return True, extend_content

    def write_snapshot_extend_content(self):
        """Assemble the copy extendInfo for a storage-snapshot backup.

        Log backups reuse write_extend_content() and inherit the new-location-restore
        flag from the most recent data copy. Data backups collect the storage, LUN
        snapshot, disk, spfile and result-content details cached under the cache area.

        Returns: dict of extendInfo entries for the copy report.
        """
        extend_content = {}
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            ret, extend_content = self.write_extend_content()
            # Look up the latest data copy (full/incr/diff) to carry over NEW_LOC_RESTORE.
            previous_data_copy = get_previous_copy_info(self._protect_obj,
                                                        [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY,
                                                         RpcParamKey.DIFF_COPY], self._job_id)
            new_loc_restore = previous_data_copy.get("extendInfo", {}) \
                .get(ORACLEJsonConstant.NEW_LOC_RESTORE, False)
            logger.info(f"write_snapshot_extend_content new_loc_restore is {new_loc_restore}")
            extend_content[ORACLEJsonConstant.NEW_LOC_RESTORE] = new_loc_restore
            return extend_content
        # NOTE(review): storage_index defaults to -1 when the cache file is missing,
        # which silently selects the LAST configured storage — confirm this is intended.
        file_path = os.path.join(self._cache_area, f'storage_index_{self._job_id}')
        storage_index = read_tmp_json_file(file_path).get('storage_index', -1)
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        extend_content[ORACLEJsonConstant.EXEC_STORAGE] = storages[storage_index].get('ipList')
        logger.info(f"write_snapshot_extend_content storage_index is {storage_index}")
        file_path = os.path.join(self._cache_area, f'snapshots_{self._job_id}')
        snapshots = read_tmp_json_file(file_path)
        extend_content[ORACLEJsonConstant.COPY_LUN_SNAPSHOTS] = json.dumps(snapshots)
        file_path = os.path.join(self._cache_area, f'disk_infos_{self._job_id}')
        agent_disk_info = read_tmp_json_file(file_path)
        logger.info(f"report_backup_copy agent_disk_info is {agent_disk_info}")
        extend_content[ORACLEJsonConstant.COPY_DISK_INFOS] = agent_disk_info.get('disk_infos')
        extend_content[ORACLEJsonConstant.COPY_AGENT_UUID] = agent_disk_info.get('agent_uuid')
        extend_content[ORACLEJsonConstant.NEW_LOC_RESTORE] = agent_disk_info.get('new_loc_restore')
        # Read the spfile path cached during backup.
        file_path = os.path.join(self._cache_area, f'spfile_{self._job_id}')
        spfile = read_tmp_json_file(file_path)
        logger.info(f"report_backup_copy spfile is {spfile}")
        extend_content[ORACLEJsonConstant.COPY_SPFILE_PATH] = spfile
        # Read the ASM spfile path; only recorded when present.
        file_path = os.path.join(self._cache_area, f'asm_spfile_{self._job_id}')
        asm_spfile = read_tmp_json_file(file_path)
        logger.info(f"report_backup_copy asm_spfile is {asm_spfile}")
        if asm_spfile:
            extend_content[ORACLEJsonConstant.ASM_SPFILE_PATH] = asm_spfile

        # Merge the cached result content; promote SCN / timestamp / resetlogs id
        # into the fields consumed by the copy report.
        file_path = os.path.join(self._cache_area, f'result_content{self._job_id}')
        result_content = read_tmp_json_file(file_path)
        extend_content.update(result_content)
        if result_content.get("last_backup_scn", ""):
            extend_content['scn'] = result_content.get("last_backup_scn")
        if result_content.get("timeStamp", ""):
            extend_content['backupTime'] = result_content.get("timeStamp")
        if result_content.get("resetlogs_id", ""):
            self.backup_resetlogs_id = result_content.get("resetlogs_id")

        return extend_content

    def save_copy_table_tree(self):
        """
        Record the copy's table information, used for table-level restore.
        """
        logger.info(f"save_copy_table_tree start")
        oracle_version = self._protect_obj_extend_info.get("version", {})
        oracle_major_version = oracle_version.split(".")[0]
        # Only Oracle 12+ copies support table-level restore; snapshot backups and
        # Data Guard databases do not.
        if int(oracle_major_version) < 12 or self.is_storage_snapshot_backup() \
                or check_database_is_dg(self._pid, self._db_instance_name, self._install_user_name):
            logger.info("can not table restore")
            self.can_table_restore = False
            return
        # Check whether CDB is enabled.
        # If enabled, query the PDB information (open PDBs only).
        login_params = self.build_login_param()
        cbd_is_open = check_cdb_is_open(self._pid, login_params)
        if cbd_is_open:
            pdbs_infos = list_pdbs(self._pid, self._db_instance_name, self._install_user_name, TaskType.BACKUP_SERVICE)
            if not pdbs_infos:
                # No PDB info: mark table restore unavailable and warn via job details,
                # then fall through to the (empty) loop below.
                logger.error(f"The instance did not find pdb information, instance_name: {self._db_instance_name}")
                self.can_table_restore = False
                log_detail = LogDetail(logInfo=OracleReportLabel.ORACLE_BACKUP_PLEASE_CHECK_THE_PDB_STATUS_LABEL,
                                       logLevel=DBLogLevel.WARN)
                self.report_job_details_linux_windows(self._pid, self._job_id, self._sub_job_id, log_detail,
                                                      SubJobStatusEnum.RUNNING.value)
            for pdb_info in pdbs_infos:
                # One folder node per PDB; its uuid parents the owner/table nodes.
                pdb_name = pdb_info.get("NAME")
                params = {
                    "path": self._meta_area,
                    "name": pdb_name,
                    "type": MetaDataConstant.TYPE_FOLDER,
                    "parent_path": "/",
                    "extend_info": "",
                    "parent_uuid": 0
                }
                pdb_parent_uuid = MetaDataSqliteService.save_data(params)
                self.set_table_is_has_foreign_key_dict(pdb_name)
                self.save_meta_table_info(pdb_name, pdb_parent_uuid)
                # Clear the per-PDB foreign-key cache before the next PDB.
                self.table_is_has_foreign_key_dict.clear()
        else:
            # CDB not enabled: query tablespaces and tables directly (no PDB level).
            self.set_table_is_has_foreign_key_dict('')
            self.save_meta_table_info("", 0)
            self.table_is_has_foreign_key_dict.clear()

    def set_table_is_has_foreign_key_dict(self, pdb_name):
        """Cache the per-table foreign-key lookup for the given PDB on this instance."""
        self.table_is_has_foreign_key_dict = get_table_is_has_foreign_key_dict(
            self._pid, self.build_login_param(), pdb_name)

    def save_meta_table_info(self, pdb_name, pdb_parent_uuid):
        """Persist one folder node per schema owner, plus that owner's table entries.

        :param pdb_name: PDB name, or '' for a non-CDB database
        :param pdb_parent_uuid: uuid of the parent node in the metadata tree
        """
        user_rows = list_user(self._pid, self._db_instance_name, self._install_user_name, pdb_name,
                              TaskType.BACKUP_SERVICE)
        for user_row in user_rows:
            owner = user_row.get("USERNAME")
            if not owner:
                continue
            page_param = ListTableByOwnerParam(owner=owner, pdb_name=pdb_name, page=PageParamConst.PAGE,
                                               page_size=PageParamConst.PAGE_SIZE)
            # Folder node for this owner; its uuid parents the table entries.
            owner_node = {
                "path": self._meta_area,
                "name": owner,
                "type": MetaDataConstant.TYPE_FOLDER,
                "parent_path": f"/{pdb_name}",
                "extend_info": "",
                "parent_uuid": pdb_parent_uuid
            }
            owner_uuid = MetaDataSqliteService.save_data(owner_node)
            # Page through the owner's tables; stop on an empty or short page.
            while True:
                page_tables = list_table_by_owner(self._pid, self._db_instance_name, self._install_user_name,
                                                  page_param)
                if not page_tables:
                    logger.info(f"There is no table info that needs to be saved, owner: {owner}")
                    break
                self.save_table_info(pdb_name, page_tables, owner_uuid, owner)
                if len(page_tables) < page_param.page_size:
                    logger.info(f"There is no table info that needs to be saved, owner: {owner}")
                    break
                page_param.page += 1

    def save_table_info(self, pdb_name, tables, table_parent_uuid, owner):
        """Batch-save metadata rows for the given tables, skipping temporary objects.

        :param pdb_name: PDB name, '' for non-CDB
        :param tables: table rows returned by list_table_by_owner
        :param table_parent_uuid: uuid of the owner's folder node
        :param owner: schema owner name (used to build the parent path)
        """
        login_params = self.build_login_param()
        # Names of temporary objects in this PDB; such tables are skipped.
        temp_object_names = [obj.get('OBJECT_NAME')
                             for obj in list_temporary_objects(self._pid, pdb_name, login_params)]
        # Parent path is invariant for the whole batch.
        parent_path = f"/{pdb_name}/{owner}" if pdb_name else f"/{owner}"
        rows = []
        for table in tables:
            table_name = table.get("TABLE_NAME")
            if table_name in temp_object_names:
                continue
            # '' (not False) marks "no foreign key", matching consumers of this field.
            has_fk = True if self.table_is_has_foreign_key_dict.get(table_name, '') else ''
            extend_info = {
                'user_name': table.get("OWNER"),
                'pdb_name': pdb_name,
                "table_space": table.get("TABLESPACE_NAME"),
                "is_has_foreign_key": has_fk
            }
            rows.append({
                "path": self._meta_area,
                "name": table_name,
                "type": MetaDataConstant.TYPE_FILE,
                "parent_path": parent_path,
                "extend_info": json.dumps(extend_info),
                "parent_uuid": table_parent_uuid
            })
        MetaDataSqliteService.save_data_batch(rows)

    def build_query_scn_sql(self, name):
        """Build the RMAN script that queries the first/next change SCN of an archived log.

        :param name: full path of the archived log file
        :return: (sql_text, rman_params) — rman_params carries the login context
        """
        rman_params = {
            "is_enc_bk": 0,
            "instance_name": self._db_instance_name,
            "db_user": get_env_variable(f"job_protectObject_auth_authKey_{self._pid}"),
            "db_password": get_env_variable(f"job_protectObject_auth_authPwd_{self._pid}"),
            "rman_enc_section": "",
            "db_install_user": self._install_user_name
        }
        logger.info(f"Building query scn sql for {os.path.basename(name)}, pid: {self._pid}.")
        # Catalog the log's directory first so RMAN knows about the file, then
        # query its SCN range. Windows paths use '\' and an upper-cased file name.
        if platform.system().lower() == "windows":
            catalog_line = f"    catalog start with '{os.path.dirname(name)}\\';\n"
            name = name.upper()
        else:
            catalog_line = f"    catalog start with '{os.path.dirname(name)}/';\n"
        sql = ("RUN {\n"
               + catalog_line
               + f"    select to_char(first_change#), to_char(next_change#) from v$archived_log "
                 f"where name='{name}';\n"
               + "}\n")
        return sql, rman_params

    def query_scn_from_db(self, name):
        """Query the first/next change SCN of an archived log file via RMAN.

        :param name: full path of the archived log file
        :return: (first_change, next_change) as strings; ("", "") on any failure
        """
        logfile_name = os.path.basename(name)
        logger.info(f"Query scn from db for log file: {logfile_name}, pid:{self._pid}")
        tmp_sql_file = f"{PluginPathConstant.TMP_PATH}/query_scn_{logfile_name}.sql"
        tmp_rst_file = f"{PluginPathConstant.TMP_PATH}/query_scn_{logfile_name}.rst"
        tmp_sql, rman_params = self.build_query_scn_sql(name)
        write_tmp_file(tmp_sql, tmp_sql_file)
        # Always remove the temp script/result files, even when reading the result
        # fails — the previous early returns leaked them on read errors.
        try:
            if platform.system().lower() == "windows":
                ret_code = execute_windows_rman_cmd(self._pid, rman_params, 0, tmp_sql_file, tmp_rst_file)
                # Windows result files are written in the locale's default encoding.
                encoding = locale.getdefaultlocale()[1]
                try:
                    rman_output = read_result_file(tmp_rst_file, encoding=encoding)
                except Exception as ex:
                    logger.error(f"Failed to read query scn result file: {tmp_rst_file}, exception: {ex}.")
                    return "", ""
            else:
                # The RMAN script runs as the install user, which must own the sql file.
                chown_file_path_owner(tmp_sql_file, self._install_user_name)
                ret_code = execute_linux_rman_cmd(self._pid, rman_params, 0, tmp_sql_file, tmp_rst_file)
                try:
                    rman_output = read_result_file(tmp_rst_file)
                except Exception as ex:
                    logger.error(f"Failed to read query scn result file: {tmp_rst_file}, exception: {ex}.")
                    return "", ""
        finally:
            delete_file(tmp_sql_file)
            delete_file(tmp_rst_file)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Query scn for {logfile_name} failed, ret_code={ret_code}, "
                         f"error is {rman_output}, pid:{self._pid}")
            return "", ""
        # Parse the two SCN values from RMAN's column-formatted output.
        matches = re.findall(r"TO_CHAR\(FIRST_CHANGE#\)\s+-+\s+TO_CHAR\(NEXT_CHANGE#\)\s+-+\s+(\d+)\s+(\d+)",
                             rman_output)
        if not matches:
            logger.error("No matches scn found in RMAN output.")
            return "", ""
        first_change, next_change = matches[0]
        logger.info(f"Query scn for {name} success, pid:{self._pid}.")
        return first_change, next_change

    def save_file_info(self):
        """
        Record the copy's file information, used for single-file restore.
        """
        logger.info("Start to save file info.")
        oracle_version = self._protect_obj_extend_info.get("version", {})
        oracle_major_version = oracle_version.split(".")[0]
        # Only Oracle 12+ copies support single-file restore; snapshot backups do not.
        if int(oracle_major_version) < 12 or self.is_storage_snapshot_backup():
            return
        is_log_copy = False
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            is_log_copy = True
            # Log copies initialize their own metadata database under the log area.
            MetaDataSqliteService.init_database(self._log_area)
        ctl_file_name = PluginPathConstant.ORACLE_BACKUP_CONTROL_FILE_NAME.value
        if self._backup_type == BackupTypeEnum.LOG_BACKUP.value:
            # Log copies: record each .log file in the resetlogs directory.
            # Fall back to the first directory entry, then prefer the first
            # entry that is not the "sqlite" metadata directory.
            reset_log_dir = os.listdir(self._log_area)[0]
            for dir_name in os.listdir(self._log_area):
                if dir_name != "sqlite":
                    reset_log_dir = dir_name
                    break
            log_dir = os.path.join(self._log_area, reset_log_dir)
            for file_name in os.listdir(log_dir):
                if file_name.lower().endswith(".log"):
                    log_file = os.path.join(f"/{os.path.basename(self._log_area)}", reset_log_dir, file_name)
                    log_file_path = os.path.join(log_dir, file_name)
                    # SCN range is stored as "first-next" in the file's extend info.
                    first_change, next_change = self.query_scn_from_db(log_file_path)
                    scn = f"{first_change}-{next_change}"
                    self.save_file_info_by_type(file_name, log_file, log_file_path, 'log_file', (scn, is_log_copy))
            ctl_file = os.path.join(f"/{os.path.basename(self._log_area)}", reset_log_dir, ctl_file_name)
            ctl_file_path = os.path.join(log_dir, ctl_file_name)
        else:
            # Data copies: record data, log and parameter file information.
            additional_path = os.path.join(self._data_area[0], "additional", "dbfiles")
            addition_datafile_set, _ = read_additional_db_file(additional_path, ';')
            for data_dir_name in addition_datafile_set:
                # NOTE(review): assumes each datafile directory contains exactly one
                # file (listdir()[0] used three times) — confirm against the backup layout.
                data_dir = os.path.join(self._data_area[0], data_dir_name)
                data_file = os.path.join(f"/{data_dir_name}", os.listdir(data_dir)[0])
                data_file_path = os.path.join(data_dir, os.listdir(data_dir)[0])
                data_file_name = os.listdir(data_dir)[0]
                self.save_file_info_by_type(data_file_name, data_file, data_file_path, 'data_file')
            log_dir = os.path.join(self._data_area[0], "log")
            for log_file_name in os.listdir(log_dir):
                log_file = os.path.join(f"/log", log_file_name)
                log_file_path = os.path.join(log_dir, log_file_name)
                first_change, next_change = self.query_scn_from_db(log_file_path)
                scn = f"{first_change}-{next_change}"
                self.save_file_info_by_type(log_file_name, log_file, log_file_path, 'log_file', (scn, False))
            ctl_file = ctl_file_name
            ctl_file_path = os.path.join(self._data_area[0], ctl_file_name)
            parameter_file_name = f'ebackup-{self._protect_obj.get("name", "")}-pfile.ora'
            parameter_file_path = os.path.join(self._data_area[0], parameter_file_name)
            self.save_file_info_by_type(parameter_file_name, parameter_file_name, parameter_file_path, 'parameter_file')
        # Record the control file information (both copy types).
        self.save_file_info_by_type(ctl_file_name, ctl_file, ctl_file_path, 'control_file', (None, is_log_copy))

    def save_file_info_by_type(self, file_name, file, file_path, file_type, other_info=(None, False)):
        """
        Save a single file's metadata record into the SQLite table, keyed by file type.
        @param file_name: bare file name
        @param file: file name with directory info, used when issuing a restore task
        @param file_path: full path on disk, used to obtain the size
        @param file_type: file category (data/log/control/parameter file)
        @param other_info: tuple of (scn info, whether this is a log backup)
        """
        scn, is_log_copy = other_info
        logger.info(f"Saving file info by type: {file_type}, is log copy: {is_log_copy}")
        # Log copies keep their metadata database in the log area, others in the meta area.
        db_path = self._log_area if is_log_copy else self._meta_area
        extend_info = {'catalogue': file, 'type': FileTypeMap.get(file_type), 'scn': scn}
        logger.info(f"{file_type} name: {file_name}, restore name: {file}, path: {file_path}.")
        try:
            file_size = os.path.getsize(file_path)
        except Exception as ex:
            # Best effort: a missing/unreadable file is recorded with size 0.
            file_size = 0
            logger.error(f"Get size of {file_name} failed, error: {ex}.")
        record = {
            "path": db_path,
            "name": file_name,
            "type": MetaDataConstant.TYPE_FILE,
            "parent_path": "/",
            "extend_info": json.dumps(extend_info),
            "parent_uuid": 0,
            "size": file_size
        }
        MetaDataSqliteService.save_data(record, True)

    def is_cluster_type(self):
        """Return True when either the object or its parent is an Oracle cluster type."""
        return OracleDataBaseType.ORACLE_CLUSTER in (self.sub_type, self.parent_sub_type)

    def _init_backup_param(self):
        """Initialize the per-job backup state attributes on this instance."""
        # Marks whether the copy has usable table info for table-level restore.
        self.can_table_restore = True
        self.exec_cluster_back_job_policy = SubJobPolicyEnum.ANY_NODE.value
        self.exec_cluster_back_job_host_id = ''
        # Repository areas default to '' before get_repository() runs
        # (presumably get_repository() populates them — confirm in its definition).
        self._data_area = ''
        self._log_area = ''
        self._meta_area = ''
        self._cache_area = ''
        self.get_repository()
        self.backup_resetlogs_id = ''
        self.p_file_param = {}
        # Per-PDB cache filled by set_table_is_has_foreign_key_dict().
        self.table_is_has_foreign_key_dict = {}
        self.backup_scn = ''
        self.check_archive_log_lost = False
