#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import ast
import json
import os
import platform
import stat

import psutil

from common.cleaner import clear
from common.common import output_result_file, execute_cmd, touch_file, retry_when_exception, read_tmp_json_file, \
    output_execution_result_ex, get_copy_info_by_type_list, get_host_sn, invoke_rpc_tool_interface, report_job_details
from common.common_models import ActionResult, SubJobDetails, SubJobModel, LogDetail
from common.const import ExecuteResultEnum, RepositoryName, SubJobStatusEnum, SubJobPolicyEnum, SubJobPriorityEnum, \
    CopyDataTypeEnum, ReportDBLabel, DBLogLevel, RpcToolInterface
from common.file_common import change_owner_by_name, change_mod, check_file_exist, delete_file, create_dir_recursive, \
    delete_path, exec_lchown_dir_recursively, exec_chmod_dir_recursively
from common.parse_parafile import ParamFileUtil, get_env_variable
from common.schemas.thrift_base_data_type import SubJobType, SubJob
from common.schemas.thrift_plugin import RestoreJob
from common.util.cmd_utils import get_livemount_path
from common.util.exec_utils import exec_mkdir_cmd, su_exec_touch_cmd
from oracle import logger
from oracle.common.common import parse_backup_path, write_tmp_json_file, convert_error_code, \
    get_log_path_by_meta_type, get_oracle_group, shutdown_database_abort, \
    mount_bind_path, umount_bind_path, get_instance_name_by_id, modify_instance_name_in_p_file, \
    clear_repository_dir_exclude_prefix, general_uuid
from oracle.common.constants import RecoverTargetType, RootCommand, PluginPathConstant, \
    CheckDBTypeEnum, ErrorCode, RestoreTarget, \
    RestoreByType, ScriptExitCode, CacheRepoFilename, OracleProgress, RestoreJobName, \
    RestoreResultKey, OracleDataBaseType, Platform, OracleReportLabel
from oracle.common.copy_func import CopyDBInfo, get_copy_db_info
from oracle.common.linux_common import execute_linux_cmd, get_linux_oracle_version, execute_script
from oracle.common.user_env_common import update_oracle_home_path, get_user_shell_type, update_oracle_env
from oracle.common.windows_common import get_low_auth_info, execute_windows_cmd, get_oracle_version
from oracle.services.restore.assist_ins_restore import AssistInsOracleRestore
from oracle.services.restore.pdb_restore import PdbRestore
from oracle.services.restore.restore_common import check_inst_status, check_disk_group_exits
from oracle.services.restore.restore_func import clean_aix_restore_ln_path
from oracle.services.restore.storage_restore import OracleStorageRestore
from oracle.services.restore.table_restore import TableOracleRestore
from oracle.services.restore.windows_restore import WindowsOracleRestore


def get_livemount_path_windows(job_id, path):
    """Resolve the E6000 livemount directory used as the file-system mount
    point during a Windows restore.

    Falls back to *path* itself when no job id is given or when the
    job-specific livemount directory does not exist on disk.
    """
    if not job_id:
        return path
    candidate = os.path.join(path, "livemount", job_id)
    return candidate if os.path.exists(candidate) else path


class OracleRestore:
    def __init__(self, pid, job_id, sub_job_id):
        """Initialize the restore context and parse the common job parameters.

        :param pid: request/parameter-file id used to locate the job params
        :param job_id: id of the main restore job
        :param sub_job_id: id of the current sub job (may be empty for main jobs)
        """
        self.assist_ins_id = None
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        # Target database identity and credentials
        self.db_name = ''
        self.db_password = ''
        self.user_name = ''
        self.original_db_instance = ''
        self.db_instance = ''
        self.db_user = ''
        self.db_user_pwd = ''
        # Oracle installation environment
        self.oracle_home = ''
        self.oracle_base = ''
        # ASM credentials (empty when the copy has no ASM info)
        self.asm_user = ''
        self.asm_user_pwd = ''
        self.channels = ''
        # Point-in-time recovery targets (timestamp / SCN)
        self.pit_time = ''
        self.pit_scn = ''
        # SCN passed to the shell script when it is invoked
        self.scn_to_script = ''
        # Repository paths resolved later by get_repo_info()
        self.data_repo_path = ''
        self.log_repo_path = ''
        self.cache_repo_path = ''
        self.meta_data_path = ''
        self.recover_target = ''
        self.recover_path = ''
        self.job_priority = ''
        self.job_name = ''
        self.recover_order = ''
        self.storage_type = 0
        self.asm_sid_name = ''
        self.db_type = 0
        self.enc_algo = ''
        self.enc_key = ''
        self.pfile_pid = ''
        self.restore_by = ''
        self.recover_num = ''
        # Used when restoring to a new host
        self.pfile_params = ''
        # Oracle installation user
        self.oracle_user = 'oracle'
        # Group name of the Oracle installation user
        self.oracle_group = ''
        self.access_oracle_base = ""
        self.access_oracle_home = ""
        self.target_location = ''
        self.resetlogs_id = ''
        # Used to catalog multiple log directories during a log restore
        self.log_job_id_list = ''
        self.params_from_file = {}
        # sub_type of the target environment
        self.target_sub_type = ''
        # sub_type of the copy
        self.copy_sub_type = ''
        self.asm_install_user = ''
        self.bct_status = ""
        self.backup_time = ""
        self.instances = ''
        self.target_db_run_user = ''
        self.target_db_run_user_pwd = ''
        self.restore_files = []
        self.is_modify_db_config = ''
        self.target_db_version = ''
        self.parent_subtype = ''
        self.list_cluster_other = []
        self.block_change_tracking = True
        self.pdb_restore = False
        self.pdb_names = []
        # Used for cluster table-level restore
        self.service_ip = ''
        self.port = ''
        self.parse_common_params()

    def parse_common_params(self):
        """Load the job parameter file and populate the restore context.

        Reads the platform-appropriate parameter file, extracts copy/target
        sub types, credentials, PIT targets, install user info and restore
        options, then delegates to the deploy-type / copy-type specific
        parsers. Called once from __init__.
        """
        if platform.system().lower() == "windows":
            self.params_from_file = ParamFileUtil.parse_param_windows_file(self._pid)
        else:
            self.params_from_file = ParamFileUtil.parse_param_file(self._pid)
        job = RestoreJob(**self.params_from_file.get('job'))
        self.target_sub_type = job.target_object.sub_type
        self.copy_sub_type = job.copies[0].protect_object.sub_type
        self.db_user = get_env_variable(f'job_copies_0_protectObject_auth_authKey_{self._pid}')
        self.db_user_pwd = get_env_variable(f'job_copies_0_protectObject_auth_authPwd_{self._pid}')
        try:
            self.get_asm_info(job)
        except Exception as exception:
            # Failure to fetch ASM info is tolerated (it may simply not be
            # configured); the restore attempt continues.
            logger.debug(f'Get asm info failed, may not set it, exception:{exception}, job_id:{self._job_id}.')
        self.pfile_params = job.extend_info.get('RESTORE_TARGET_HOST')
        self.channels = job.extend_info.get('CHANNELS', '')
        self.bct_status = job.extend_info.get('bctStatus', "false")
        self.pit_time = job.extend_info.get('restoreTimestamp', '')
        self.pit_scn = job.extend_info.get('restoreScn', '')
        self.target_location = job.extend_info.get('targetLocation')
        self.instances = job.extend_info.get('instances', '')
        self.restore_files = job.extend_info.get('restoreFiles', '')
        if self.restore_files:
            self.restore_files = json.loads(self.restore_files)
        self.is_modify_db_config = job.extend_info.get('isModifyDBConfig', '')
        if self.target_location == RestoreTarget.ORIGINAL:
            self.recover_target = RecoverTargetType.SAME.value
        else:
            self.recover_target = RecoverTargetType.OTHER.value
        # Oracle restore target directory (a path on the host); empty means
        # restore to the original location.
        self.recover_path = job.extend_info.get('RESTORE_PATH', '')
        # The shell script always uses storage_type 1
        self.storage_type = 1
        # The shell script always uses db_type 0
        self.db_type = 0
        self.block_change_tracking = job.copies[0].extend_info.get('block_change_tracking')
        # Database install user name taken from the target cluster
        install_user_name = job.target_object.extend_info.get('installUsername', 'oracle')
        if install_user_name:
            self.oracle_user = install_user_name
        self.oracle_group = job.target_object.extend_info.get('oracle_group', 'oinstall')
        get_user_shell_type(self.oracle_user)
        # Fallbacks for hosts where the Oracle environment variables are not configured
        self.access_oracle_base = job.target_object.extend_info.get("accessOracleBase", "")
        self.access_oracle_home = job.target_object.extend_info.get("accessOracleHome", "")
        update_oracle_env(oracle_home=self.access_oracle_home, oracle_base=self.access_oracle_base)
        logger.info(f'Got params, target_sub_type:{self.target_sub_type}, copy_sub_type:{self.copy_sub_type},'
                    f'pit_time:{self.pit_time}, pit_scn:{self.pit_scn}, '
                    f'target_location:{self.target_location}, job_id:{self._job_id}.')
        self._parse_param_for_different_deploy_type(job)
        self._parse_param_for_different_restore_data()
        self._parse_param_for_pdb_restore(job)
        self._parse_param_for_different_copy_type(job)
        self.recover_num = len(job.target_env.nodes)
        self.parse_db_run_user_info()
        self.set_target_db_version(job)
    def set_access_oracle_home(self):
        """Apply the user-supplied ORACLE_HOME override, if one was configured."""
        if not self.access_oracle_home:
            return
        update_oracle_home_path(self.access_oracle_home)

    def set_target_db_version(self, job):
        """Resolve the target database version from the job's target object.

        For a cluster target with instance details, the version of the first
        instance is used; otherwise the plain 'version' extend-info field.
        """
        extend_info = job.target_object.extend_info
        raw_instances = extend_info.get('instances', '')
        if self.target_sub_type != OracleDataBaseType.ORACLE_CLUSTER or not raw_instances:
            self.target_db_version = extend_info.get('version', '')
            return
        instance_entries = json.loads(raw_instances)
        if instance_entries:
            self.target_db_version = instance_entries[0].get('version', '')

    def parse_db_run_user_info(self):
        """On Windows, read the low-privilege DB run user and password; no-op elsewhere."""
        if platform.system().lower() != "windows":
            return
        _, run_user_info = get_low_auth_info(self._pid)
        if not run_user_info:
            return
        self.target_db_run_user = run_user_info.get("low_auth_user", "")
        self.target_db_run_user_pwd = run_user_info.get("password", "")

    def get_asm_info(self, job):
        """Parse the ASM credentials embedded in the copy's protect object.

        Raises if the 'asmInfo' extend-info entry is absent or not valid
        JSON; the caller treats that as "ASM not configured".
        """
        raw_asm_info = job.copies[0].protect_object.auth.extend_info.get('asmInfo')
        parsed = json.loads(raw_asm_info)
        self.asm_user = parsed.get('authKey')
        self.asm_user_pwd = parsed.get('authPwd')
        self.asm_install_user = parsed.get('installUsername')

    def get_repo_info(self):
        """Parse the data/meta/cache/log repository info from the job copies.

        For every copy in the job parameters:
        - log copies: record the log repository path (only the parent
          directory of the first log path is used);
        - data copies: record the data repository path, ensure the cache
          directory exists, and remember the copy timestamp.

        The previous implementation duplicated the whole loop for Windows and
        non-Windows; the only difference is which livemount-path helper is
        used, so that choice is now made once up front.
        """
        copies = self.params_from_file.get("job", {}).get('copies')
        if platform.system().lower() == "windows":
            resolve_livemount_path = get_livemount_path_windows
        else:
            resolve_livemount_path = get_livemount_path
        for copy_info in copies:
            repositories = copy_info.get("repositories", [])
            if copy_info.get('type') == CopyDataTypeEnum.LOG_COPY.value:
                # Log copy: take the first path of the first object; only its
                # parent directory is needed here.
                self.log_repo_path = resolve_livemount_path(
                    self._job_id, get_log_path_by_meta_type(repositories))
            else:
                # Data copy: resolve data/meta/cache repository paths.
                repo_paths = parse_backup_path(repositories)
                self.data_repo_path = resolve_livemount_path(
                    self._job_id, repo_paths.get(RepositoryName.DATA_REPOSITORY)[0])
                self.create_cache_dir_if_not_exist(repo_paths)
                self.backup_time = copy_info.get('timestamp')

    def create_cache_dir_if_not_exist(self, repo_paths):
        """Resolve the cache repository path, creating a job-scoped fallback
        directory under the data repository when no cache repo was delivered."""
        cache_paths = repo_paths.get(RepositoryName.CACHE_REPOSITORY)
        if cache_paths:
            self.cache_repo_path = cache_paths[0]
            return
        self.cache_repo_path = os.path.join(self.data_repo_path, f'cache_{self._job_id}')
        create_dir_recursive(self.cache_repo_path)

    def allow_restore_in_local_node(self):
        """Decide whether the restore may run on the local node.

        Both the prerequisite (main) job and sub jobs call this entry point;
        sub jobs carry a 'subJob' section in the parameter file while the
        main job does not. The decision is written to the result file as an
        ActionResult rather than returned.
        """
        if not self.params_from_file.get('subJob'):
            # Main job
            if self.is_storage_snapshot_copy() \
                    and not OracleStorageRestore(self._job_id, self._sub_job_id, self._pid,
                                                 self.params_from_file).need_check_node_can_restore():
                # This agent does not belong to the protected resource, so the
                # database online status does not need to be checked.
                logger.info(f"no need_check_node_can_restore")
                response = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value)
                output_result_file(self._pid, response.dict(by_alias=True))
            else:
                self._allow_first_job()
        else:
            # Sub job
            if self.is_storage_snapshot_copy():
                response = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
                output_result_file(self._pid, response.dict(by_alias=True))
            else:
                self.parse_sub_job_params()
                if self.job_name == RestoreJobName.FIRST_JOB:
                    self._allow_first_job()
                else:
                    self._allow_second_job()

    def restore_prerequisite(self):
        """Run the restore prerequisite task.

        Dispatches to the storage-snapshot / table-level / PDB / database
        check depending on the restore kind, adjusts permissions on the data
        repository's 'additional' directory (the restore fails otherwise),
        and records the outcome in a progress flag file in the cache repo so
        restore_prerequisite_progress() can report it later.
        """
        self.get_repo_info()
        # For a storage snapshot restore, LUN information has to be fetched first
        if self.is_storage_snapshot_copy():
            response = OracleStorageRestore(self._job_id, self._sub_job_id, self._pid,
                                            self.params_from_file).restore_prerequisite()
        elif self.is_table_restore():
            if platform.system().lower() != "windows":
                self.prerequisite_change_file_owner()
            script_param = self.get_restore_script_param()
            tables = self.params_from_file.get("job").get("extendInfo").get("tables")
            is_overwrite = self.params_from_file.get("job").get("extendInfo").get("isOverwrite", False)
            restore_object = TableOracleRestore(self._job_id, self._pid, tables, self._sub_job_id, is_overwrite)
            restore_object.get_restore_vars(script_param)
            response = restore_object.table_restore_prerequisite()
        elif self.is_pdb_restore():
            response = self.restore_pdb_check()
        else:
            response = self.restore_db_check()
        # Write a flag file to the cache repository for later progress queries
        result_file = os.path.join(self.cache_repo_path,
                                   f'{CacheRepoFilename.RESTORE_PREREQUISITE_PROGRESS}_{self._job_id}')
        if check_file_exist(result_file):
            logger.info(f'Remove progress flag:{result_file}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            delete_file(result_file)
        if response.code == ExecuteResultEnum.SUCCESS.value:
            # The script succeeded: refresh the result file with a COMPLETED status
            output_execution_result_ex(result_file, {
                RestoreResultKey.TASK_STATUS: SubJobStatusEnum.COMPLETED.value,
                RestoreResultKey.ERROR_CODE: ErrorCode.SUCCESS.value, RestoreResultKey.ERROR_PARAM: ''})
            response.code = ExecuteResultEnum.SUCCESS.value
            response.body_err = ErrorCode.SUCCESS.value
        else:
            err_params = ''
            if response.body_err_params:
                err_params = response.body_err_params[0]
            output_execution_result_ex(result_file, {
                RestoreResultKey.TASK_STATUS: SubJobStatusEnum.FAILED.value,
                RestoreResultKey.ERROR_CODE: response.body_err, RestoreResultKey.ERROR_PARAM: err_params})
        logger.info(f'Write restore prerequisite progress file successfully, path:{result_file}, '
                    f'job_id:{self._job_id}.')
        output_result_file(self._pid, response.dict(by_alias=True))
        logger.info(f'Run restore prerequisite successfully, job_id:{self._job_id}.')

    def restore_pdb_check(self):
        """Pre-check for a PDB restore: validate the target path, then adjust
        repository file ownership on non-Windows hosts.

        Returns:
            ActionResult: SUCCESS, or RECOVER_PATH_NOT_EXISTS when the
            target path check fails.
        """
        result = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        if not self.recover_path_check():
            result.code = ExecuteResultEnum.INTERNAL_ERROR.value
            result.body_err = ErrorCode.RECOVER_PATH_NOT_EXISTS.value
            result.body_err_params = [self.recover_path]
        if platform.system().lower() != "windows":
            self.prerequisite_change_file_owner()
        return result

    def restore_db_check(self):
        """Pre-check for a full database restore: version match, target path
        existence, then repository file ownership on non-Windows hosts.

        Returns:
            ActionResult: SUCCESS when every check passes, otherwise the
            error code of the first failed check (INTERNAL_ERROR on an
            unexpected exception).
        """
        result = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        try:
            if not self.version_check():
                result.code = ExecuteResultEnum.INTERNAL_ERROR.value
                result.body_err = ErrorCode.ERROR_ORACLE_VERSION_DISMATCH.value
            elif not self.recover_path_check():
                result.code = ExecuteResultEnum.INTERNAL_ERROR.value
                result.body_err = ErrorCode.RECOVER_PATH_NOT_EXISTS.value
                result.body_err_params = [self.recover_path]
            elif platform.system().lower() != "windows":
                self.prerequisite_change_file_owner()
        except Exception as exception_str:
            logger.exception(exception_str)
            logger.error(f"chown error:{exception_str},job_id:{self._job_id}")
            result.code = ExecuteResultEnum.INTERNAL_ERROR.value
            result.body_err = ErrorCode.INTERNAL_ERROR.value
        return result

    def version_check(self):
        """Check that the locally installed Oracle major version matches the
        copy's recorded version; trivially True when the copy carries no
        database info to compare against."""
        copy_db_info: CopyDBInfo = get_copy_db_info(self.data_repo_path)
        if not copy_db_info:
            return True
        on_windows = platform.system().lower() == "windows"
        if on_windows:
            _, major_version, _ = get_oracle_version(self._pid)
        else:
            _, major_version, _ = get_linux_oracle_version(self.oracle_user)
        return copy_db_info.check_version(major_version)

    def recover_path_check(self):
        """Validate the restore target path.

        Returns True when no target path is configured, or when the local
        directory exists (its ownership is handed to the oracle user), or
        when the path is an ASM disk group ('+'-prefixed) that exists.
        Returns False for a non-existent local path.
        """
        if not self.recover_path:
            logger.info(f"recover_path is empty,no need to check,job_id:{self._job_id}")
            return True
        if os.path.exists(self.recover_path) and os.path.isdir(self.recover_path):
            exec_lchown_dir_recursively(self.recover_path, self.oracle_user, self.oracle_group)
            logger.info(f"recover_path exists,chown successfully:{self.recover_path}")
            return True
        # A non-ASM local path that does not exist is a hard error
        if not self.recover_path[0] == "+":
            logger.error(f"local file path:{self.recover_path} not exits")
            return False
        if platform.system().lower() == Platform.WINDOWS:
            from oracle.common.windows_common import get_asm_instance
            asm_instance = get_asm_instance(self._job_id, "OracleASMService")
            asm_install_user = "grid"
        else:
            from oracle.common.linux_common import get_asm_install_user, get_asm_instance
            asm_install_user = get_asm_install_user(self.asm_install_user)
            asm_instance = get_asm_instance()
        return check_disk_group_exits(self._job_id, self.recover_path, asm_install_user, asm_instance, self.oracle_home)

    @retry_when_exception(retry_times=3, delay=3, logger=logger)
    def prerequisite_change_file_owner(self):
        """Adjust ownership and permissions of the data (and optional log)
        repository so the restore scripts can read them.

        Raises:
            Exception: when any chown/chmod command fails (retried up to 3
            times by the decorator).

        The previous implementation duplicated the whole command list for the
        PDB and non-PDB cases; they differ only in the trailing netadmin
        chown, which is now appended conditionally.
        """
        # AIX needs to look up the group that the root user belongs to
        if platform.system() == 'AIX':
            self.prerequisite_change_file_owner_aix()
            return
        root_group = get_oracle_group("root")
        cmd_list = [
            # Do not change the permissions of hidden directories/files
            f"find {self.data_repo_path} -name '.?*' -prune -o -exec "
            f"chown -h {self.oracle_user}:{self.oracle_group} {{}} +",
            f"chown -R -h root:{root_group} {self.data_repo_path}/additional",
            f"chmod -R 777 {self.data_repo_path}/additional",
            f"chown -R -h {self.oracle_user}:{self.oracle_group} {self.data_repo_path}/additional/dbs",
            f"chmod -R 640 {self.data_repo_path}/additional/dbs",
            f"chmod 775 {self.data_repo_path}/additional/dbs"
        ]
        if not self.is_pdb_restore():
            # Non-PDB restores additionally need netadmin owned by the oracle user
            cmd_list.append(
                f"chown -R -h {self.oracle_user}:{self.oracle_group} {self.data_repo_path}/additional/netadmin")
        if self.log_repo_path:
            cmd_list.append(
                f"find {self.log_repo_path}  -type d -name '.*' -o -name '.' -prune -o -exec "
                f"chown -h {self.oracle_user}:{self.oracle_group} {{}} +")
        for cmd in cmd_list:
            return_code, output, err_str = execute_cmd(cmd)
            if return_code != str(ScriptExitCode.SUCCESS):
                logger.error(f"Change owner failed, cmd:{cmd}, return_code:{return_code}, err_str:{err_str}, "
                             f"job_id:{self._job_id}.")
                raise Exception("Change owner failed.")
            logger.info(f'Run command successfully: {cmd}, job_id:{self._job_id}.')

    def prerequisite_change_file_owner_aix(self):
        """AIX variant of the prerequisite ownership/permission adjustment.

        Livemount repositories only need a recursive chown/chmod; otherwise
        the data repository, its 'additional' tree and the optional log
        repository are handed to the appropriate users via shell commands
        and the recursive helpers.

        Raises:
            Exception: when any chown/chmod step fails.
        """
        if self.data_repo_path.__contains__("livemount"):
            self.chown_aix(self.data_repo_path)
            self.chown_aix(self.log_repo_path)
            return
        logger.info(f'prerequisite_change_file_owner_aix.')
        root_group = get_oracle_group("root")

        cmd = f"find {self.data_repo_path} -name '.?*' -prune -o -exec " \
              f"chown -h {self.oracle_user}:{self.oracle_group} {{}} +"
        return_code, output, err_str = execute_cmd(cmd)
        if return_code != str(ScriptExitCode.SUCCESS):
            logger.error(f"Change owner failed, cmd:{cmd}, return_code:{return_code}, err_str:{err_str}, "
                         f"job_id:{self._job_id}.")
            raise Exception("Change owner failed.")
        if not exec_lchown_dir_recursively(f"{self.data_repo_path}/additional", 'root', root_group):
            logger.error(f"exec_lchown_dir_recursively failed, path {self.data_repo_path}/additional.")
            raise Exception("Change owner additional failed.")
        cmd_list = [
            # Do not change the permissions of hidden directories/files
            f"chmod -R 777 {self.data_repo_path}/additional",
            f"chown -R -h {self.oracle_user}:{self.oracle_group} {self.data_repo_path}/additional/dbs",
            f"chmod -R 640 {self.data_repo_path}/additional/dbs",
            f"chmod 775 {self.data_repo_path}/additional/dbs",
        ]

        if self.log_repo_path:
            cmd_list.append(
                f"find {self.log_repo_path} -name '.?*' -prune -o -exec "
                f"chown -h {self.oracle_user}:{self.oracle_group} {{}} +")
        for cmd in cmd_list:
            return_code, output, err_str = execute_cmd(cmd)
            if return_code != str(ScriptExitCode.SUCCESS):
                logger.error(f"Change owner failed, cmd:{cmd}, return_code:{return_code}, err_str:{err_str}, "
                             f"job_id:{self._job_id}.")
                raise Exception("Change owner failed.")
            logger.info(f'Run command successfully: {cmd}, job_id:{self._job_id}.')
        if not exec_lchown_dir_recursively(f"{self.data_repo_path}/additional/netadmin",
                                           self.oracle_user, self.oracle_group):
            # Bug fix: the log previously reported '<data_repo>/netadmin'
            # although the path actually operated on is 'additional/netadmin'.
            logger.error(f"exec_lchown_dir_recursively failed, path {self.data_repo_path}/additional/netadmin.")
            raise Exception("Change owner netadmin failed.")

    def chown_aix(self, copy_data_path):
        """Recursively hand *copy_data_path* to the oracle user/group and set
        0o755 permissions; a missing/empty path is silently skipped.

        Raises:
            Exception: when the recursive chown helper reports failure.
        """
        logger.info(f'chown_aix restore.{self._job_id}')
        if not copy_data_path:
            return
        chown_ok = exec_lchown_dir_recursively(copy_data_path, self.oracle_user, self.oracle_group)
        if not chown_ok:
            logger.error(f"exec_lchown_dir_recursively {copy_data_path} failed,{self._job_id}")
            raise Exception(f"Change owner data_path failed.")
        exec_chmod_dir_recursively(copy_data_path, 0o755)
        logger.info(f"chown path:{copy_data_path} success,{self._job_id}")

    def restore_prerequisite_progress(self):
        """Report the progress of the restore prerequisite task.

        Reads the flag file that restore_prerequisite() wrote to the cache
        repository: if the file exists the task has finished (COMPLETED or
        FAILED, with error details taken from the file); otherwise the task
        is still RUNNING. The resulting SubJobDetails is written to the
        result file.
        """
        self.get_repo_info()
        result_file = os.path.join(self.cache_repo_path,
                                   f'{CacheRepoFilename.RESTORE_PREREQUISITE_PROGRESS}_{self._job_id}')
        # The flag file existing means the prerequisite task has finished;
        # otherwise it is still in progress.
        if os.path.exists(result_file):
            result_data = read_tmp_json_file(result_file)
            task_status = result_data.get(RestoreResultKey.TASK_STATUS, SubJobStatusEnum.RUNNING.value)
            if task_status and task_status == SubJobStatusEnum.FAILED.value:
                error_code = result_data.get(RestoreResultKey.ERROR_CODE)
                contents = result_data.get(RestoreResultKey.ERROR_PARAM)
                # Failure case
                log_detail = LogDetail(logInfo=ReportDBLabel.PRE_REQUISIT_FAILED,
                                       logInfoParam=[self._job_id],
                                       logLevel=DBLogLevel.ERROR.value,
                                       logDetail=error_code,
                                       logDetailParam=[contents],
                                       logDetailInfo=[])
                sub_job_detail = SubJobDetails(taskId=self._job_id,
                                               subTaskId=self._sub_job_id,
                                               progress=OracleProgress.PROGRESS_FIFTY,
                                               logDetail=[log_detail],
                                               taskStatus=task_status,
                                               extendInfo={'nodeId': get_host_sn()})
                logger.error(
                    f'Restore job failed, task_status:{task_status}, log_detail:{log_detail}, job_id:{self._job_id}')
                output_result_file(self._pid, sub_job_detail.dict(by_alias=True))
            else:
                logger.info(
                    f'Restore prerequisite progress: {SubJobStatusEnum.COMPLETED.value}, job_id:{self._job_id}.')
                progress = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                         progress=OracleProgress.PROGRESS_ONE_HUNDRED,
                                         taskStatus=SubJobStatusEnum.COMPLETED.value)
                output_result_file(self._pid, progress.dict(by_alias=True))
        else:
            logger.debug(f'Restore prerequisite progress: {SubJobStatusEnum.RUNNING.value}, job_id:{self._job_id}.')
            progress = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                     progress=OracleProgress.PROGRESS_FIFTY,
                                     taskStatus=SubJobStatusEnum.RUNNING.value)
            output_result_file(self._pid, progress.dict(by_alias=True))

    def restore_gen_sub_job(self):
        """Split the restore into sub jobs and write them to the result file.

        Storage-snapshot restores delegate to OracleStorageRestore; cluster
        PDB restores use the three-stage PDB plan; otherwise one main restore
        sub job is generated, plus (for cluster targets) a second per-node
        sub job that starts the database on every node.
        """
        if self.is_storage_snapshot_copy():
            restore_object = OracleStorageRestore(self._job_id, self._sub_job_id, self._pid,
                                                  self.params_from_file)
            script_param = self.get_restore_script_param()
            restore_object.get_restore_vars(script_param)
            response = restore_object.gen_sub_job()
            logger.info(f'Restore generate snapshot sub job successfully, response:{response}.')
        elif self.is_pdb_restore() and self.parent_subtype == OracleDataBaseType.ORACLE_CLUSTER:
            response = self.gen_pdb_cluster_sub_job()
        else:
            response = []
            # First sub job: priority 1, recover_order 1, retried on another
            # node if it fails.
            job_info = json.dumps({'recover_order': 1})
            response.append(
                SubJobModel(
                    jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                    jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value, jobName=RestoreJobName.FIRST_JOB,
                    policy=SubJobPolicyEnum.RETRY_OTHER_NODE_WHEN_FAILED.value,
                    jobInfo=job_info
                ).dict(by_alias=True)
            )

            # For a cluster, add one more sub job (priority 2, recover_order 2)
            # that opens the database on every node.
            if (self.target_sub_type == OracleDataBaseType.ORACLE_CLUSTER and not self.is_table_restore()
                    and not self.is_pdb_restore()):
                second_job_info = json.dumps({'recover_order': 2})
                response.append(
                    SubJobModel(
                        jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                        jobPriority=SubJobPriorityEnum.JOB_PRIORITY_2.value, jobName=RestoreJobName.SECOND_JOB,
                        policy=SubJobPolicyEnum.EVERY_NODE_ONE_TIME.value, ignoreFailed=True,
                        jobInfo=second_job_info
                    ).dict(by_alias=True)
                )
        output_result_file(self._pid, response)
        logger.info(f'Restore generate sub job successfully, job_id:{self._job_id}.')

    def gen_pdb_cluster_sub_job(self):
        """Generate the three-stage sub job plan for a PDB restore on a cluster.

        Stages (in priority order):
        1. close the PDB on every node before restoring;
        2. perform the restore, retrying on another node on failure;
        3. open the PDB on every node after restoring.

        Returns:
            list: serialized SubJobModel dicts, one per stage.

        The previous implementation built three near-identical SubJobModel
        blocks; they are now produced from a (order, priority, name, policy)
        spec table.
        """
        sub_job_specs = [
            (1, SubJobPriorityEnum.JOB_PRIORITY_1.value, RestoreJobName.PDB_PRE_JOB,
             SubJobPolicyEnum.EVERY_NODE_ONE_TIME.value),
            (2, SubJobPriorityEnum.JOB_PRIORITY_2.value, RestoreJobName.FIRST_JOB,
             SubJobPolicyEnum.RETRY_OTHER_NODE_WHEN_FAILED.value),
            (3, SubJobPriorityEnum.JOB_PRIORITY_3.value, RestoreJobName.PDB_POST_JOB,
             SubJobPolicyEnum.EVERY_NODE_ONE_TIME.value),
        ]
        response = []
        for recover_order, priority, job_name, policy in sub_job_specs:
            response.append(
                SubJobModel(
                    jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                    jobPriority=priority, jobName=job_name,
                    policy=policy,
                    jobInfo=json.dumps({'recover_order': recover_order})
                ).dict(by_alias=True)
            )
        return response

    def restore_windows_mount(self):
        """Bind-mount the data and log repository paths onto the fixed
        Windows restore paths.

        Returns:
            bool: True when every required bind mount succeeds.

        Fixes: the success log previously labelled the job id as 'pid';
        also deduplicates the two identical mount branches.
        """
        mount_specs = [
            (self.data_repo_path, PluginPathConstant.WINDOWS_ORACLE_RESTORE_DATA_FILE_PATH.value, "data"),
            (self.log_repo_path, PluginPathConstant.WINDOWS_ORACLE_RESTORE_LOG_FILE_PATH.value, "log"),
        ]
        for repo_path, target_path, repo_name in mount_specs:
            # Empty repo paths (e.g. no log copy) are simply skipped.
            if repo_path and not mount_bind_path(repo_path, target_path):
                logger.error(f"Mount bind {repo_name} path failed, job_id:{self._job_id}.")
                return False
        logger.info(f"Restore mount bind success, job_id:{self._job_id}.")
        return True

    def restore_aix_mount(self):
        """Create the AIX symlink areas pointing at the data/log repositories.

        :return: True when every required link succeeds, False otherwise
        """
        link_plan = (
            (self.data_repo_path, PluginPathConstant.AIX_RESTORE_DATA, "data"),
            (self.log_repo_path, PluginPathConstant.AIX_RESTORE_LOG, "log"),
        )
        for repo_path, link_area, repo_kind in link_plan:
            if repo_path and not self.aix_ln(repo_path, link_area):
                logger.error(f"ln -sf aix bind {repo_kind} path failed, job_id:{self._job_id}.")
                return False
        logger.info(f"Restore mount bind success, pid:{self._job_id}.")
        return True

    def aix_ln(self, src_area, des_area):
        """Recreate ``des_area`` and symlink ``src_area`` into it with ``ln -sf``.

        :param src_area: source path to link from
        :param des_area: destination directory (deleted and recreated first)
        :return: True when the link command is considered successful
        """
        logger.info(f"src_area:{src_area},des_area:{des_area},job_id:{self._job_id}")
        # Start from a clean destination directory before linking.
        delete_path(des_area)
        exec_mkdir_cmd(des_area, is_check_white_list=False)
        exec_cmd_str = f"ln -sf {src_area} {des_area}"
        ret, std_out, std_err = execute_cmd(exec_cmd_str)
        logger.info(f"ret:{ret},std_out:{std_out}, std_err:{std_err},cmd:{exec_cmd_str}")
        # NOTE(review): failure is detected with ``not ret``; if execute_cmd
        # follows the usual 0-on-success exit-code convention this condition
        # may be inverted — confirm execute_cmd's return contract.
        if not ret:
            logger.error(f"aix ln error:{self._job_id}")
            return False
        # Fix ownership of the freshly created link area.
        self.chown_aix(des_area)
        return True

    def restore(self):
        """Restore sub-job entry point.

        Prepares parameters, sets up platform-specific repository access
        (Windows bind mounts / AIX live-mount symlinks), dispatches to the
        matching restore flavour (snapshot / table / PDB / full database) and
        writes the ActionResult file for the agent.
        """
        self.prepare_params()
        if platform.system().lower() == Platform.WINDOWS:
            if not self.restore_windows_mount():
                response = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value)
                output_result_file(self._pid, response.dict(by_alias=True))
                return
        # AIX live-mount copies are reached through symlink areas, not bind mounts.
        if platform.system().lower() == Platform.AIX and self.data_repo_path.__contains__("livemount"):
            if not self.restore_aix_mount():
                response = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value)
                output_result_file(self._pid, response.dict(by_alias=True))
                return
        script_param = self.get_restore_script_param()
        if self.is_storage_snapshot_copy():
            # Storage snapshot (LUN) restore.
            exit_status = self.storage_snapshot_copy_restore(script_param).exe_restore()
        elif self.is_table_restore():
            # Table restore reports its own result file and returns early.
            exit_status = self.execute_restore_table(script_param)
            self.handel_restore_table_and_pdb_result(exit_status)
            return
        elif self.is_pdb_restore():
            # PDB restore reports its own result file and returns early.
            exit_status = self.execute_restore_pdb()
            self.handel_restore_table_and_pdb_result(exit_status)
            return
        else:
            # Regular (full database) restore.
            exit_status = self.linux_window_restore(script_param)

        logger.info(f'Restore script end, code:{exit_status}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')

        result_file = os.path.join(self.cache_repo_path, f'{CacheRepoFilename.RESTORE_PROGRESS}_{self._sub_job_id}')
        if check_file_exist(result_file):
            logger.info(f'Remove progress flag:{result_file}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            delete_file(result_file)

        # Translate exit_status into the progress file and the response.
        response = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value)
        self.handle_exit_status(exit_status, response, result_file)

        output_result_file(self._pid, response.dict(by_alias=True))

    def handle_exit_status(self, exit_status, response, result_file):
        """Translate the restore script's exit status into the progress file
        and the ActionResult ``response`` (mutated in place).

        :param exit_status: exit code returned by the restore script
        :param response: ActionResult updated with the final code/body_err
        :param result_file: path of the progress file read by restore_progress
        """
        if exit_status == ExecuteResultEnum.SUCCESS.value:
            # Script succeeded: refresh the result file with COMPLETED status.
            output_execution_result_ex(result_file, {
                RestoreResultKey.TASK_STATUS: SubJobStatusEnum.COMPLETED.value,
                RestoreResultKey.ERROR_CODE: ErrorCode.SUCCESS.value, RestoreResultKey.ERROR_PARAM: ''})
            response.code = ExecuteResultEnum.SUCCESS.value
            response.body_err = ErrorCode.SUCCESS.value
        else:
            # Single-file restore does not need to shut down the database.
            if not self.restore_files:
                # Best effort: shut the database down after a failure so a
                # retry on another node is not blocked, regardless of outcome.
                shutdown_database_abort(self._pid, self.db_instance, self.oracle_user, self.db_user_pwd)
            # Shell exit codes must be converted to the error codes the
            # manager UI understands before reporting.
            error_code, contents = convert_error_code(self._job_id, self._sub_job_id, exit_status)
            output_execution_result_ex(result_file, {
                RestoreResultKey.TASK_STATUS: SubJobStatusEnum.FAILED.value,
                RestoreResultKey.ERROR_CODE: error_code, RestoreResultKey.ERROR_PARAM: contents})
            response.body_err = ErrorCode.ERROR_AGENT_RESTORE_DATABASE_FAILED.value
            logger.error(f'Restore script code:{exit_status}, job_id:{self._job_id},sub_job_id:{self._sub_job_id}.')

    def linux_window_restore(self, script_param):
        """Run the restore for the current platform.

        Uses a WindowsOracleRestore helper on Windows, otherwise the native
        Linux shell-script path.

        :param script_param: restore parameter dict
        :return: exit status of the restore
        """
        if platform.system().lower() != "windows":
            return self.linux_exe_restore(script_param)
        win_restore = WindowsOracleRestore(self._pid, PluginPathConstant.SCRIPTS_PATH.value, self._sub_job_id)
        win_restore.get_restore_vars(script_param)
        return win_restore.exe_restore()

    def storage_snapshot_copy_restore(self, script_param):
        """Build an OracleStorageRestore primed with ``script_param`` for a
        storage-snapshot (LUN) copy restore.

        :param script_param: restore parameter dict
        :return: the prepared OracleStorageRestore object
        """
        logger.info(f'Recover is_storage_snapshot_copy, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
        snapshot_restore = OracleStorageRestore(self._job_id, self._sub_job_id, self._pid, self.params_from_file)
        snapshot_restore.get_restore_vars(script_param)
        return snapshot_restore

    def linux_exe_restore(self, script_param):
        """Run the native Linux restore shell script.

        Secrets are handed to the script via execute_script's ``pass_content``
        argument instead of the on-disk parameter file, and wiped from memory
        afterwards.

        :param script_param: restore parameters (secret values still included)
        :return: exit status of the restore script
        """
        pass_content = {
            "Password": script_param.get('Password'),
            "ASMPassword": script_param.get('ASMPassword'),
            'EncKey': script_param.get('EncKey')
        }
        # The temp file holds the parameters with secrets blanked.
        tmp_param_path = self.generate_temp_param_file(script_param)
        try:
            scripts_path_value = PluginPathConstant.SCRIPTS_PATH.value
            cmd = f"sh {scripts_path_value}/{RootCommand.NATIVE_RESTORE.value} " \
                  f"{scripts_path_value} {self._sub_job_id} {len(script_param)} {tmp_param_path}"
            logger.info(f'Start call restore script, cmd: {cmd}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            return execute_script(cmd, pass_content)
        finally:
            # Always remove the temp parameter file and wipe the secrets.
            delete_file(tmp_param_path)
            clear(pass_content.get('Password'))
            clear(pass_content.get('ASMPassword'))
            clear(pass_content.get('EncKey'))

    def generate_temp_param_file(self, script_param):
        """Write ``script_param`` (secrets blanked) to a temp JSON file and
        return its path.

        Note: ``tmp_param`` aliases ``script_param``, so the secret fields are
        blanked in the caller's dict as well — this appears intentional, so
        secrets do not linger in the parameter dict.

        :param script_param: restore parameter dict (mutated: secrets blanked)
        :return: path of the written temp parameter file
        """
        tmp_param = script_param
        # Blank the secrets before writing them to disk.
        tmp_param['Password'] = ''
        tmp_param['ASMPassword'] = ''
        tmp_param['EncKey'] = ''
        # For single-file restore, split the files by filter type.
        logger.info(f"Restore files: {self.restore_files}")
        if self.restore_files:
            tmp_param['restoreFiles'] = True
            for files in self.restore_files:
                if files.get('files', ''):
                    # NOTE(review): 'filterTpe' looks like a typo of
                    # 'filterType' — confirm against the producer of
                    # restore_files before renaming.
                    tmp_param[files.get('filterTpe')] = files.get('files', '')
        else:
            tmp_param['restoreFiles'] = ''
        tmp_param_path = f'{PluginPathConstant.NONE_ROOT_TMP_PATH.value}/script_param_{self._sub_job_id}'
        write_tmp_json_file(tmp_param_path, self._job_id, tmp_param)
        return tmp_param_path

    def handel_restore_table_and_pdb_result(self, exit_status):
        """Write the progress/result files for table and PDB restore sub jobs.

        :param exit_status: exit code of the table/PDB restore flow
        """
        result_file = os.path.join(self.cache_repo_path, f'{CacheRepoFilename.RESTORE_PROGRESS}_{self._sub_job_id}')
        response = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value)
        if check_file_exist(result_file):
            logger.info(f'Remove progress flag:{result_file}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            delete_file(result_file)
        if exit_status == ExecuteResultEnum.SUCCESS.value:
            # Script succeeded: refresh the result file with COMPLETED status.
            output_execution_result_ex(result_file, {
                RestoreResultKey.TASK_STATUS: SubJobStatusEnum.COMPLETED.value,
                RestoreResultKey.ERROR_CODE: ErrorCode.SUCCESS.value, RestoreResultKey.ERROR_PARAM: ''})
            response.code = ExecuteResultEnum.SUCCESS.value
            response.body_err = ErrorCode.SUCCESS.value
        else:
            # Shell exit codes must be converted to the error codes the
            # manager UI understands before reporting.
            error_code, contents = convert_error_code(self._job_id, self._sub_job_id, exit_status)
            output_execution_result_ex(result_file, {
                RestoreResultKey.TASK_STATUS: SubJobStatusEnum.FAILED.value,
                RestoreResultKey.ERROR_CODE: error_code, RestoreResultKey.ERROR_PARAM: contents})
            response.body_err = ErrorCode.ERROR_AGENT_RESTORE_DATABASE_FAILED.value
            logger.error(f'Restore script code:{exit_status}, job_id:{self._job_id},sub_job_id:{self._sub_job_id}.')
        output_result_file(self._pid, response.dict(by_alias=True))

    def execute_restore_table(self, script_param):
        """Run the table-level restore.

        Mounts an assist instance from the copy, runs the Data Pump table
        export/import, then cleans the assist instance up.

        :param script_param: restore parameters for the table restore service
        :return: a ScriptExitCode value (SUCCESS only when every step passed)
        """
        logger.info(f'Recover is_table_restore, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
        if self._is_cdb_database_copy():
            ret_code = self.check_tns_configured()
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f'TNS not configured, ret_code: {ret_code}')
                return ret_code
        rec_memory_limit_gb = self.params_from_file.get("job").get("extendInfo").get("recMemoryLimitGb")
        assist_ins_restore_service = AssistInsOracleRestore(
            self._pid, PluginPathConstant.SCRIPTS_PATH.value, self._job_id, self._sub_job_id, rec_memory_limit_gb,
            self.block_change_tracking)
        assist_ins_restore_service.get_restore_vars(self.get_restore_script_param(),
                                                    self.get_restore_script_param_pdb())
        assist_ins_restore_service.get_init_assist_ins_vars(self.get_init_assist_ins_param())
        is_overwrite = self.params_from_file.get("job").get("extendInfo").get("isOverwrite")
        tables = self.params_from_file.get("job").get("extendInfo").get("tables")
        table_restore_service = TableOracleRestore(
            self._job_id, self._pid, tables, self._sub_job_id, is_overwrite,
            assist_ins_restore_service.get_assist_ins_sid()
        )
        table_restore_service.get_restore_vars(script_param)
        try:
            # Live-mount the assist instance.
            ret_code = assist_ins_restore_service.mount_assist_ins()
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f'restore_assist_ins error, ret_code: {ret_code}')
                return ret_code

            # Run the Data Pump table export/import.
            ret_code = table_restore_service.table_restore()
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f'exe_restore error, ret_code: {ret_code}')
                return ret_code
        except Exception as exception:
            logger.error(f"execute_restore_table failed, exception: {str(exception)},"
                         f" job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.")
            return ScriptExitCode.ERROR_RESTORE_FAILED
        finally:
            # Clean up the assist instance; a cleanup failure is only logged
            # and does not change the status already being returned.
            pdb_name = None
            tables_data = json.loads(tables)
            if tables_data and tables_data[0]:
                pdb_name = tables_data[0].get("pdb_name", None)
            ret_code = assist_ins_restore_service.clear_assist_ins(pdb_name)
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f'delete_sub_instance_by_id error, ret_code: {ret_code}')

        return ScriptExitCode.SUCCESS

    def execute_restore_pdb(self):
        """Run the PDB-level restore.

        For cluster PDB restores, recover_order 1/3 run the pre/post sub jobs
        that close/open the PDBs on every node; otherwise mount an assist
        instance, unplug/plug the PDBs and clean the assist instance up.

        :return: a ScriptExitCode value (SUCCESS only when every step passed)
        """
        logger.info(f'Recover is pdb restore, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
        rec_memory_limit_gb = self.params_from_file.get("job").get("extendInfo").get("recMemoryLimitGb", "2")
        logger.info(f"rec_memory_limit_gb:{rec_memory_limit_gb}")
        assist_ins_restore_service = AssistInsOracleRestore(
            self._pid, PluginPathConstant.SCRIPTS_PATH.value, self._job_id, self._sub_job_id, rec_memory_limit_gb,
            self.block_change_tracking)
        assist_ins_restore_service.get_restore_vars(self.get_restore_script_param(),
                                                    self.get_restore_script_param_pdb())
        assist_ins_restore_service.get_init_assist_ins_vars(self.get_init_assist_ins_param())
        is_overwrite = self.params_from_file.get("job").get("extendInfo").get("isOverwrite")
        pdb_list = self.params_from_file.get("job").get("copies")[0].get("protectObject").get("extendInfo").get("pdb")
        params = self.build_pdb_restore_params()
        self.parse_sub_job_params()
        pdb_restore_service = PdbRestore(
            self._pid, pdb_list, is_overwrite, params, assist_ins_restore_service.get_assist_ins_sid())
        # Cluster-only pre sub job: close the PDBs on every node so they can be
        # dropped later.
        if self.recover_order == 1 and self.parent_subtype == OracleDataBaseType.ORACLE_CLUSTER:
            ret_code = pdb_restore_service.pdb_cluster_pre_job()
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f'exe pdb cluster restore pre_job error, ret_code: {ret_code}')
                return ret_code
        # Cluster-only post sub job: make sure the PDBs are open on every node
        # after the restore.
        elif self.recover_order == 3 and self.parent_subtype == OracleDataBaseType.ORACLE_CLUSTER:
            ret_code = pdb_restore_service.pdb_cluster_post_job()
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f'exe pdb cluster restore post_job error, ret_code: {ret_code}')
                return ret_code
        else:
            try:
                # Live-mount the assist instance.
                ret_code = assist_ins_restore_service.mount_assist_ins()
                if ret_code != ScriptExitCode.SUCCESS:
                    logger.error(f'restore_assist_ins error, ret_code: {ret_code}')
                    return ret_code

                # Unplug/plug the PDB databases.
                ret_code = pdb_restore_service.pdb_restore()
                if ret_code != ScriptExitCode.SUCCESS:
                    logger.error(f'exe_restore error, ret_code: {ret_code}')
                    return ret_code
            except Exception as exception:
                # Log message fixed: this is the PDB restore path (was a
                # copy-paste of execute_restore_table's message).
                logger.error(f"execute_restore_pdb failed, exception: {str(exception)},"
                             f" job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.")
                return ScriptExitCode.ERROR_RESTORE_FAILED
            finally:
                # Clean up the assist instance; a cleanup failure is only
                # logged and does not change the status being returned.
                pdb_name = pdb_list[0] if pdb_list else None
                ret_code = assist_ins_restore_service.clear_assist_ins(pdb_name)
                if ret_code != ScriptExitCode.SUCCESS:
                    logger.error(f'delete_sub_instance_by_id error, ret_code: {ret_code}')

        return ScriptExitCode.SUCCESS

    def build_pdb_restore_params(self):
        """Collect the parameter dict handed to the PDB restore service."""
        extend_info = self.params_from_file.get("job").get("extendInfo")
        return {
            'target_ins_id': self.db_instance,
            'ora_db_user': self.oracle_user,
            'oracle_group': self.oracle_group,
            'restore_path': extend_info.get("RESTORE_PATH"),
            'main_backup_path': self.data_repo_path,
            'cacheRepoPath': self.cache_repo_path,
            'list_cluster_other': self.list_cluster_other,
            'is_open_pdb': extend_info.get("isOpenPdb", ""),
            'asm_instance': self.asm_sid_name,
            'asm_install_user': self.asm_install_user,
            'oracle_home': self.oracle_home,
            'target_db_name': self.params_from_file.get("targetObject", {}).get("name", "")
        }

    def prepare_params(self):
        """Prepare restore parameters: repository info, sub-job info, resetlogs
        ids, and create a pfile when the restore flavour needs one."""
        self.get_repo_info()
        self.parse_sub_job_params()
        self.parse_resetlogs_id_params()
        # Table or PDB restore never needs a pfile.
        if self.is_table_restore() or self.is_pdb_restore():
            logger.info(
                f'Table or pdb restore, skip create_p_file, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            return
        # Single-file restore needs a pfile only when the user chose to modify
        # the database configuration.
        if self.restore_files:
            logger.info(f'File restore, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            if self.is_modify_db_config == 'true':
                self.create_p_file()
                return
        # Restoring to a different host needs a pfile (except snapshot copies).
        if self.recover_target == RecoverTargetType.OTHER.value:
            logger.info(f'Recover to other node, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            if not self.is_storage_snapshot_copy():
                self.create_p_file()

    def handle_resetlogs_id(self):
        """Return a comma separated list of the log-copy job ids associated
        with this restore, or '' when there is no log repository or no
        associated copies."""
        if not self.log_repo_path:
            return ''
        associated_log_copies = self.params_from_file.get("job").get("extendInfo").get("associated_log_copies", {})
        if not associated_log_copies:
            logger.warning(f'associated_log_copies is null')
            return ''
        # The dict keys are the job ids; deduplicate via a set.
        job_ids = set(associated_log_copies)
        logger.info(f'new_job_id_list is {job_ids}')
        return ",".join(job_ids)

    def restore_progress(self):
        """Report the progress of the restore sub job.

        Reads the progress file written by the restore flow and reports 50%
        while RUNNING, 100% once the job ended, attaching the error details
        when the task failed.
        """
        self.get_repo_info()
        result_file = os.path.join(self.cache_repo_path, f'{CacheRepoFilename.RESTORE_PROGRESS}_{self._sub_job_id}')
        result_data = read_tmp_json_file(result_file)
        task_status = result_data.get(RestoreResultKey.TASK_STATUS, SubJobStatusEnum.RUNNING.value)
        # Choose the progress value.
        if task_status == SubJobStatusEnum.RUNNING.value:
            # Fixed at 50% while the task is still running.
            progress = OracleProgress.PROGRESS_FIFTY
        else:
            logger.info(f'Restore job end, task_status:{task_status}, job_id:{self._job_id}, '
                        f'sub_job_id:{self._sub_job_id}.')
            progress = OracleProgress.PROGRESS_ONE_HUNDRED

        # Build the task detail; report the error code when the task failed.
        if task_status == SubJobStatusEnum.FAILED.value:
            error_code = result_data.get(RestoreResultKey.ERROR_CODE)
            contents = result_data.get(RestoreResultKey.ERROR_PARAM)
            # Failure detail carries the converted error code and parameters.
            log_detail = LogDetail(logInfo=ReportDBLabel.RESTORE_SUB_FAILED,
                                   logInfoParam=[self._sub_job_id],
                                   logLevel=DBLogLevel.ERROR.value,
                                   logDetail=error_code,
                                   logDetailParam=[],
                                   logDetailInfo=[contents])
            sub_job_detail = SubJobDetails(taskId=self._job_id,
                                           subTaskId=self._sub_job_id,
                                           progress=progress,
                                           logDetail=[log_detail],
                                           taskStatus=task_status,
                                           extendInfo={'nodeId': get_host_sn()})
            logger.error(
                f'Restore job failed, task_status:{task_status}, log_detail:{log_detail}, job_id:{self._job_id}, '
                f'sub_job_id:{self._sub_job_id}.')
            output_result_file(self._pid, sub_job_detail.dict(by_alias=True))
        elif task_status == SubJobStatusEnum.COMPLETED.value:
            # Sub job finished successfully.
            sub_job_detail = self.get_sub_job_detail(progress, task_status, ReportDBLabel.SUB_JOB_SUCCESS)
            output_result_file(self._pid, sub_job_detail.dict(by_alias=True))
        else:
            # Sub job still running.
            sub_job_detail = self.get_sub_job_detail(progress, task_status, ReportDBLabel.RESTORE_SUB_START_COPY)
            output_result_file(self._pid, sub_job_detail.dict(by_alias=True))

    def get_sub_job_detail(self, progress, task_status, report_db_label):
        """Build the SubJobDetails structure reported for this sub job.

        :param progress: task progress percentage
        :param task_status: task status enum value
        :param report_db_label: label used for the reported log entry
        :return: populated SubJobDetails instance
        """
        detail = LogDetail(
            logInfo=report_db_label,
            logInfoParam=[self._sub_job_id],
            logLevel=DBLogLevel.INFO.value,
            logDetailParam=[])
        return SubJobDetails(
            taskId=self._job_id,
            subTaskId=self._sub_job_id,
            progress=progress,
            logDetail=[detail],
            taskStatus=task_status,
            extendInfo={'nodeId': get_host_sn()})

    def get_start_db_sub_job_detail(self, progress, task_status, report_db_label):
        """Build the SubJobDetails for the start-database sub job; the log
        entry also carries the recovery path (or 'original location').

        :param progress: task progress percentage
        :param task_status: task status enum value
        :param report_db_label: label used for the reported log entry
        :return: populated SubJobDetails instance
        """
        target_path = self.recover_path or "original location"
        detail = LogDetail(
            logInfo=report_db_label,
            logInfoParam=[self._sub_job_id, target_path],
            logLevel=DBLogLevel.INFO.value,
            logDetailParam=[])
        return SubJobDetails(
            taskId=self._job_id,
            subTaskId=self._sub_job_id,
            progress=progress,
            logDetail=[detail],
            taskStatus=task_status,
            extendInfo={'nodeId': get_host_sn()})

    def parse_sub_job_params(self):
        """Read priority and name from the subJob section; for the restore
        business sub jobs also read the recover_order embedded in jobInfo."""
        sub_job = SubJob(**self.params_from_file.get('subJob'))
        self.job_priority = sub_job.job_priority
        self.job_name = sub_job.job_name
        logger.info(f'Restore jobPriority:{self.job_priority}, job_name:{self.job_name}, {self._job_id}.')
        recover_job_names = (RestoreJobName.FIRST_JOB, RestoreJobName.SECOND_JOB,
                             RestoreJobName.PDB_PRE_JOB, RestoreJobName.PDB_POST_JOB)
        if self.job_name in recover_job_names:
            self.recover_order = json.loads(sub_job.job_info).get('recover_order')

    def parse_resetlogs_id_params(self):
        """Cache the comma separated list of associated log-copy job ids."""
        job_ids = self.handle_resetlogs_id()
        self.log_job_id_list = job_ids
        logger.info(f'Restore log_job_id_list:{self.log_job_id_list}, {self._job_id}.')

    def get_restore_script_param(self):
        """Assemble the parameter dict handed to the restore shell script /
        Windows restore helper. Key spellings are part of the script contract
        and must not change."""
        data_path, log_path = self.get_platform_data_and_log_path()
        is_start_db = self.params_from_file.get("job").get("extendInfo").get("isStartDB", "")
        return {
            'AppName': self.db_name,
            'OriginalInstanceName': self.original_db_instance,
            'InstanceName': self.db_instance,
            'UserName': self.db_user,
            'Password': self.db_user_pwd,
            'OracleHome': self.oracle_home,
            'OracleBase': self.oracle_base,
            'AccessOracleHome': self.access_oracle_home,  # previously unused by the shell script; now used when env vars are not configured
            'AccessOracleBase': self.access_oracle_base,
            'ASMUserName': self.asm_user,
            'ASMPassword': self.asm_user_pwd,
            'Channel': self.channels,
            'pitTime': self.pit_time,
            'pitScn': self.scn_to_script,
            'recoverTarget': self.recover_target,
            'recoverPath': self.recover_path,
            'recoverOrder': self.recover_order,
            'storType': self.storage_type,
            'ASMInstanceName': self.asm_sid_name,
            'dbType': self.db_type,
            'EncAlgo': self.enc_algo,
            'EncKey': self.enc_key,
            'pfilePID': self.pfile_pid,
            'RestoreBy': self.restore_by,
            'recoverNum': self.recover_num,
            'OracleInstallUser': self.oracle_user,
            'OracleInstallGroup': self.oracle_group,
            'GridInstallUser': self.asm_install_user,
            'DataPath': data_path,
            'MetaDataPath': self.meta_data_path,
            'LogPath': log_path,
            'JobIdList': self.log_job_id_list,
            "bctStatus": self.bct_status,
            "backupTime": self.backup_time,
            "cacheRepoPath": self.cache_repo_path,
            "targetDbRunUser": self.target_db_run_user,
            "targetDbRunUserPwd": self.target_db_run_user_pwd,
            "restoreFiles": self.restore_files,
            "isStartDB": is_start_db,
            'targetDbVersion': self.target_db_version,
            'service_ip': self.service_ip,
            'target_sub_type': self.target_sub_type,
            'port': self.port
        }

    def get_platform_data_and_log_path(self):
        """Return the (data, log) repository paths adjusted for the platform.

        On Windows the repositories are reached through the fixed bind-mount
        targets; on AIX live-mount copies through the symlink areas created by
        restore_aix_mount; otherwise the raw repository paths are returned.

        :return: tuple (data_repo_path, log_repo_path)
        """
        system_name = platform.system().lower()
        if system_name == "windows" and self.data_repo_path:
            # Use .value so a plain string (not the enum member) is handed to
            # the script, consistent with restore_windows_mount().
            data_repo_path = PluginPathConstant.WINDOWS_ORACLE_RESTORE_DATA_FILE_PATH.value
        else:
            data_repo_path = self.data_repo_path
        if system_name == "windows" and self.log_repo_path:
            log_repo_path = PluginPathConstant.WINDOWS_ORACLE_RESTORE_LOG_FILE_PATH.value
        else:
            log_repo_path = self.log_repo_path
        if system_name == Platform.AIX and "livemount" in self.data_repo_path:
            if self.data_repo_path:
                data_repo_path = os.path.join(PluginPathConstant.AIX_RESTORE_DATA,
                                              os.path.basename(self.data_repo_path))
                logger.info(f"data_path:{data_repo_path}")
            if self.log_repo_path:
                log_repo_path = os.path.join(PluginPathConstant.AIX_RESTORE_LOG, os.path.basename(self.log_repo_path))
                logger.info(f"log_path:{log_repo_path}")
        return data_repo_path, log_repo_path

    def get_restore_script_param_pdb(self):
        """Return the PDB-specific restore parameters."""
        pdb_params = dict(
            pdb_names=self.pdb_names,
            is_pdb_restore=self.pdb_restore,
            resetlogs_id=self.resetlogs_id,
        )
        return pdb_params

    def get_init_assist_ins_param(self):
        """Return the parameters used to initialise the assist instance."""
        return dict(copySubType=self.copy_sub_type, parent_subtype=self.parent_subtype)

    def umount_bind_backup_path(self):
        """Unbind the Windows restore bind mounts and remove their target
        directories. No-op on non-Windows platforms."""
        if platform.system().lower() != Platform.WINDOWS:
            return
        log_target = PluginPathConstant.WINDOWS_ORACLE_RESTORE_LOG_FILE_PATH.value
        data_target = PluginPathConstant.WINDOWS_ORACLE_RESTORE_DATA_FILE_PATH.value
        if self.log_repo_path:
            umount_bind_path(log_target)
        if self.data_repo_path:
            umount_bind_path(data_target)
        # The fixed target directories are always removed.
        delete_path(log_target)
        delete_path(data_target)

    def restore_post_job(self):
        """
        Restore post sub job: clean up temporary cache files and undo the
        platform-specific repository access set up for the restore.
        """
        self.get_repo_info()
        # Clear every cache file except the post-job progress files; those are
        # removed after their own sub job completes.
        logger.info(f'Start delete all cache file, path:{self.cache_repo_path}, job_id:{self._job_id}, '
                    f'sub_job_id:{self._sub_job_id}.')
        clear_repository_dir_exclude_prefix(self.cache_repo_path, self._job_id,
                                            CacheRepoFilename.RESTORE_POST_PROGRESS)
        # Write a flag file to the cache repository for progress queries.
        result_file = os.path.join(self.cache_repo_path,
                                   f'{CacheRepoFilename.RESTORE_POST_PROGRESS}_{self._job_id}_{self._sub_job_id}')
        touch_file(result_file)
        if not self.is_storage_snapshot_copy():
            self.umount_bind_backup_path()
        clean_aix_restore_ln_path()
        response = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        output_result_file(self._pid, response.dict(by_alias=True))

    def abort_job(self):
        """
        Abort the restore task: create an abort flag file, kill the restore
        process belonging to this job, then rename the flag to abort.done and
        report success.
        """
        # Creating the flag file marks the task as aborting.
        abort_file = os.path.join(self.cache_repo_path, "abort.ing")
        touch_file(abort_file)
        pid_list = psutil.pids()

        if platform.system().lower() == "windows":
            for pid in pid_list:
                process = psutil.Process(pid)
                try:
                    cmd = process.cmdline()
                except Exception as e:
                    logger.error(e, exc_info=True)
                    continue
                # Never kill the abort job itself.
                if not cmd:
                    continue
                if 'python' == cmd[0] and self._pid in cmd:
                    continue
                if 'python' == cmd[0] and self._job_id in cmd:
                    process.kill()
                    logger.info(f"The restore task has been terminated, job_id: {self._job_id}.")
                    break
        else:
            for pid in pid_list:
                # Guard against processes exiting between pids() and here —
                # previously an unhandled NoSuchProcess aborted the abort job.
                try:
                    process = psutil.Process(pid)
                    cmd = process.cmdline()
                except Exception as e:
                    logger.error(e, exc_info=True)
                    continue
                # Never kill the abort job itself.
                if 'python3' in cmd and self._pid in cmd:
                    continue
                if 'python3' in cmd and self._job_id in cmd:
                    process.kill()
                    logger.info(f"The restore task has been terminated, job_id: {self._job_id}.")
                    break
            # Delete the LUN restore progress temp files.
            if self.is_storage_snapshot_copy():
                # Create a flag file telling the LUN tool the task is aborted.
                stop_file = f"{PluginPathConstant.LUN_TOOL_STOP_FILE}_{self._job_id}"
                logger.info(f"{stop_file}")
                su_exec_touch_cmd(stop_file, 'root')
                logger.info("delete lun progress file")

                OracleStorageRestore(self._job_id, self._sub_job_id, self._pid,
                                     self.params_from_file).delete_lun_sub_job_progress_file_by_job_id()

        os.rename(abort_file, os.path.join(self.cache_repo_path, "abort.done"))
        logger.info(f"Succeed to abort restore job, job_id: {self._job_id}.")
        response = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        output_result_file(self._pid, response.dict(by_alias=True))

    def restore_post_job_progress(self):
        """
        Query the progress of the restore post job: COMPLETED once the flag
        file written by restore_post_job exists (the flag is removed on read),
        RUNNING otherwise.
        """
        self.get_repo_info()
        result_file = os.path.join(self.cache_repo_path,
                                   f'{CacheRepoFilename.RESTORE_POST_PROGRESS}_{self._job_id}_{self._sub_job_id}')

        if not check_file_exist(result_file):
            progress = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                     progress=OracleProgress.PROGRESS_FIFTY,
                                     taskStatus=SubJobStatusEnum.RUNNING.value)
        else:
            progress = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                     progress=OracleProgress.PROGRESS_ONE_HUNDRED,
                                     taskStatus=SubJobStatusEnum.COMPLETED.value)
            # Consume the flag so subsequent queries do not re-report COMPLETED.
            os.remove(result_file)
            logger.info(f'Restore post job executed successfully. Delete all cache file successfully, '
                        f'job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
        output_result_file(self._pid, progress.dict(by_alias=True))

    def create_p_file(self):
        """Create a temporary pfile (init parameter file) for the restore.

        The file name format is required by the restore shell script. On
        non-Windows systems ownership and mode are adjusted so the Oracle user
        can read the file.

        :raises Exception: when chown/chmod of the pfile fails
        """
        logger.info(f'Start creating pfile, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
        params = json.loads(self.pfile_params)
        # Replace the instance name inside the pfile parameters.
        res_params = modify_instance_name_in_p_file(self.instances, params)
        # A plugin-generated uuid names the temporary pfile.
        self.pfile_pid = general_uuid()
        p_file_uuid = self.pfile_pid
        # The restore shell script requires this exact file name format.
        p_file_name = f'pfile{p_file_uuid}'
        if platform.system().lower() == "windows":
            p_file_path = f'{PluginPathConstant.WINDOWS_TMP_PATH.value}/{p_file_name}'
        else:
            p_file_path = f'{PluginPathConstant.NONE_ROOT_TMP_PATH.value}/{p_file_name}'
        # O_EXCL ensures the file is freshly created, never reused.
        flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
        modes = stat.S_IWUSR | stat.S_IRUSR
        with os.fdopen(os.open(p_file_path, flags, modes), 'w+') as file_content:
            for key, value in res_params.items():
                file_content.write(f'{key}={value}\n')
        if platform.system().lower() == "windows":
            logger.info(f'Create tmp pfile successfully, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            return
        try:
            change_owner_by_name(p_file_path, self.oracle_user, self.oracle_group)
        except Exception as exception:
            msg = f'Change pfile owner to {self.oracle_user}:{self.oracle_group} failed'
            logger.error(f'{msg}, exception:{exception}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            raise Exception(msg) from exception

        p_file_mode = 0o750
        try:
            change_mod(p_file_path, p_file_mode)
        except Exception as exception:
            msg = f'Change pfile permission to {p_file_mode} failed'
            logger.error(f'{msg}, exception:{exception}, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')
            raise Exception(msg) from exception

        logger.info(f'Create tmp pfile successfully, job_id:{self._job_id}, sub_job_id:{self._sub_job_id}.')

    def is_storage_snapshot_copy(self):
        """Check whether the copy was produced from a storage LUN snapshot.

        Archive copies (S3/tape) nest their extend info one level deeper, so
        lun_snapshots is read from a different location for them.

        :return: True if the copy carries lun_snapshots information.
        """
        copy_info = self.params_from_file.get("job").get("copies")[0]
        # Fixed: TAP_ARCHIVE previously lacked ".value", so the membership test
        # compared the plain type string against an enum member and never
        # matched for tape-archive copies.
        if copy_info.get("type") in [CopyDataTypeEnum.S3_ARCHIVE.value,
                                     CopyDataTypeEnum.TAP_ARCHIVE.value]:
            lun_snapshots = copy_info.get("extendInfo").get("extendInfo").get("lun_snapshots", {})
        else:
            lun_snapshots = copy_info.get("extendInfo").get("lun_snapshots", {})
        return bool(lun_snapshots)

    def is_table_restore(self):
        """Whether the current job is a table-level restore (non-empty tables field)."""
        job_extend_info = self.params_from_file.get("job").get("extendInfo")
        tables = job_extend_info.get("tables")
        if tables is None:
            return False
        return len(tables) > 0

    def is_pdb_restore(self):
        """Whether the copy's protect object carries a non-empty pdb field."""
        protect_obj = self.params_from_file.get("job").get("copies")[0].get("protectObject")
        pdb_value = protect_obj.get("extendInfo").get("pdb")
        if pdb_value is None:
            return False
        return len(pdb_value) > 0

    def parse_cluster_param_for_table_or_pdb_restore(self, job):
        """Pick the local node's instance settings out of the cluster node list.

        The node whose hostId matches the local host SN supplies db instance,
        ORACLE_HOME/BASE, group, ASM SID and listener ip/port; instance names
        of every other node are collected into self.list_cluster_other.

        :param job: restore job parameters
        """
        host_sn = get_host_sn()
        raw_instances = job.target_object.extend_info.get('instances', '[]')
        if not json.loads(raw_instances):
            # Fall back to the node info carried on the job itself.
            raw_instances = job.extend_info.get('nodesInfo', '[]')
        nodes = json.loads(raw_instances)
        other_instances = []
        logger.info(f"cluster_instance_list:{nodes},hostsn:{host_sn}")
        for node in nodes:
            if node.get('hostId', '') != host_sn:
                other_instances.append(node.get('inst_name'))
                continue
            self.db_instance = node.get('inst_name')
            self.oracle_home = node.get('oracle_home')
            self.oracle_base = node.get('oracle_base')
            self.oracle_group = node.get('oracle_group', 'oinstall')
            self.asm_sid_name = json.loads(node.get('asm_info', '{}')).get('instName')
            try:
                # Parse once and reuse for both ip and port.
                ip_infos = ast.literal_eval(node.get('oracle_ip_infos'))
                self.service_ip = ip_infos[0].get('ip')
                self.port = ip_infos[0].get('port')
            except Exception as error:
                # Best effort: a malformed oracle_ip_infos only loses ip/port.
                logger.warning(f"oracle_ip_infos parse failed,{error}")
        self.list_cluster_other = other_instances

    def parse_param_for_storage_cluster(self, job):
        """For a storage-snapshot cluster restore, load the local node's params.

        Skipped entirely for LUN restore sub jobs. Otherwise the node whose
        hostId matches the local host SN supplies db instance, ORACLE_HOME,
        ORACLE_BASE, group and ASM SID.

        :param job: restore job parameters
        """
        if self.is_lun_restore_job():
            return
        local_sn = get_host_sn()
        raw_instances = job.target_object.extend_info.get('instances', '[]')
        if not json.loads(raw_instances):
            # Fall back to the node info carried on the job itself.
            raw_instances = job.extend_info.get('nodesInfo', '[]')
        for node in json.loads(raw_instances):
            if local_sn != node.get('hostId', ''):
                continue
            self.db_instance = node.get('inst_name')
            self.oracle_home = node.get('oracle_home')
            self.oracle_base = node.get('oracle_base')
            self.oracle_group = node.get('oracle_group', 'oinstall')
            self.asm_sid_name = json.loads(node.get('asm_info', '{}')).get('instName')

    def is_lun_restore_job(self):
        """Whether the current sub job is a LUN restore sub job."""
        sub_job_dict = self.params_from_file.get('subJob')
        if not sub_job_dict:
            return False
        return SubJob(**sub_job_dict).job_name == RestoreJobName.LUN_RESTORE

    def _parse_param_for_different_deploy_type(self, job):
        """
        Parse parameters for the different deployment scenarios: single
        instance and cluster. Fills db_instance, oracle_home/base/group and
        asm_sid_name from the copy / target object depending on the source
        copy subtype and the target subtype.

        :param job: restore job parameters
        :return: None
        """
        if self.copy_sub_type == OracleDataBaseType.ORACLE_CLUSTER and \
                self.target_sub_type == OracleDataBaseType.ORACLE:
            # Cluster copy restored to a single-instance target.
            instances = json.loads(job.copies[0].protect_object.extend_info.get('instances', '[{}]'))
            self.db_instance = job.target_object.extend_info.get('inst_name', '') \
                if self.is_table_restore() \
                else instances[0].get('inst_name', '')
            self.oracle_home = job.target_object.extend_info.get('oracle_home', '')
            self.oracle_base = job.target_object.extend_info.get('oracle_base', '')
            asm_info = json.loads(instances[0].get('asm_info', '{}'))
            self.asm_sid_name = asm_info.get('instName', '')
        elif self.target_sub_type == OracleDataBaseType.ORACLE:
            # Single-instance copy restored to a single-instance target.
            self.original_db_instance = job.copies[0].protect_object.extend_info.get('inst_name', '')
            self.db_instance = job.target_object.extend_info.get('inst_name', '')
            self.oracle_home = job.target_object.extend_info.get('oracle_home', '')
            self.oracle_base = job.target_object.extend_info.get('oracle_base', '')
            self.oracle_group = job.target_object.extend_info.get('oracle_group', 'oinstall')
            self.asm_sid_name = json.loads(job.copies[0].protect_object.extend_info.get('asm_info', '{}')).get(
                'instName', '')
        elif self.target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            # RAC cluster copy restored to a RAC cluster target.
            if self.is_storage_snapshot_copy():
                self.parse_param_for_storage_cluster(job)
            elif self.is_table_restore() or self.is_pdb_restore():
                self.parse_cluster_param_for_table_or_pdb_restore(job)
            else:
                host_sn = get_host_sn()
                # Locate this host's index in target_env nodes, then use that
                # index to pick the matching entry from the protect object.
                target_nodes = job.target_env.nodes
                node_idx = self._get_node_index_by_sn(target_nodes, host_sn)
                protect_instance_json = job.copies[0].protect_object.extend_info.get('instances', '[]')
                if not json.loads(protect_instance_json):
                    protect_instance_json = job.extend_info.get('nodesInfo', '[]')

                protect_instances = json.loads(protect_instance_json)
                if len(protect_instances) < len(target_nodes):
                    raise Exception(f'Length of nodesInfo smaller than target_env nodes, job_id:{self._job_id}.')
                self.db_instance = protect_instances[node_idx].get('inst_name')
                self.oracle_home = protect_instances[node_idx].get('oracle_home')
                self.oracle_base = protect_instances[node_idx].get('oracle_base')
                self.oracle_group = protect_instances[node_idx].get('oracle_group', 'oinstall')
                self.asm_sid_name = json.loads(protect_instances[node_idx].get('asm_info', '{}')).get('instName')
        # If an instance name was explicitly specified, override with it.
        instance_name = get_instance_name_by_id(job.extend_info.get('instances', ''))
        if instance_name:
            self.db_instance = instance_name
        logger.info(f'Got params, db_instance:{self.db_instance}, oracle_home:{self.oracle_home} '
                    f'oracle_base:{self.oracle_base}, asm_sid_name:{self.asm_sid_name}, job_id:{self._job_id}.')

    def _get_node_index_by_sn(self, target_nodes, host_sn):
        for idx, node in enumerate(target_nodes):
            if node.get('id') == host_sn:
                return idx
        raise Exception(f'Can not get node index from target env info, host_sn:{host_sn}, job_id:{self._job_id}.')

    def _parse_param_for_different_restore_data(self):
        """
        Decide the restore mode among three scenarios: plain data copy,
        point-in-time, and SCN.

        Sets self.restore_by and, for time/SCN restore, self.scn_to_script.

        :return: None
        """
        if self.pit_time:
            self.restore_by = RestoreByType.TIME
            # For time-based restore the shell script expects scn '0'.
            self.scn_to_script = '0'
            return
        if self.pit_scn:
            self.restore_by = RestoreByType.SCN
            self.scn_to_script = self.pit_scn
            return
        self.restore_by = RestoreByType.COPY

    def _parse_param_for_pdb_restore(self, job):
        """
        Parse parameters for PDB-level restore.

        For a PDB copy: parent subtype, parent db name, the pdb list and ASM
        access info come from the copy's protect object; for any other copy
        only the db name is read.

        :param job: restore job parameters
        :return: None
        """
        if self.copy_sub_type != OracleDataBaseType.ORACLE_PDB:
            self.db_name = job.copies[0].protect_object.name
            return
        protect_ext = (self.params_from_file.get("job").get("copies")[0]
                       .get("protectObject").get("extendInfo"))
        self.parent_subtype = job.copies[0].protect_object.extend_info.get("parentSubType")
        self.db_name = job.copies[0].protect_object.parent_name
        self.pdb_names = ast.literal_eval(protect_ext.get("pdb"))
        self.asm_sid_name = protect_ext.get("instName", "+ASM")
        self.asm_install_user = protect_ext.get("asm_userName", "grid")
        self.pdb_restore = self.is_pdb_restore()

    def _parse_param_for_different_copy_type(self, job: RestoreJob):
        """
        Parse copy-type dependent parameters for two scenarios: archive copies
        (S3/tape), whose extend info is nested one level deeper, and data
        copies (full/incremental/differential).

        :param job: restore job parameters
        :return: None
        """
        # Fixed: TAP_ARCHIVE previously lacked ".value", so tape-archive copies
        # fell through to the data-copy branch (string vs enum member compare).
        if job.copies[0].type in [CopyDataTypeEnum.S3_ARCHIVE.value, CopyDataTypeEnum.TAP_ARCHIVE.value]:
            # Archive copy extend info is nested one level deeper.
            self.enc_algo = job.copies[0].extend_info.get('extendInfo').get('backup_algo', '')
            self.enc_key = job.copies[0].extend_info.get('extendInfo').get('backup_algo_value', '')
            data_copy_info = get_copy_info_by_type_list(
                job.copies,
                [CopyDataTypeEnum.S3_ARCHIVE, CopyDataTypeEnum.TAP_ARCHIVE])
            self.resetlogs_id = data_copy_info.extend_info.get('extendInfo').get('resetlogs_id')
        else:
            self.enc_algo = job.copies[0].extend_info.get('backup_algo', '')
            self.enc_key = job.copies[0].extend_info.get('backup_algo_value', '')
            # resetlogs_id comes from the data copy (full/incremental/differential).
            data_copy_info = get_copy_info_by_type_list(job.copies,
                                                        [CopyDataTypeEnum.FULL_COPY, CopyDataTypeEnum.INCREMENT_COPY,
                                                         CopyDataTypeEnum.DIFF_COPY])
            self.resetlogs_id = data_copy_info.extend_info.get('resetlogs_id')
        logger.info(f'Got params, resetlogs_id:{self.resetlogs_id}, job_id:{self._job_id}.')

    def _allow_first_job(self):
        """Check whether the first-stage restore is allowed on this node.

        File-level restore passes without checking the database. On Windows or
        for a PDB restore the check passes unconditionally; for a table
        restore only the instance status is checked; otherwise a root shell
        script verifies that the database is closed. The resulting
        ActionResult is written to the pid result file.
        """
        # Single-file restore does not require the database to be closed.
        if self.restore_files:
            response = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
            response.body_err = ErrorCode.SUCCESS.value
            logger.info(f'File restore, no need to close database, job_id:{self._job_id}.')
            output_result_file(self._pid, response.dict(by_alias=True))
            return
        # NOTE(review): original open question preserved — when db_password is
        # empty, should check_db_status be skipped?
        if platform.system().lower() == "windows" or self.is_pdb_restore():
            exit_status = ExecuteResultEnum.SUCCESS.value
        elif self.is_table_restore():
            # Table restore only needs the instance to be reachable.
            inst_status = check_inst_status(self._pid, self.db_instance, self.oracle_user)
            exit_status = ExecuteResultEnum.SUCCESS.value if inst_status else ExecuteResultEnum.INTERNAL_ERROR.value
        else:
            # Parameters handed to the root check-db-status script.
            script_param = {
                'InstanceName': self.db_instance,
                'AppName': self.db_name,
                'UserName': self.db_user,
                'Password': self.db_password,
                'CheckType': CheckDBTypeEnum.CheckDBClose.value,
                'OracleInstallUser': self.oracle_user,
                'AccessOracleHome': self.access_oracle_home,
                'AccessOracleBase': self.access_oracle_base
            }
            cmd = f"sh {PluginPathConstant.SCRIPTS_PATH.value}/" \
                  f"{RootCommand.ROOT_COMMAND_SCRIPT_CHECK_DB_STATUS.value} " \
                  f"{PluginPathConstant.SCRIPTS_PATH.value} {self._pid} {len(script_param)}"
            logger.info(f'Start checking whether restore is allowed in local node, cmd:{cmd}, job_id:{self._job_id}.')
            exit_status = execute_script(cmd, script_param)

        # Default to failure; flip to success only when the check passed.
        response = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value)
        if exit_status != ExecuteResultEnum.SUCCESS.value:
            response.body_err = ErrorCode.DB_NOT_CLOSED.value
            logger.error(f'Database is not closed, can not restore in this node, '
                         f'script exit status:{exit_status}, job_id:{self._job_id}.')
        else:
            response.code = ExecuteResultEnum.SUCCESS.value
            response.body_err = ErrorCode.SUCCESS.value
            logger.info(f'Database can restore in this node, job_id:{self._job_id}.')
        output_result_file(self._pid, response.dict(by_alias=True))

    def _allow_second_job(self):
        """
        The second stage starts the database directly, so there is no need to
        check whether the database is closed — always report success.

        :return: None
        """
        result = ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        output_result_file(self._pid, result.dict(by_alias=True))

    def check_tns_configured(self):
        """For a CDB copy, verify that TNS is configured for every involved PDB.

        Runs tnsping per distinct pdb_name found in the table list; any PDB
        whose name fails to resolve is reported in a failed job detail.

        :return: ScriptExitCode.SUCCESS when every PDB resolves, otherwise
                 ScriptExitCode.ERROR_RESTORE_FAILED
        """
        logger.info("Check if tns is configured start.")
        json_tables = json.loads(self.params_from_file.get("job").get("extendInfo").get("tables"))
        # Collect the distinct, non-empty PDB names referenced by the tables.
        pdb_name_list = {table.get('pdb_name', '') for table in json_tables if table.get('pdb_name', '')}
        logger.info(f"Check if tns is configured. pdb_name_list {pdb_name_list}.")
        pdb_list = []
        for pdb_name in pdb_name_list:
            cmd = f"tnsping {pdb_name}"
            if platform.system().lower() == "windows":
                ret, output = execute_windows_cmd(self._pid, cmd.split(" "), "", )
            else:
                ret, output = execute_linux_cmd(self._pid, cmd, "")
            logger.info(f"Check if TNS is configured. ret {ret}. output {output}.")
            if not ret:
                logger.error(f"exec cmd: tnsping {pdb_name} failed.  pid = {self._job_id}")
            if 'Failed to resolve name' in output:
                logger.error(f"the tns for {pdb_name} not configured.  pid = {self._job_id}")
                pdb_list.append(pdb_name)
        logger.info(f"Check if tns is configured. pdb_list {pdb_list}.")
        if not pdb_list:
            return ScriptExitCode.SUCCESS
        logger.error(f"the tns for {pdb_list} is not configured.")
        log_detail = LogDetail(logInfo=OracleReportLabel.PLUGIN_TNS_NOT_CONFIGURED_LABEL,
                               logInfoParam=[str(pdb_list)],
                               logLevel=DBLogLevel.ERROR.value,
                               logDetailParam=[])
        self.report_job_details_linux_windows(OracleProgress.PROGRESS_FIFTY, log_detail,
                                              SubJobStatusEnum.FAILED.value)
        return ScriptExitCode.ERROR_RESTORE_FAILED

    def _is_cdb_database_copy(self):
        """
        检查是否为cdb副本恢复

        @return: 是否为cdb数据库恢复
        """
        json_tables = json.loads(self.params_from_file.get("job").get("extendInfo").get("tables"))
        first_pdb_name = json_tables[0].get("pdb_name", None)
        if first_pdb_name and first_pdb_name != "":
            return True
        else:
            return False

    def report_job_details_linux_windows(self, process, log_detail, task_status):
        """Report sub-job progress via the RPC tool on Windows, otherwise via
        the common report helper.

        :param process: progress value for the sub job detail
        :param log_detail: LogDetail entry to attach
        :param task_status: SubJobStatusEnum value for the sub job
        """
        detail = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=process,
                               logDetail=[log_detail], taskStatus=task_status,
                               extendInfo={'nodeId': get_host_sn()})
        if platform.system().lower() != Platform.WINDOWS:
            report_job_details(self._pid, detail)
            return
        invoke_rpc_tool_interface(self._job_id,
                                  RpcToolInterface.REPORT_JOB_DETAIL, detail.dict(by_alias=True))
