#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import glob
import json
import locale
import os
import platform
import random
import re
import shlex
import signal
import time
import subprocess
import psutil

from common.cleaner import clear
from common.common import read_tmp_json_file, report_job_details, read_result_file, touch_file, \
    execute_cmd, execute_cmd_list, invoke_rpc_tool_interface
from common.common_models import ActionResult, LogDetail, SubJobDetails
from common.const import SubJobPolicyEnum, ExecuteResultEnum, CopyDataTypeEnum, RepositoryName, \
    DBLogLevel, SubJobStatusEnum, SubJobPriorityEnum, CMDResult, RpcToolInterface
from common.file_common import create_dir_recursive, delete_file
from common.parse_parafile import get_env_variable
from oracle import logger
from oracle.common.common import write_tmp_json_file, get_log_path_by_meta_type, parse_backup_path, asm_mount, \
    asm_startup, modify_asm_disk_string, modify_spfile_path, \
    get_recover_datafiles, recover_database_file, chown_file_path_owner, \
    login_asm_instance, get_grid_home, check_crsd_status, get_current_agent_uuid
from oracle.common.constants import ScriptExitCode, PluginPathConstant, RestoreTarget, LunToolStatus, \
    RestoreJobName, PexpectResult, Platform, ORACLEJsonConstant, RETURN_INVALID, RestoreType, RestoreByType, \
    OracleDataBaseType, ErrorCode, exit_code_error_code_map, OracleStartTypeEnum
from oracle.common.linux_common import write_file, execute_linux_rman_cmd
from oracle.common.lun_common import get_all_lun_wwn_info_in_linux_os, get_all_disk_num_unique_id_infos_in_windows_os, \
    get_lun_info_by_wwn, check_storage_device_access
from oracle.common.user_env_common import get_asm_user_env_by_linux
from oracle.common.windows_common import redirect_backup_path, modify_spfile_path_windows, \
    modify_asm_disk_string_windows, asm_startup_windows, execute_windows_rman_cmd, \
    get_grid_home_path, check_crsd_status_win, start_ora_crsd, shutdown_abort_windows, asmcmd_mount_windows, \
    modify_asm_spfile_path_windows
from oracle.schemas.oracle_schemas import SubJob
from oracle.services.restore.restore_common import exec_oracle_sql_cmd, get_register_ip, extract_ip, \
    check_hyper_metro_storage_status, login_oracle_database_restore, modify_asm_spfile_path
from oracle.services.restore.windows_sbin_func import unix_timestamp_to_date, start_open

# Interval in seconds; presumably a polling/retry pause used by restore flows
# defined later in this module -- TODO confirm usage against the full file.
SLEEP_TIME = 5


class OracleStorageRestore:
    def __init__(self, job_id, sub_job_id, pid, param_dict):
        """Parse restore job parameters and initialize the restore context.

        :param job_id: main job id
        :param sub_job_id: sub job id (may be empty for pre/gen tasks)
        :param pid: request pid, used to look up env variables / credentials
        :param param_dict: full job parameter dict delivered by the agent
        """
        self._job_id = job_id
        self._pid = pid
        self._sub_job_id = sub_job_id
        self._param_dict = param_dict
        self._job_param = param_dict.get("job", {})
        self._copy = self._job_param.get("copies", [])[0]
        self._target_obj = self._job_param.get("targetObject", {})
        self._target_sub_type = self._target_obj.get('subType', '')
        self._target_obj_auth_extend_info = self._target_obj.get("auth", {}).get("extendInfo", {})
        self._target_env = self._job_param.get("targetEnv", {})
        self._target_obj_extend_info = self._target_obj.get("extendInfo", {})
        self._copy_protect_obj_extend_info = self._copy.get("protectObject", {}).get("extendInfo", {})
        self.target_location = self._job_param.get("extendInfo", {}).get('targetLocation')
        self.pit_time = self._job_param.get("extendInfo", {}).get('restoreTimestamp', '')
        self.concurrent_requests = self._job_param.get("extendInfo", {}).get('concurrent_requests', '3')
        self.pit_scn = ''
        self.pit = ''
        # seconds between polls of the lun tool progress file
        self._query_progress_interval = 15
        self.data_repo_path = ''
        self.log_repo_path = ''
        self.cache_repo_path = ''
        self.meta_data_path = ''
        self.get_repo_info()
        self.db_name = ''
        self.db_instance = ''
        self.db_user = ''
        self.db_user_pwd = ''
        self.target_install_username = ''
        self.target_inst_name = ''
        self.target_asm_inst_name = ''
        self.target_asm_install_username = ''
        self.target_asm_install_userpwd = ''
        self.target_asm_disk_string = ''
        self.target_asm_disk_groups = ''
        self.target_cluster_inst_name = ''
        self.target_is_asm_inst = False
        self.parse_target_instance_params()
        self.parse_base_params()
        # Archive copies nest their extend info one level deeper.
        # Fix: TAP_ARCHIVE was compared without .value (unlike S3_ARCHIVE.value),
        # so tape-archive copies never matched this branch.
        if self._copy.get("type") in [CopyDataTypeEnum.S3_ARCHIVE.value,
                                      CopyDataTypeEnum.TAP_ARCHIVE.value]:
            self._copy_extend = self._copy.get("extendInfo", {}).get("extendInfo", {})
        else:
            self._copy_extend = self._copy.get("extendInfo", {})
        self.is_start_db = ''
        self.login_params = {}

    @staticmethod
    def query_lun_sub_job_progress(sub_job_id):
        """Read the progress file the lun tool writes for one sub job.

        Progress files are named report_<jobid>_<subjobid>.rep; lookup is by
        sub job id. Returns {} when no file exists or its JSON is invalid.
        """
        pattern = os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH, f"*{sub_job_id}.rep")
        matches = glob.glob(pattern)
        if not matches:
            logger.warn("lun sub job progress file not exist")
            return {}
        # the small tool owns the file content; tolerate malformed JSON
        try:
            progress = read_tmp_json_file(matches[0])
        except json.JSONDecodeError:
            logger.warn("lun sub job progress data error")
            return {}
        logger.info(f"sub_job_progress:{progress}")
        return progress

    def delete_lun_sub_job_progress_file_by_job_id(self):
        """Stop lun-tool progress reporting for this job and clean temp files.

        Progress files are named report_<jobid>_<subjobid>.rep; lookup is by
        job id and is used when aborting a task. The duplicated abort-file
        cleanup is factored into _remove_abort_files().
        """
        file_list = glob.glob(os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH, f"report_{self._job_id}*"))
        if not file_list:
            logger.warn(f"No report file!")
            self._remove_abort_files()
            return

        for file in file_list:
            try:
                sub_job_progress = read_tmp_json_file(file)
            except json.JSONDecodeError:
                logger.warn(f"Lun sub job progress data error.")
                # keeps original behavior: an unreadable progress file skips
                # the abort-file cleanup entirely
                return
            status = sub_job_progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
            is_timeout = self.query_lun_tool_progress(status, file)
            if is_timeout:
                logger.warn(f"query_lun_tool_progress timeout.")
                self.kill_lun_tool_process()
                break
            logger.info(f"lun_tool_progress stop success, file {file}")

        # remove the abort notification temp files
        self._remove_abort_files()

    def _remove_abort_files(self):
        """Delete the abort_oracle_backup_job_<jobid> notification files, if any."""
        abort_file = glob.glob(os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH,
                                            f"abort_oracle_backup_job_{self._job_id}"))
        for file in abort_file:
            if file and os.path.exists(file):
                os.remove(file)

    def query_lun_tool_progress(self, status, file):
        """Poll a lun-tool progress file until it reaches a terminal status.

        Returns True when more than 120 polling rounds elapse (about 30 min at
        the 15s interval), meaning the lun tool is stuck; False otherwise.
        """
        terminal_states = (LunToolStatus.REPORT_STATUS_STOP,
                           LunToolStatus.REPORT_STATUS_SUCC,
                           LunToolStatus.REPORT_STATUS_FAIL)
        rounds = 0
        while status not in terminal_states:
            time.sleep(self._query_progress_interval)
            try:
                progress = read_tmp_json_file(file)
            except json.JSONDecodeError:
                logger.warn(f"Lun sub job progress data error.")
                return False
            if rounds > 120:
                # still not stopped after ~30 min: the tool is stuck
                logger.warn("Lun tool stop failed : timeout!")
                return True
            status = progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
            logger.info(f"query_lun_tool_progress. status {status}")
            rounds += 1
        return False

    def kill_lun_tool_process(self):
        """Find and kill the LunOpTool process handling this job.

        A lun-tool process is identified by both 'LunOpTool' and this job id
        appearing in its command line arguments.
        """
        logger.info(f"kill_lun_tool_process start")
        for pid in psutil.pids():
            try:
                process = psutil.Process(pid)
                cmd = process.cmdline()
                if 'LunOpTool' in cmd and self._job_id in cmd:
                    process.kill()
                    logger.info(f"lun_tool_process has been terminated, job_id: {self._job_id}, pid: {pid}.")
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # fix: processes can exit (or be inaccessible) between pids()
                # and Process()/cmdline(); skip them instead of letting the
                # whole abort flow crash on an unrelated pid
                continue

    @staticmethod
    def get_local_wwn_for_disk_infos(disk_infos, copy_disk_infos):
        """Copy 'local_wwn' onto each disk from the first copy disk with the same wwn."""
        logger.info(f"get_local_wwn_for_disk_infos")
        # first occurrence wins, matching the original break-on-first-match
        first_local_wwn = {}
        for copy_disk in copy_disk_infos:
            first_local_wwn.setdefault(copy_disk.get('wwn'), copy_disk.get('local_wwn'))
        for disk in disk_infos:
            wwn = disk.get('wwn')
            if wwn in first_local_wwn:
                disk['local_wwn'] = first_local_wwn[wwn]

    @staticmethod
    def is_target_disk(is_new_restore, disk_info, disk_wwn_info):
        if is_new_restore:
            return disk_info.get('target_path') == disk_wwn_info.disk_path
        else:
            return disk_info.get('local_wwn') == disk_wwn_info.wwn

    @staticmethod
    def is_target_disk_windows(is_new_restore, disk_info, disk_wwn_info):
        if is_new_restore:
            disk_num = re.findall(r"\d+", disk_info.get('disk_path'))[0]
            return disk_num == disk_wwn_info.num
        else:
            return disk_info.get('local_wwn') == disk_wwn_info.wwn

    @staticmethod
    def exec_recover_database(child):
        """Recover every datafile the child session reports as needing recovery."""
        files_set = get_recover_datafiles(child)
        logger.info(f"exec_recover_database files_set, {files_set}.")
        for datafile in files_set:
            recover_database_file(child, datafile)

    @staticmethod
    def get_target_lun_wwn_in_linux_os(storage, disk_infos, is_new_restore):
        """Fill 'lun_wwn' on each disk by matching against host-visible luns (linux)."""
        logger.info(f"get_target_lun_wwn_in_linux_os, {is_new_restore}")
        host_luns = get_all_lun_wwn_info_in_linux_os(storage)
        for disk_info in disk_infos:
            for lun in host_luns:
                if OracleStorageRestore.is_target_disk(is_new_restore, disk_info, lun):
                    disk_info['lun_wwn'] = lun.wwn
        return disk_infos

    @staticmethod
    def get_target_lun_wwn_in_windows_os(disk_infos, is_new_restore):
        """Fill 'lun_wwn' on each disk by matching against host-visible disks (windows)."""
        logger.info(f"start get_lun_info_in_windows_os, {is_new_restore}")
        host_disks = get_all_disk_num_unique_id_infos_in_windows_os()
        for disk_info in disk_infos:
            for host_disk in host_disks:
                if OracleStorageRestore.is_target_disk_windows(is_new_restore, disk_info, host_disk):
                    disk_info['lun_wwn'] = host_disk.wwn
        return disk_infos

    @staticmethod
    def get_lun_infos_from_storage(disk_infos, device_manager):
        """Query the storage device for each disk's lun and attach it as JSON.

        Returns (disk_infos with 'lun_info' filled, list of lun info objects).
        """
        lun_infos = []
        for disk_info in disk_infos:
            lun = get_lun_info_by_wwn(disk_info.get('lun_wwn'), device_manager)
            lun_infos.append(lun)
            disk_info['lun_info'] = json.dumps(lun.__dict__)
        return disk_infos, lun_infos

    def get_target_lun_info(self, disk_infos, is_new_restore):
        """Resolve lun wwn and storage lun info for every target disk.

        With a single storage the only device is used; with two (hyper-metro)
        the device whose metro pair status checks out is used, and its index
        is persisted. Returns (disk_infos, error_code); disk_infos is [] on
        failure.
        """
        logger.info(f"start get_target_lun_info {is_new_restore}")
        storages = json.loads(self._target_obj_auth_extend_info.get("storages", "[]"))
        if not storages:
            logger.error(f"storages  null")
            return [], ScriptExitCode.ERROR_STORAGE_AUTH_INFO_FAILED
        if platform.system().lower() == Platform.WINDOWS:
            disk_infos = self.get_target_lun_wwn_in_windows_os(disk_infos, is_new_restore)
        else:
            disk_infos = self.get_target_lun_wwn_in_linux_os(storages[0], disk_infos, is_new_restore)
        if not disk_infos:
            logger.error(f"disk_infos is null")
            return [], ErrorCode.INTERNAL_ERROR
        # passwords come as a json list parallel to `storages`; parse once
        # instead of re-reading the env variable on every loop iteration
        passwords = json.loads(get_env_variable(f"job_targetObject_auth_extendInfo_storagesPwd_{self._pid}"))
        if len(storages) == 1:
            logger.info(f"get_target_lun_info storages is 1")
            storage = storages[0]
            ret, device_manager = check_storage_device_access(storage, passwords[0], self._pid)
            if not ret:
                logger.error(f"check_storage_device_access  false")
                return [], ScriptExitCode.ERROR_STORAGE_AUTH_INFO_FAILED
            disk_infos, _ = self.get_lun_infos_from_storage(disk_infos, device_manager)
            self.save_storage_device(0)
            return disk_infos, ErrorCode.SUCCESS
        # hyper-metro storage: check the metro pair status on each device
        for index, storage in enumerate(storages):
            logger.info(f"get_target_lun_info, storages {storage}")
            ret, device_manager = check_storage_device_access(storage, passwords[index], self._pid)
            if not ret:
                logger.error(f"check_storage_device_access  false")
                continue
            disk_infos, lun_infos = self.get_lun_infos_from_storage(disk_infos, device_manager)
            logger.info(f"get_target_lun_info disk_infos {disk_infos}")
            if check_hyper_metro_storage_status(lun_infos, device_manager):
                logger.info(f"check_metro_pair_storage_status True:{index}")
                self.save_storage_device(index)
                # consistency fix: the single-storage path returns
                # ErrorCode.SUCCESS, this path previously returned a bare 0
                return disk_infos, ErrorCode.SUCCESS
        logger.error(f"get_target_lun_info failed")
        return [], ScriptExitCode.ERROR_STORAGE_AUTH_INFO_FAILED

    def save_storage_device(self, storage_index):
        """Persist the index of the chosen storage device into the cache repo."""
        file_path = os.path.join(self.cache_repo_path, f'storage_index_{self._job_id}')
        payload = {'storage_index': storage_index}
        logger.info(f'save_storage_device file_path {file_path}, select_storage {payload}')
        write_tmp_json_file(file_path, self._job_id, payload)

    def get_storage_device(self):
        """Read back the storage index saved by save_storage_device; -1 when absent."""
        index_file = os.path.join(self.cache_repo_path, f'storage_index_{self._job_id}')
        saved = read_tmp_json_file(index_file)
        return saved.get('storage_index', -1)

    def exec_startup(self):
        """Issue 'startup;' via sqlplus and map the outcome to a script exit code."""
        logger.info(f"exec_startup")
        started = exec_oracle_sql_cmd(self.login_params, "startup;", PexpectResult.DATABASE_OPENED)
        return ScriptExitCode.SUCCESS if started else ScriptExitCode.ERROR_RESTORE_FAILED

    def start_ora_storage_and_check_status(self):
        """Start the ora.storage CRS resource and wait until crsd is up.

        Raises when grid home cannot be found, the crsctl command fails, or
        crsd is still down after the polling window (11 polls, 30s apart).
        """
        if self.is_no_need_start_ora_storage():
            return
        self.target_asm_install_username = self.target_asm_install_username or 'grid'
        grid_home = get_grid_home(self._pid, self.target_asm_install_username)
        if not grid_home:
            logger.error(f"grid_home is empty")
            raise Exception(f"grid_home is empty")
        crsctl_path = os.path.join(grid_home, 'bin', 'crsctl')
        cmd_list = [f"{crsctl_path} start res ora.storage -init"]
        logger.info(f"crsctl_start_ora.storage, cmd {cmd_list}")
        return_code, std_out, std_err = execute_cmd_list(cmd_list)
        if return_code != CMDResult.SUCCESS:
            logger.error(f"crsctl_start_ora.storage failed, std_out {std_out} std_err {std_err}")
            raise Exception(f"crsctl_start_ora.storage failed, pid {self._pid}, std_err {std_err}")
        # poll crsd every 30s; fix: the old count check raised "time out" even
        # when crsd came up on the final (11th) poll, because the raise fired
        # before the while condition was re-evaluated
        for _ in range(11):
            time.sleep(30)
            crsd_status = check_crsd_status(self._pid, self.target_asm_install_username)
            logger.info(f"check_crsd_status, crsd_status {crsd_status}")
            if crsd_status:
                return
        raise Exception(f"check_crsd_status failed, time out")

    def start_ora_crsd_and_check_status_win(self):
        """Windows: start the ora.crsd resource and wait until crsd is up.

        Raises when grid home cannot be found, starting crsd fails, or crsd
        is still down after the polling window (11 polls, 30s apart).
        """
        if self.is_no_need_start_ora_storage():
            return
        ret, grid_home_path = get_grid_home_path()
        if ret != ExecuteResultEnum.SUCCESS.value:
            logger.error(f"grid_home is empty")
            raise Exception(f"grid_home is empty")

        return_code = start_ora_crsd(self._pid, grid_home_path)
        if return_code != ExecuteResultEnum.SUCCESS.value:
            logger.error(f"start_ora_crsd failed")
            raise Exception(f"start_ora_crsd failed, pid {self._pid}")
        # poll crsd every 30s; fix: the old count check raised "time out" even
        # when crsd came up on the final (11th) poll
        for _ in range(11):
            time.sleep(30)
            crsd_status = check_crsd_status_win(self._pid, grid_home_path)
            logger.info(f"check_crsd_status, crsd_status {crsd_status}")
            if crsd_status:
                return
        raise Exception(f"check_crsd_status failed, time out")

    def start_ora_crsd_and_check_status(self):
        """Linux: start the ora.crsd resource as the asm user and wait for crsd.

        Raises when grid home cannot be found, the crsctl command fails, or
        crsd is still down after the polling window (11 polls, 30s apart).
        """
        if self.is_no_need_start_ora_storage():
            return
        self.target_asm_install_username = self.target_asm_install_username or 'grid'
        grid_home = get_grid_home(self._pid, self.target_asm_install_username)
        if not grid_home:
            logger.error(f"grid_home is empty")
            raise Exception(f"grid_home is empty")

        asm_user_env = get_asm_user_env_by_linux(self.target_asm_install_username)
        cmd_list = [f"su - {self.target_asm_install_username} -c '{asm_user_env}crsctl start res ora.crsd -init'"]
        logger.info(f"crsctl_start_ora.crsd, cmd {cmd_list}")
        return_code, std_out, std_err = execute_cmd_list(cmd_list)
        if return_code != CMDResult.SUCCESS:
            logger.error(f"crsctl_start_ora.crsd failed, std_out {std_out} std_err {std_err}")
            raise Exception(f"crsctl_start_ora.crsd failed, pid {self._pid}, std_err {std_err}")

        # poll crsd every 30s; fix: the old count check raised "time out" even
        # when crsd came up on the final (11th) poll
        for _ in range(11):
            time.sleep(30)
            crsd_status = check_crsd_status(self._pid, self.target_asm_install_username)
            logger.info(f"check_crsd_status, crsd_status {crsd_status}")
            if crsd_status:
                return
        raise Exception(f"check_crsd_status failed, time out")

    def exec_alter_database_open(self):
        """Run 'alter database open;' and return whether the database opened."""
        logger.info(f"exec_alter_database_open")
        open_cmd = "alter database open;"
        return exec_oracle_sql_cmd(self.login_params, open_cmd, PexpectResult.DATABASE_OPENED)

    def try_recover_database(self):
        """Log in to the target instance and recover datafiles that need it.

        Raises on login failure; always closes the sqlplus child session,
        even when recovery itself fails.
        """
        logger.info(f"recover_database start pid {self._pid}.")
        ok, child = login_oracle_database_restore(self._pid, self.target_inst_name, self.login_params,
                                                  self.target_install_username, 200)
        if not ok:
            raise Exception(f'Login database failed.')
        try:
            self.exec_recover_database(child)
        except Exception as err:
            logger.error(f"exception={err}, pid={self._pid}")
            raise err
        finally:
            logger.info(f"recover_database finally.")
            # pexpect children need kill on windows, close elsewhere
            if platform.system().lower() == Platform.WINDOWS:
                child.kill(signal.SIGTERM)
            else:
                child.close()

    def parse_target_instance_params(self):
        """Populate target instance / asm fields from the target object info."""
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            self.parse_target_instance_params_cluster()
            return
        self.target_install_username = self._target_obj_extend_info.get('installUsername', '')
        self.target_inst_name = self._target_obj_extend_info.get('inst_name', '')
        raw_asm_info = self._target_obj_extend_info.get('asm_info', {})
        if not raw_asm_info:
            return
        # asm_info present: the target runs on an asm instance
        self.target_is_asm_inst = True
        asm_info = json.loads(raw_asm_info)
        self.target_asm_inst_name = asm_info.get('instName', '')
        self.target_asm_install_username = asm_info.get('installUsername', '')
        self.target_asm_disk_string = asm_info.get('diskString', '')
        self.target_asm_disk_groups = asm_info.get('diskGroups', '')

    def parse_target_instance_params_cluster(self):
        """Populate target instance / asm fields for a cluster (RAC) target.

        The matching cluster instance is the one whose hostId equals this
        node's host id, which arrives via subJob.jobInfo.
        """
        self.target_cluster_inst_name = self._target_obj.get('name', '')
        # the scheduler passes the executing node's host id in jobInfo
        host_id = self._param_dict.get('subJob', {}).get('jobInfo', '')
        cluster_instances = self._target_obj_extend_info.get('instances', '{}')
        cluster_instances = json.loads(cluster_instances)
        for cluster_instance in cluster_instances:
            logger.info(f"parse_target_instance_params hostId {cluster_instance.get('hostId')}, host_id {host_id}")
            # NOTE(review): target_is_asm_inst is (re)set for EVERY instance, so
            # the last list element wins regardless of the hostId match below --
            # looks like it was meant to live inside the if; confirm intent.
            self.target_is_asm_inst = True if cluster_instance.get('is_asm_inst', None) == '1' else False
            if cluster_instance.get('hostId') == host_id:
                logger.info(f"parse_target_instance_params equals.")
                self.target_install_username = cluster_instance.get('installUsername', '')
                self.target_inst_name = cluster_instance.get('inst_name', '')
                asm_info = cluster_instance.get('asm_info', '{}')
                asm_info = json.loads(asm_info)
                self.target_asm_inst_name = asm_info.get('instName', '')
                self.target_asm_install_username = asm_info.get('installUsername', '')
                self.target_asm_disk_string = asm_info.get('diskString', '')
                self.target_asm_disk_groups = asm_info.get('diskGroups', '')
        logger.info(f"parse_target_instance_params_cluster target_inst_name {self.target_inst_name}")

    def modify_spfile_path(self):
        """Rewrite the spfile path after restore; for single instances also
        rewrite the asm spfile path when the copy recorded one."""
        # the global modify_spfile_path helper, not this method
        spfile_path = self.get_spfile_path()
        modify_spfile_path(self._pid, self.db_name, self.target_install_username, spfile_path)
        if self._target_sub_type != OracleDataBaseType.ORACLE:
            return
        asm_spfile_path = self.get_asm_spfile_path()
        if asm_spfile_path:
            modify_asm_spfile_path(self._pid, self.target_asm_install_username, asm_spfile_path)

    def get_asm_spfile_path(self):
        """Return the asm spfile path recorded in the copy ('' when absent)."""
        path = self._copy_extend.get(ORACLEJsonConstant.ASM_SPFILE_PATH, '')
        logger.info(f'get_asm_spfile_path {path}')
        return path

    def get_spfile_path(self):
        """Return the spfile path recorded in the copy; raise when missing."""
        path = self._copy_extend.get(ORACLEJsonConstant.COPY_SPFILE_PATH, [])
        logger.info(f'modify_spfile_path, copy_spfile_path {path}')
        if not path:
            raise Exception('modify_spfile_path is empty')
        return path

    def modify_spfile_path_windows(self):
        """Windows: rewrite the spfile path after restore; for single instances
        also rewrite the asm spfile path when the copy recorded one."""
        spfile_path = self.get_spfile_path()
        modify_spfile_path_windows(self._pid, self.db_name, spfile_path)
        if self._target_sub_type != OracleDataBaseType.ORACLE:
            return
        asm_spfile_path = self.get_asm_spfile_path()
        if asm_spfile_path:
            modify_asm_spfile_path_windows(self._pid, asm_spfile_path)

    def asm_mount_start(self):
        """Start the asm instance, mount its disk groups and set the disk string.

        Raises when disk string / disk groups are missing or login fails;
        always closes the asm child session.
        """
        if not self.target_asm_disk_string:
            raise Exception('asm_disk_string is empty')
        if not self.target_asm_disk_groups:
            raise Exception('asm disk groups is empty')
        logger.info(f"asm_mount_start pid {self._pid}")
        self.target_asm_install_username = self.target_asm_install_username or 'grid'
        ok, child = login_asm_instance(self._pid, self.target_asm_inst_name, self.target_asm_install_username,
                                       timeout=1200)
        if not ok:
            raise Exception(f'Login asm failed.')
        try:
            self.exec_asm_mount_start(child)
        except Exception as err:
            logger.error(f"exception={err}, pid={self._pid}")
            raise err
        finally:
            logger.info(f"asm_mount_start finally.")
            # pexpect children need kill on windows, close elsewhere
            if platform.system().lower() == Platform.WINDOWS:
                child.kill(signal.SIGTERM)
            else:
                child.close()

    def exec_asm_mount_start(self, child):
        """On an asm sqlplus child: start up, mount each disk group, set disk string."""
        asm_startup(child)
        groups = json.loads(self.target_asm_disk_groups)
        logger.info(f"exec_asm_mount_start, disk_groups {groups}")
        for group in groups:
            asm_mount(child, group)
        modify_asm_disk_string(child, self.target_asm_disk_string)

    def asm_mount_start_windows(self):
        """Windows: start the asm instance, mount each disk group and set the
        asm disk string.

        Raises when the disk string / disk groups are missing or when any
        asm startup/mount step fails.
        """
        if not self.target_asm_disk_string:
            raise Exception('asm_disk_string is empty')
        if not self.target_asm_disk_groups:
            raise Exception('asm disk groups is empty')
        logger.info(f"asm_mount_start_windows pid {self._pid}")
        # NOTE(review): self.asm_user / self.asm_user_pwd / self.oracle_home are
        # not assigned in the visible __init__; presumably parse_base_params()
        # (defined elsewhere in this class) sets them -- confirm.
        asm_startup_windows(self._pid, self.target_asm_inst_name, self.asm_user, self.asm_user_pwd)
        disk_groups = json.loads(self.target_asm_disk_groups)
        logger.info(f"asm_mount_start_windows, disk_groups {disk_groups}")
        for disk_group in disk_groups:
            ret = asmcmd_mount_windows(self._pid, self.target_asm_inst_name, self.oracle_home, disk_group)
            if not ret:
                raise Exception("asm_mount_start_windows failed")
        ret = modify_asm_disk_string_windows(self._pid, self.target_asm_inst_name, self.asm_user, self.asm_user_pwd,
                                             self.target_asm_disk_string)
        if not ret:
            raise Exception("modify_asm_disk_string_windows failed")

    def get_repo_info(self):
        """Resolve data / cache / log repository paths from the job's copies.

        Log copies provide the log repo path (parent directory only); data
        copies provide the data repo path and the cache repo directory.
        """
        for copy_info in self._job_param.get('copies'):
            repositories = copy_info.get("repositories", [])
            if copy_info.get('type') == CopyDataTypeEnum.LOG_COPY.value:
                # first path of the first log repository object is enough
                self.log_repo_path = get_log_path_by_meta_type(repositories)
                continue
            repo_paths = parse_backup_path(repositories)
            self.data_repo_path = repo_paths.get(RepositoryName.DATA_REPOSITORY)[0]
            self.create_cache_dir_if_not_exist(repo_paths)

    def create_cache_dir_if_not_exist(self, repo_paths):
        """Use the cache repository when present; otherwise create a cache dir
        under the data repository."""
        cache_paths = repo_paths.get(RepositoryName.CACHE_REPOSITORY)
        if cache_paths:
            self.cache_repo_path = cache_paths[0]
            return
        self.cache_repo_path = os.path.join(self.data_repo_path, f'cache_{self._job_id}')
        create_dir_recursive(self.cache_repo_path)

    def restore_prerequisite(self):
        """Pre-check before restore.

        Verifies the storage type for new-location restores and, for single
        instances, resolves the target lun info and persists it for the
        lun-restore sub jobs. Returns an ActionResult.
        """
        if self.target_location == RestoreTarget.NEW:
            if not self.check_storage_type():
                logger.error(f"Check storage type failed, pid:{self._pid}.")
                raise Exception(f"check_storage_type failed")
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            return ActionResult(code=ExecuteResultEnum.SUCCESS.value)
        # single instance: resolve the target disks' lun info
        disk_infos, error_code = self.get_target_disk_infos()
        logger.info(f"restore_prerequisite, error_code {error_code}, disk_infos {disk_infos} ")
        if not disk_infos:
            error_code = exit_code_error_code_map.get(error_code, ErrorCode.INTERNAL_ERROR)
            logger.error(f"get_target_disk_infos, error_code:{error_code}.")
            # fix: use .value like every other ActionResult code in this method
            # (the enum member itself was being passed as the code)
            return ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR.value, bodyErr=error_code.value)
        # persist source disk wwns + target lun info for the lun-restore sub jobs
        file_path = os.path.join(self.cache_repo_path, f'disk_info_{self._job_id}')
        write_tmp_json_file(file_path, self._job_id, disk_infos)
        return ActionResult(code=ExecuteResultEnum.SUCCESS.value)

    def get_target_disk_infos(self):
        """Load disk infos and resolve their target lun info.

        Original-location restores read the disks from the copy; new-location
        restores read them from the job params and also map each disk back to
        its local wwn recorded in the copy.
        """
        if self.target_location == RestoreTarget.ORIGINAL:
            copy_disks = json.loads(self._copy_extend.get("disk_infos"))
            return self.get_target_lun_info(copy_disks, False)
        target_disks = json.loads(self._job_param.get("extendInfo").get("diskInfos"))
        target_disks, error_code = self.get_target_lun_info(target_disks, True)
        copy_disks = json.loads(self._copy_extend.get("disk_infos"))
        OracleStorageRestore.get_local_wwn_for_disk_infos(target_disks, copy_disks)
        return target_disks, error_code

    def gen_sub_job(self):
        """Build the restore sub-job list.

        Lun restore jobs (cluster or single-instance flavor), a startup job on
        every database node, and -- for SCN/time restores -- one log-recovery
        job on a random node plus post-recovery startup jobs on all nodes.
        """
        snapshot_node_ids, protect_node_ids = self.get_sub_job_node_ids()
        sub_job_nodes = snapshot_node_ids or protect_node_ids
        sub_jobs = []
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            self.gen_sub_job_cluster(sub_job_nodes, sub_jobs)
        else:
            file_path = os.path.join(self.cache_repo_path, f'disk_info_{self._job_id}')
            disk_infos = read_tmp_json_file(file_path)
            logger.info(f"gen_sub_job disk_infos :{len(disk_infos)}")
            self.gen_lun_restore_sub_job(disk_infos, sub_job_nodes, sub_jobs)
        # every database node gets a startup-database sub job
        for node_id in protect_node_ids:
            sub_jobs.append(
                SubJob(jobId=self._job_id, execNodeId=node_id,
                       policy=SubJobPolicyEnum.FIXED_NODE.value,
                       jobPriority=SubJobPriorityEnum.JOB_PRIORITY_3.value,
                       jobName=RestoreJobName.STARTUP_DATABASE,
                       jobInfo=node_id))
        if self.restore_by in (RestoreByType.SCN, RestoreByType.TIME):
            # log recovery runs on exactly one node; the others just start up
            recover_node = random.choice(protect_node_ids)
            sub_jobs.append(
                SubJob(jobId=self._job_id, execNodeId=recover_node, policy=SubJobPolicyEnum.FIXED_NODE.value,
                       jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4.value, jobName=RestoreJobName.RECOVERY_LOG,
                       jobInfo=recover_node))
            for node_id in protect_node_ids:
                sub_jobs.append(
                    SubJob(jobId=self._job_id, execNodeId=node_id, policy=SubJobPolicyEnum.FIXED_NODE.value,
                           jobPriority=SubJobPriorityEnum.JOB_PRIORITY_5.value,
                           jobName=RestoreJobName.LOG_RESTORE_STARTUP_DATABASE,
                           jobInfo=node_id))

        logger.info(f"gen_sub_job sub_jobs :{sub_jobs}")
        return [obj.dict(by_alias=True) for obj in sub_jobs]

    def gen_sub_job_cluster(self, sub_job_nodes, sub_jobs):
        """Cluster flavor: queue a get-lun-info job on the production agent,
        then the lun restore jobs."""
        if self.target_location == RestoreTarget.ORIGINAL:
            disk_infos = json.loads(self._copy_extend.get('disk_infos'))
            agent_uuid = self._copy_extend.get('agent_uuid')
        else:
            job_extend = self._job_param.get('extendInfo')
            disk_infos = json.loads(job_extend.get('diskInfos'))
            agent_uuid = job_extend.get('hostUuid')
        logger.info(f"gen_sub_job_cluster disk_infos :{disk_infos} agent_uuid {agent_uuid}")
        # the production host agent runs the get-lun-info sub job
        sub_jobs.append(
            SubJob(jobId=self._job_id, policy=SubJobPolicyEnum.FIXED_NODE.value, execNodeId=agent_uuid,
                   jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value,
                   jobName=RestoreJobName.GET_LUN_INFO))
        self.gen_lun_restore_sub_job(disk_infos, sub_job_nodes, sub_jobs)

    def gen_lun_restore_sub_job(self, disk_infos, sub_job_nodes, sub_jobs):
        """Spread the luns round-robin over the nodes and append one
        lun-restore sub job per node carrying its list of disk indexes."""
        sub_job_infos = {}
        for index, _ in enumerate(disk_infos):
            # one sub job per node; luns are distributed evenly round-robin
            node_id = sub_job_nodes[index % len(sub_job_nodes)]
            # idiom: setdefault replaces the get/branch/assign grouping pattern
            sub_job_infos.setdefault(node_id, []).append(index)
        for node_id, indexes in sub_job_infos.items():
            sub_jobs.append(
                SubJob(jobId=self._job_id, execNodeId=node_id,
                       policy=SubJobPolicyEnum.FIXED_NODE.value,
                       jobName=RestoreJobName.LUN_RESTORE,
                       jobPriority=SubJobPriorityEnum.JOB_PRIORITY_2.value,
                       jobInfo=f'{indexes}'))

    def get_sub_job_node_ids(self):
        """Split the target environment nodes into two id lists.

        Returns (snapshot_node_ids, protect_node_ids): snapshot agents selected on
        the restore page vs. hosts that protect the resource.  A snapshot agent
        that also protects the resource appears in both lists.
        """
        snapshot_node_ids, protect_node_ids = [], []
        for agent in self._target_env.get('nodes'):
            agent_id = agent.get('id')
            if agent.get('extendInfo', {}).get('storage_snapshot_agent_flag', ''):
                snapshot_node_ids.append(agent_id)
                if self.is_protect_node(agent):
                    protect_node_ids.append(agent_id)
            else:
                protect_node_ids.append(agent_id)
        logger.info(
            f"get_sub_job_node_ids snapshot_node_ids :{snapshot_node_ids}, protect_node_ids :{protect_node_ids}")
        return snapshot_node_ids, protect_node_ids

    def is_protect_node(self, agent):
        """Return True when *agent* is a host agent that protects the resource."""
        agent_id = agent.get('id')
        if self._target_sub_type != OracleDataBaseType.ORACLE_CLUSTER:
            # Single instance: the protect host is the target environment itself.
            return self._target_env.get('id') == agent_id
        # Cluster: the agent protects the resource when any cluster instance
        # is declared on that host.
        cluster_instances = json.loads(self._target_obj_extend_info.get('instances', '{}'))
        return any(inst.get('hostId') == agent_id for inst in cluster_instances)

    def need_check_node_can_restore(self):
        """Return True when the local agent is one of the protect-resource hosts."""
        agents = [{'ip': node.get('endpoint'), 'id': node.get('id')}
                  for node in self._target_env.get('nodes')]
        agent_uuid = get_current_agent_uuid(agents)
        logger.info(f"need_check_node_can_restore agent_uuid :{agent_uuid}")
        return self.is_protect_node({'id': agent_uuid})

    def exe_restore(self):
        """Dispatch the current restore sub-job by its name and run it.

        Returns ScriptExitCode.SUCCESS on success, or an error exit code.
        The database password is wiped from memory on every exit path.
        """
        logger.info(f"Enter function exe_restore, pid:{self._pid}.")
        self.report_before_sub_job()
        sub_job_dict = self.restore_task_subjob_dict()
        sub_job_name = self._param_dict.get('subJob', {}).get('jobName', '')
        sub_job_func = sub_job_dict.get(sub_job_name)
        if sub_job_func is None:
            # Bug fix: the original called dict.get(...)() directly, so an unknown
            # sub-job name raised "'NoneType' is not callable" and was reported
            # only as a generic exception.  Fail fast with a clear log instead.
            logger.error(f"Restore failed, unknown sub job name:{sub_job_name}, sub_job_id:{self._sub_job_id}")
            clear(self.db_user_pwd)
            return ScriptExitCode.ERROR_RESTORE_FAILED
        try:
            ret_code = sub_job_func()
        except Exception as exception:
            logger.error(f"Restore exception {exception}, sub_job_id:{self._sub_job_id}")
            return ScriptExitCode.ERROR_RESTORE_FAILED
        finally:
            # Wipe the sensitive password on success, failure and exception paths.
            clear(self.db_user_pwd)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Restore failed, pid:{self._pid}.")
            return ret_code
        logger.info(f"function exe_restore success, pid:{self._pid}.")
        return ScriptExitCode.SUCCESS

    def restore_task_subjob_dict(self):
        """Map each restore sub-job name to its handler method."""
        return {
            RestoreJobName.GET_LUN_INFO: self.get_lun_info,
            RestoreJobName.LUN_RESTORE: self.lun_restore,
            RestoreJobName.STARTUP_DATABASE: self.startup_database,
            RestoreJobName.LOG_RESTORE_STARTUP_DATABASE: self.log_restore_startup_database,
            RestoreJobName.RECOVERY_LOG: self.recovery_log,
        }

    def startup_database(self):
        """Start and open the database using the platform-specific flow."""
        if platform.system().lower() == Platform.WINDOWS:
            return self.exec_start_open_windows()
        return self.exec_start_open()

    def log_restore_startup_database(self):
        """For a log restore, (re)start the database.

        On clusters, first bring up ora CRS, fix the spfile path and abort any
        running instance before the startup.
        """
        logger.info(f"log_restore_startup_database")
        is_cluster = self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER
        if platform.system().lower() == Platform.WINDOWS:
            if is_cluster:
                self.start_ora_crsd_and_check_status_win()
                self.modify_spfile_path_windows()
                shutdown_abort_windows(self._pid, self.target_inst_name, self.db_user, self.db_user_pwd)
            started = start_open(self._pid, self.target_inst_name, self.db_user, self.db_user_pwd)
        else:
            if is_cluster:
                self.start_ora_crsd_and_check_status()
                self.modify_spfile_path()
                exec_oracle_sql_cmd(self.login_params, "shutdown abort;", PexpectResult.SHUTDOWN, 60)
            started = exec_oracle_sql_cmd(self.login_params, "startup;", PexpectResult.DATABASE_OPENED)
        if not started:
            logger.error(f"log_restore_startup_database failed")
            return ScriptExitCode.ERROR_RESTORE_FAILED
        return ScriptExitCode.SUCCESS

    def exec_start_open_windows(self):
        """Windows single-instance flow: fix spfile path, mount ASM if present,
        then optionally start and open the database."""
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            return self.exec_start_open_windows_cluster()
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        logger.info(f"function exec_start_open_windows, sub_job_id:{sub_job_id}.")
        self.modify_spfile_path_windows()
        if self.target_asm_inst_name:
            self.asm_mount_start_windows()
        # NOTE(review): SCN/time restores return here without opening the database
        # — presumably handled by a later recovery step; confirm against callers.
        if self.restore_by in (RestoreByType.SCN, RestoreByType.TIME):
            return ScriptExitCode.SUCCESS
        if self.is_start_db == str(OracleStartTypeEnum.NOT_START_DB.value):
            return ScriptExitCode.SUCCESS
        if start_open(self._pid, self.target_inst_name, self.db_user, self.db_user_pwd):
            return ScriptExitCode.SUCCESS
        return ScriptExitCode.ERROR_RESTORE_FAILED

    def exec_start_open_windows_cluster(self):
        """Windows cluster flow: mount ASM, fix the spfile path and open the database."""
        if self.target_asm_inst_name:
            self.asm_mount_start_windows()
        if self.restore_by in (RestoreByType.SCN, RestoreByType.TIME):
            return ScriptExitCode.SUCCESS
        self.modify_spfile_path_windows()
        started = start_open(self._pid, self.target_inst_name, self.db_user, self.db_user_pwd)
        return ScriptExitCode.SUCCESS if started else ScriptExitCode.ERROR_RESTORE_FAILED

    def exec_start_open(self):
        """Linux single-instance flow: mount file systems / ASM, then start and
        open the database (falling back to recover + open on startup failure)."""
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            return self.exec_start_open_cluster()
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        logger.info(f"function exec_start_open, sub_job_id:{sub_job_id}.")
        self.exec_mount_no_asm()
        if self.target_asm_inst_name:
            self.modify_spfile_path()
            self.asm_mount_start()
        if self.restore_by in (RestoreByType.SCN, RestoreByType.TIME):
            return ScriptExitCode.SUCCESS
        need_start = self.is_start_db != str(OracleStartTypeEnum.NOT_START_DB.value)
        if need_start and self.exec_startup() == ScriptExitCode.SUCCESS:
            return ScriptExitCode.SUCCESS
        # Plain startup failed (or was skipped): recover then open explicitly.
        self.try_recover_database()
        if self.exec_alter_database_open():
            return ScriptExitCode.SUCCESS
        return ScriptExitCode.ERROR_RESTORE_FAILED

    def exec_mount_no_asm(self):
        """Mount every non-ASM disk of the copy by UUID.

        Raises Exception when a mount command fails.
        """
        logger.info(f"function exec_mount_no_asm, job_id:{self._job_id}.")
        if self.target_location == RestoreTarget.ORIGINAL:
            disk_infos = json.loads(self._copy_extend.get("disk_infos"))
            uuid_key = 'disk_uuid'
        else:
            # New-location restore carries the UUID under a different key.
            disk_infos = json.loads(self._job_param.get("extendInfo").get("diskInfos"))
            uuid_key = 'target_disk_uuid'
        for disk_info in disk_infos:
            disk_uuid = disk_info.get(uuid_key, '')
            mount_point = disk_info.get('mount_point', '')
            if not disk_uuid or not mount_point:
                continue
            cmd = f"mount UUID={disk_uuid} {mount_point}"
            logger.info(f"function exec_mount_no_asm, cmd:{cmd}.")
            return_code, std_out, std_err = execute_cmd(cmd)
            if return_code != CMDResult.SUCCESS.value:
                raise Exception(f"exec_mount_no_asm error cmd {cmd}, std_out {std_out} std_err {std_err}")

    def exec_start_open_cluster(self):
        """Linux cluster flow: mount ASM, fix the spfile path, then start and open."""
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        logger.info(f"function exec_start_open_cluster, sub_job_id:{sub_job_id}.")
        if self.target_asm_inst_name:
            self.asm_mount_start()
        if self.restore_by in (RestoreByType.SCN, RestoreByType.TIME):
            return ScriptExitCode.SUCCESS
        self.modify_spfile_path()
        if self.exec_startup() == ScriptExitCode.SUCCESS:
            return ScriptExitCode.SUCCESS
        # Plain startup failed: recover the database then open it explicitly.
        self.try_recover_database()
        if self.exec_alter_database_open():
            return ScriptExitCode.SUCCESS
        return ScriptExitCode.ERROR_RESTORE_FAILED

    def get_lun_info(self):
        """Query the target disk lun infos and persist them into the cache repo.

        Returns ScriptExitCode.SUCCESS, or the query's error code on failure.
        """
        disk_infos, error_code = self.get_target_disk_infos()
        logger.info(f"get_lun_info, error_code {error_code}, disk_infos {disk_infos} ")
        if not disk_infos:
            logger.error(f'job id: {self._job_id}, get_lun_info,failed {error_code}')
            return error_code
        # Persist source-disk wwn / target lun info into the cache repository
        # for the lun restore sub-jobs.
        file_path = os.path.join(self.cache_repo_path, f'disk_info_{self._job_id}')
        write_tmp_json_file(file_path, self._job_id, disk_infos)
        return ScriptExitCode.SUCCESS

    def report_before_sub_job(self):
        """Report a 'sub task started' RUNNING detail for the local node."""
        host_ip_list = [node.get("endpoint", "") for node in self._target_env.get('nodes')]
        # Prefer the registered agent ip; fall back to the first extracted local ip.
        local_ip = get_register_ip(host_ip_list) or extract_ip()[0]
        logger.info(f'report_before_sub_job  {local_ip}')
        log_detail = LogDetail(logInfo="agent_start_execute_sub_task_success_label",
                               logInfoParam=[local_ip, self._sub_job_id],
                               logLevel=DBLogLevel.INFO.value)
        job_detail = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                   logDetail=[log_detail], taskStatus=SubJobStatusEnum.RUNNING.value)
        if platform.system().lower() == Platform.WINDOWS:
            invoke_rpc_tool_interface(self._job_id,
                                      RpcToolInterface.REPORT_JOB_DETAIL, job_detail.dict(by_alias=True))
        else:
            report_job_details(self._pid, job_detail)

    def lun_restore(self):
        """Restore luns from storage snapshots by driving the external LunOpTool.

        Spawns the tool as a subprocess, confirms it is alive, then feeds the
        restore request JSON over its stdin and polls for the result.
        Returns ScriptExitCode.SUCCESS or an error exit code.
        """
        # LUN storage snapshot restore (calls the lun tool interface to restore luns).
        storage_index = self.get_storage_device()
        logger.info(f'start lun restore {storage_index}')
        storage = json.loads(self._target_obj_auth_extend_info.get("storages", "[]"))[storage_index]
        # Storage password is fetched from the per-pid environment, never from params.
        password = json.loads(get_env_variable(f"job_targetObject_auth_extendInfo_storagesPwd_{self._pid}"))[
            storage_index]
        ret, device_manager = check_storage_device_access(storage, password, self._pid)
        if not ret:
            logger.error(f'lun restore, device_manager not access')
            return ScriptExitCode.ERROR_STORAGE_AUTH_INFO_FAILED

        restore_disk_infos = self.get_restore_disk_infos()
        if not restore_disk_infos:
            logger.error(f'job id: {self._job_id},get_restore_disk_infos failed')
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED
        if not self.data_repo_path:
            logger.error(f'job id: {self._job_id}, lun restore cmd failed, data_repo_path is null')
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED

        protocol = storage.get('transport_protocol', 'iscsi')
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        # Assemble the tool command line; the request JSON itself is passed
        # via stdin (tool_args2), not on the command line.
        tool_args1 = (f"{PluginPathConstant.LUN_TOOL} -j {sub_job_id} -t restore -d {self.data_repo_path} "
                      f"-p {protocol} -c {self.concurrent_requests}")
        tool_args2 = f"{self.gen_lun_snapshot_restore_json_str(restore_disk_infos, storage, device_manager)}"
        logger.info(f"lun restore tool_args1: {tool_args1}")
        tool_args = shlex.split(tool_args1)

        # Launch the tool subprocess.
        logger.info(f"Start to run LunOpTool. Tool args : {tool_args}.")
        tool_process = subprocess.Popen(tool_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, encoding="utf-8")
        # Wait, then verify the tool process actually started before writing to
        # its stdin.  The data_repo_path in the cmdline disambiguates our instance
        # from other concurrent LunOpTool runs.
        time.sleep(SLEEP_TIME)
        process_name = 'LunOpTool'
        tool_not_running = True
        for process in psutil.process_iter(['pid', 'name']):
            if process_name in process.info['name'] and self.data_repo_path in process.cmdline():
                tool_not_running = False
                logger.info(f"LunOpTool is running, process id: {process.info['pid']}.")
                time.sleep(SLEEP_TIME)
                tool_process.stdin.write(f"{tool_args2}\n")
                tool_process.stdin.flush()
                break
        if tool_not_running:
            logger.error(f'job id: {self._job_id}, restore failed, LunOpTool is not running!')
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED

        # Block until the tool reports a terminal status (see wait_restore_result).
        if not self.wait_restore_result():
            logger.error(f'job id: {self._job_id}, lun restore result failed')
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED
        return ScriptExitCode.SUCCESS

    def get_restore_disk_infos(self):
        """Resolve this sub-job's disk descriptors from the cached disk list.

        The sub-job's jobInfo holds a stringified index list such as '[0, 2]';
        each index selects one entry from the cached disk infos.

        :return: list of disk descriptors, or [] when any index is invalid.
        """
        diskinfo_index = self._param_dict.get('subJob', {}).get('jobInfo', [])
        diskinfo_index_list = diskinfo_index.strip(']').strip('[').split(',')
        disk_infos = self.get_disk_infos_from_cache()
        logger.info(f"lun restore disk_infos: {disk_infos}, disk_info_index: {diskinfo_index_list}")
        restore_disk_infos = []
        for index in diskinfo_index_list:
            try:
                restore_disk_infos.append(disk_infos[int(index)])
            except (KeyError, IndexError, ValueError):
                # Bug fix: indexing a list raises IndexError (not KeyError) on an
                # out-of-range index, and int() raises ValueError on a malformed
                # token; the original only caught KeyError and let those escape.
                logger.error(f"lun_restore, disk_infos index error")
                return []
        return restore_disk_infos

    def get_disk_infos_from_cache(self):
        """Load the cached disk infos; fall back to the windows-style cache layout."""
        logger.info(f"get_disk_infos_from_cache")
        primary_path = os.path.join(self.cache_repo_path, f'disk_info_{self._job_id}')
        disk_infos = read_tmp_json_file(primary_path)
        if disk_infos:
            return disk_infos
        logger.info(f"get_disk_infos_from_cache from windows")
        resource_id = self._copy.get("protectObject", {}).get("id", '')
        # Parent of the cache repo, then <resource_id>/<job_id>/disk_info_<job_id>.
        parent_path = '/'.join(self.cache_repo_path.split('/')[:-1])
        fallback_path = os.path.join(parent_path, resource_id, self._job_id, f"disk_info_{self._job_id}")
        return read_tmp_json_file(fallback_path)

    def wait_restore_result(self):
        """Poll the lun tool progress until it leaves the PREP/PROG states.

        :return: True when the tool ends in a non-failed state, False otherwise.
        """
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        logger.info(f"lun_restore start sub_job_id : {sub_job_id}")
        progress = self.query_lun_sub_job_progress(sub_job_id)
        status = progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
        logger.info(f"lun_restore start status : {status}, progress : {progress}")
        count = 0
        while status in (LunToolStatus.REPORT_STATUS_PREP, LunToolStatus.REPORT_STATUS_PROG):
            time.sleep(self._query_progress_interval)
            progress = self.query_lun_sub_job_progress(sub_job_id)
            if progress == {} and count > 100:
                # Still no progress file after ~25 min: the lun tool is broken.
                logger.error("lun tool exec failed")
                return False
            status = progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
            logger.info(f"lun_restore status : {status}, progress : {progress}")
            count += 1
        return status != LunToolStatus.REPORT_STATUS_FAIL

    def gen_lun_snapshot_restore_json_str(self, restore_disk_infos, storage, device_manager):
        """Build the one-line JSON payload handed to LunOpTool over stdin.

        :param restore_disk_infos: disk descriptors, each carrying a JSON 'lun_info'
            string plus 'local_wwn' (pre-snapshot) and 'wwn' (current snapshot)
        :param storage: storage configuration dict ('port', 'username', ...)
        :param device_manager: authenticated session exposing storage_ip and
            storage_password
        :return: compact JSON string describing the snapshot restore request
        """
        param = dict()
        logger.info(f"device_manager.storage_ip:{device_manager.storage_ip} {self._job_id}")
        lun_list = []
        for disk_info in restore_disk_infos:
            lun_info = json.loads(disk_info.get('lun_info'))
            lun = {'parentLunId': lun_info.get('lun_id'), 'parentLunWWN': lun_info.get('wwn')}
            lun['preSnapshotInfo'] = {
                'storageSnapId': '',
                'snapshotName': '',
                'snapshotWwn': disk_info.get('local_wwn'),
                'status': ''
            }
            lun['curSnapshotInfo'] = {
                'storageSnapId': '',
                'snapshotName': '',
                'snapshotWwn': disk_info.get('wwn')
            }
            lun_list.append(lun)
        # Renamed from 'storage' to avoid shadowing the parameter.
        storage_param = {
            'ip': device_manager.storage_ip,
            'port': int(storage.get('port')),
            'username': storage.get('username'),
            'password': device_manager.storage_password,
            'enableCert': 0,
            'storageType': 'dorado',
            'lunList': lun_list
        }
        param['storages'] = [storage_param]
        logger.info(f"gen_lun_snapshot_restore_json_str: {param}")
        # Bug fix: json.dumps(param).replace(' ', '') also stripped spaces inside
        # string VALUES (e.g. a password containing a space), corrupting the
        # payload.  Compact separators remove only the formatting whitespace.
        return json.dumps(param, separators=(',', ':'))

    def check_storage_type(self):
        """For a new-location restore, verify storage compatibility.

        ASM copies may only be restored to ASM targets, and every target disk must
        be at least as large as its source disk.  Returns True when compatible.
        """
        logger.info(f"Enter function check_storage_type, pid:{self._pid}.")
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            cluster_instances = json.loads(self._copy_protect_obj_extend_info.get('instances', '{}'))
            protect_obj_is_asm = any(
                inst.get('is_asm_inst', None) == '1' for inst in cluster_instances)
            if protect_obj_is_asm:
                logger.info(f"cluster protect storage type is asm")
        else:
            protect_obj_is_asm = self._copy_protect_obj_extend_info.get('is_asm_inst', None) == '1'
            logger.info(f"protect storage type is asm")
        if protect_obj_is_asm != self.target_is_asm_inst:
            logger.error(f"check_storage_type failed, pid:{self._pid}")
            return False
        # New-location restore: verify every target disk can hold the source disk.
        disk_infos = json.loads(self._job_param.get("extendInfo").get("diskInfos"))
        for disk_info in disk_infos:
            if disk_info.get("target_size") < disk_info.get("disk_size"):
                logger.error(f"check_storage_type failed, target_size not enough disk_info is {disk_info}")
                return False
        return True

    def parse_base_params(self):
        """Reset every restore-related attribute to its empty/default value."""
        # Attributes whose default is the empty string.
        string_attrs = (
            'oracle_home', 'oracle_base', 'database_role', 'channels',
            'recover_target', 'recover_path', 'recover_order', 'storage_type',
            'asm_sid_name', 'pfile_pid', 'pfile_name', 'oracle_user',
            'ora_grid_user', 'backup', 'archive', 'log_job_id_list',
            'bct_status', 'asm_user', 'asm_user_pwd', 'asm_install_user',
            'restore_by', 'backup_path_list', 'main_backup_path', 'additional',
            'backup_all_version', 'upgrade', 'resetlogs_id', 'snapshot_time',
        )
        for attr in string_attrs:
            setattr(self, attr, '')
        # Integer flags and counters.
        self.channels_int = 4
        self.task_type = 0
        self.db_is_cluster = 0
        self.copy_restore = 0
        self.is_enc_bk = 0
        # Parameter dicts — each must be a distinct object.
        self.restore_params = {}
        self.rman_params = {}
        self.sql_params = {}

    def get_restore_vars(self, params):
        """Copy restore parameters from *params* onto the instance and build the
        sqlplus login parameter dict.

        :param params: dict of raw restore parameters; a missing key raises
            KeyError, matching the original direct indexing.
        """
        attr_to_key = {
            'db_name': 'AppName',
            'db_instance': 'InstanceName',
            'db_user': 'UserName',
            'db_user_pwd': 'Password',
            'oracle_home': 'OracleHome',
            'oracle_base': 'OracleBase',
            'asm_user': 'ASMUserName',
            'asm_user_pwd': 'ASMPassword',
            'channels': 'Channel',
            'pit_time': 'pitTime',
            'pit_scn': 'pitScn',
            'recover_target': 'recoverTarget',
            'recover_path': 'recoverPath',
            'recover_order': 'recoverOrder',
            'storage_type': 'storType',
            'asm_sid_name': 'ASMInstanceName',
            'pfile_pid': 'pfilePID',
            'restore_by': 'RestoreBy',
            'oracle_user': 'OracleInstallUser',
            'asm_install_user': 'GridInstallUser',
            'backup': 'DataPath',
            'meta_data_path': 'MetaDataPath',
            'archive': 'LogPath',
            'log_job_id_list': 'JobIdList',
            'bct_status': 'bctStatus',
            'is_start_db': 'isStartDB',
        }
        for attr, key in attr_to_key.items():
            setattr(self, attr, params[key])
        self.login_params = {
            'pid': self._pid,
            "instance_name": self.target_inst_name,
            "db_install_user": self.target_install_username,
            "auth_pwd": f"job_copies_0_protectObject_auth_authPwd_{self._pid}",
            "auth_key": f"job_copies_0_protectObject_auth_authKey_{self._pid}"
        }

    def define_params(self):
        """Derive the backup paths, point-in-time target and channel count.

        Reads 'scn_dbf_max' and 'dbinfo' from the copy's additional directory and
        populates backup_path_list/main_backup_path/additional, pit/pit_scn,
        copy_restore, channels_int, resetlogs_id and snapshot_time.
        Returns ScriptExitCode.SUCCESS, or RETURN_INVALID on a bad channel count.
        """
        self.backup_path_list = redirect_backup_path(self.backup)
        # Paths are ';'-separated; the first one is the primary backup path.
        self.main_backup_path = self.backup_path_list.split(";")[0]
        self.additional = os.path.join(self.main_backup_path, "additional")
        if platform.system().lower() == "windows":
            # Windows result files are read with the platform default encoding.
            encoding = locale.getdefaultlocale()[1]
            scn_dbf_max_content = read_result_file(os.path.join(self.additional, "scn_dbf_max"),
                                                   encoding=encoding).strip()
        else:
            scn_dbf_max_content = read_result_file(os.path.join(self.additional, "scn_dbf_max")).strip()
        # First whitespace-separated token is the copy's max datafile SCN.
        pit_scn_max = scn_dbf_max_content.split()[0]
        logger.info(f"pit_scn_max={pit_scn_max}")
        if self.pit_scn and int(self.pit_scn) < int(pit_scn_max):
            logger.info(
                f"Base media snapshot used to restore is newer than PIT to restore {self.pit_time}, pid:{self._pid}.")
        if not self.pit_time and not self.pit_scn:
            # No explicit PIT requested: recover up to the copy's max SCN.
            self.pit_scn = pit_scn_max
        self.copy_restore = RestoreType.ARCHICE_RESTORE
        if not self.archive:
            self.copy_restore = RestoreType.FULL_RESTORE
        # NOTE(review): restore_by comes from params['RestoreBy'] and elsewhere is
        # compared against RestoreByType members; confirm it can ever equal int 1.
        if self.restore_by == 1:
            self.copy_restore = RestoreType.FULL_RESTORE
        if self.pit_time:
            pit_time_date = unix_timestamp_to_date(self.pit_time)
            self.pit = f"time \"to_date('{pit_time_date}', 'YYYY-MM-DD HH24:MI:SS')\""
        # A non-zero SCN takes precedence over the time-based PIT set above.
        if self.pit_scn != "0":
            self.pit = f"scn {self.pit_scn}"
        # Channel count: default 4, 0 means "use default", more than 254 is invalid.
        channels = 4
        if self.channels:
            channels = int(self.channels)
        if channels == 0:
            channels = 4
        if channels > 254:
            logger.error(f"Channels param is invalid, Channels={self.channels}, pid:{self._pid}.")
            return RETURN_INVALID
        self.channels_int = channels
        if platform.system().lower() == "windows":
            encoding = locale.getdefaultlocale()[1]
            dbinfo_content = read_result_file(os.path.join(self.additional, "dbinfo"), encoding=encoding).strip()
        else:
            dbinfo_content = read_result_file(os.path.join(self.additional, "dbinfo")).strip()
        # dbinfo is a ';'-separated record; field 8 is the resetlogs id and
        # field 10 the snapshot unix timestamp.
        dbinfo_content_list = dbinfo_content.split(';')
        self.resetlogs_id = dbinfo_content_list[8]
        logger.info(f"dbinfo_content_list:{dbinfo_content_list}")
        snapshot_timestamp = dbinfo_content_list[10]
        logger.info(f"snapshot_timestamp:{snapshot_timestamp}")
        self.snapshot_time = f"\"to_date('{unix_timestamp_to_date(snapshot_timestamp)}', 'YYYY-MM-DD HH24:MI:SS')\""
        return ScriptExitCode.SUCCESS

    def recovery_log(self):
        """Restore the control file, mount, then recover the database from log copies.

        Returns ScriptExitCode.SUCCESS on success, RETURN_INVALID otherwise.
        """
        cmd = f"startup nomount force;"
        if self._target_sub_type == OracleDataBaseType.ORACLE_CLUSTER:
            # Clusters start from the pfile staged in the data repository.
            pfile_path = os.path.join(self.data_repo_path, 'pfile', f"ebackup-{self.db_name}-pfile.ora")
            if platform.system().lower() != "windows":
                chown_file_path_owner(pfile_path, self.oracle_user)
            cmd = f"startup nomount pfile='{pfile_path}';"
        logger.info(f"startup nomount, {cmd}")
        exec_oracle_sql_cmd(self.login_params, cmd, None)
        # Bug fix: define_params() can fail (e.g. invalid channel count) and its
        # return code was previously ignored.
        if self.define_params() != ScriptExitCode.SUCCESS:
            return RETURN_INVALID
        self.define_restore_params()
        # Run the recovery steps in order, stopping at the first failure.
        for step in (self.restore_control_file, self.alter_database_mount, self.recover_database):
            if step() != ScriptExitCode.SUCCESS:
                return RETURN_INVALID
        return ScriptExitCode.SUCCESS

    def define_restore_params(self):
        """Build the parameter dicts used by the restore / rman / sqlplus helpers."""
        self.restore_params = dict(
            pid=self._pid,
            db_instance=self.db_instance,
            db_user=self.db_user,
            db_user_pwd=self.db_user_pwd,
            db_name=self.db_name,
            asm_sid_name=self.asm_sid_name,
            oracle_home=self.oracle_home,
            task_type=self.task_type,
            recover_path=self.recover_path,
            main_backup_path=self.main_backup_path,
            backup_path_list=self.backup_path_list,
            additional=self.additional,
            archive=self.log_repo_path,
        )
        self.rman_params = dict(
            is_enc_bk=self.is_enc_bk,
            instance_name=self.db_instance,
            db_install_user=self.oracle_user,
            db_user=self.db_user,
            db_password=self.db_user_pwd,
            rman_enc_section="",
        )
        self.sql_params = dict(
            is_silence=1,
            db_user=self.db_user,
            db_password=self.db_user_pwd,
        )

    def restore_control_file(self):
        """Restore the control file from the newest log copy via RMAN."""
        control_file_path = self.get_control_path()
        logger.info(f"control_file_path:{control_file_path}")
        return self.execute_rman_cmd(
            f"restore controlfile from '{control_file_path}';", "restore_control_file")

    def alter_database_mount(self):
        """Mount the database via RMAN (after the control file was restored).

        Returns the execute_rman_cmd exit code.
        """
        # Improvement: the original computed get_control_path() here and only
        # logged it — the mount statement needs no path, and the lookup does
        # filesystem mtime queries for nothing.  Dropped the redundant call.
        sql_content = "sql 'alter database mount';"
        return self.execute_rman_cmd(sql_content, "alter_database_mount")

    def recover_database(self):
        """Run the generated RMAN recover script and return its exit code."""
        return self.execute_rman_cmd(self.build_restore_sql(), "recover_database")

    def build_restore_sql(self):
        """Build the RMAN RUN block for the log recovery.

        Allocates the configured number of channels, catalogs the archive log
        copies (one per log job id, or the whole archive path as fallback),
        recovers to the PIT and opens the database with resetlogs.

        :return: the RMAN script text.
        """
        logger.info(f"Enter function build_restore_sql, pid:{self._pid}.")
        tmp_sql_content = "RUN\n{\n"
        for index in range(1, self.channels_int + 1):
            tmp_sql_content += f"    allocate channel eRestore{index:02d} type disk;\n"
        tmp_sql_content += f"    configure device type disk parallelism {self.channels_int};\n"
        logger.info(f"Start catalog archive log, log joblist :{self.log_job_id_list}, pid:{self._pid}.")
        # Bug fix: ''.split(',') returns [''] which is truthy, so the original
        # `if not jobid_id_list_arr` fallback branch was unreachable; test the
        # raw job id list instead.  (Also: logger.warn is deprecated.)
        if not self.log_job_id_list:
            logger.warning(f"jobid_id_list_arr is none")
            tmp_sql_content += f"    catalog start with '{self.archive}' noprompt;"
        else:
            for job_id in self.log_job_id_list.split(","):
                if not job_id:
                    continue
                resetlogs_id_path = os.path.join(self.archive, job_id, f"resetlogs_id{self.resetlogs_id}")
                tmp_sql_content += f"    catalog start with '{resetlogs_id_path}' noprompt;"
        tmp_sql_content += f"    recover database until {self.pit} snapshot time {self.snapshot_time};\n"
        tmp_sql_content += "sql 'alter database open resetlogs';\n"
        for index in range(1, self.channels_int + 1):
            tmp_sql_content += f"    release channel eRestore{index:02d};\n"
        tmp_sql_content += "}"
        logger.info(f"build_restore_sql tmp_sql_content:{tmp_sql_content}")
        return tmp_sql_content

    def get_control_path(self):
        """Return the controlfile path inside the most recently written log copy.

        Each log job's resetlogs directory is ranked by its filesystem mtime and
        the newest one wins.
        """
        job_mtimes = []
        for job_id in self.log_job_id_list.split(","):
            if not job_id:
                continue
            resetlogs_id_path = os.path.join(self.archive, job_id, f"resetlogs_id{self.resetlogs_id}")
            job_mtimes.append(JobIdMTime(job_id, os.path.getmtime(resetlogs_id_path)))
        sorted_job = sorted(job_mtimes, key=lambda item: item.mtime, reverse=True)
        logger.info(f"sorted_job:{sorted_job}")
        newest = sorted_job[0]
        return os.path.join(self.archive, newest.job_id, f"resetlogs_id{self.resetlogs_id}", "controlfile.ctl")

    def execute_rman_cmd(self, rman_cmd, file_prefix):
        """Write *rman_cmd* to a temp script, run it through RMAN, clean up.

        :param rman_cmd: RMAN statements to execute
        :param file_prefix: prefix for the temp .sql/.rst file names
        :return: ScriptExitCode.SUCCESS, or the RMAN helper's error code
        """
        sql_file = os.path.join(PluginPathConstant.WINDOWS_TMP_PATH, f"{file_prefix}_{self._pid}.sql")
        sql_rst = os.path.join(PluginPathConstant.WINDOWS_TMP_PATH, f"{file_prefix}_{self._pid}.rst")
        if platform.system().lower() != "windows":
            # On Linux the files live in STMP_PATH and must be created, locked
            # down (0700) and owned by the oracle user before RMAN touches them.
            sql_file = os.path.join(PluginPathConstant.STMP_PATH, f"{file_prefix}_{self._pid}.sql")
            sql_rst = os.path.join(PluginPathConstant.STMP_PATH, f"{file_prefix}_{self._pid}.rst")
            touch_file(sql_file)
            touch_file(sql_rst)
            execute_cmd(f"chmod 700 {sql_file}")
            execute_cmd(f"chmod 700 {sql_rst}")
            chown_file_path_owner(sql_file, self.oracle_user)
            chown_file_path_owner(sql_rst, self.oracle_user)
        write_file(sql_file, rman_cmd)
        try:
            if platform.system().lower() == "windows":
                # enc_type 0: no backup encryption section in the RMAN invocation.
                enc_type = 0
                logger.info("execute_windows_rman_cmd begin")
                ret_code = execute_windows_rman_cmd(self._pid, self.rman_params, enc_type, sql_file, sql_rst)
            else:
                ret_code = execute_linux_rman_cmd(self._pid, self.rman_params, 0, sql_file, sql_rst)
            if ret_code != ScriptExitCode.SUCCESS:
                # Surface RMAN's own error output (.rst file) into the log.
                if platform.system().lower() == "windows":
                    encoding = locale.getdefaultlocale()[1]
                    logger.error(f"execute rman cmd: {rman_cmd} failed, pid:{self._pid}, ret_code:{ret_code}, "
                                 f"error is {read_result_file(sql_rst, encoding=encoding)}")
                else:
                    logger.error(f"execute rman cmd: {rman_cmd} failed, pid:{self._pid}, ret_code:{ret_code}, "
                                 f"error is {read_result_file(sql_rst)}")
                return ret_code
            logger.info(f"execute rman cmd: {rman_cmd} success, pid:{self._pid}.")
            return ScriptExitCode.SUCCESS
        finally:
            # The temp script/result files are removed on every path.
            delete_file(sql_file)
            delete_file(sql_rst)

    def is_no_need_start_ora_storage(self):
        """Return True when the protected Oracle version is older than 12c
        (those versions do not need the ora storage started)."""
        version = self._copy_protect_obj_extend_info.get('version', '')
        major = str(version).split('.')[0]
        if not major.isdigit():
            # Bug fix: a missing or non-numeric version made int('') raise
            # ValueError.  Treat it conservatively as "needs to start".
            return False
        return int(major) < 12


class JobIdMTime(object):
    """Pairs a log copy job id with the mtime of its resetlogs directory so
    callers can rank the copies by recency."""

    def __init__(self, job_id, mtime):
        # job_id: log backup job identifier; mtime: POSIX timestamp (float).
        self.job_id = job_id
        self.mtime = mtime

    def __repr__(self):
        # Improvement: instances are logged inside f-strings; a readable repr
        # beats the default "<JobIdMTime object at 0x...>".
        return f"JobIdMTime(job_id={self.job_id!r}, mtime={self.mtime!r})"
