#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import locale
import os
import glob
import json
import platform
import random
import shlex
import signal
import subprocess
import time
import psutil

from common.common_models import LogDetail, SubJobDetails
from oracle import logger
from common.common import execute_cmd, read_tmp_json_file, get_previous_copy_info, read_result_file, \
    report_job_details, invoke_rpc_tool_interface
from common.const import SubJobPolicyEnum, BackupTypeEnum, RpcParamKey, DBLogLevel, SubJobStatusEnum, RpcToolInterface
from common.file_common import delete_file, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd
from oracle.common.backup_common import get_asm_spfile_path

from oracle.common.constants import PluginPathConstant, ScriptExitCode, ORACLEJsonConstant, \
    LunToolStatus, PexpectResult, OracleFilePathSql, ArchiveLogMode, LOG_IS_VALID, OracleDataBaseType, \
    BackupSubJobName, BackupTypeStr, OracleReportLabel, ErrorCode, Platform
from oracle.common.common import login_oracle_database, exec_sql_cmd, get_oracle_file_paths, get_database_info, \
    get_spfile_path, get_scn_and_time, \
    write_cert_2_file, get_current_agent_uuid, create_pfile_from_spfile_linux
from oracle.common.device_manager.device_info import DeviceInfo, ProtectGroupInfo
from oracle.common.linux_common import write_file, add_unix_timestamp, get_linux_oracle_version
from oracle.common.lun_common import get_asm_disk_info_in_linux_os, get_no_asm_disk_info_in_linux_os, \
    get_disk_info_by_asm_tool_in_windows_os, get_disk_info_no_asm_in_windows_os, get_all_lun_wwn_info_in_linux_os, \
    get_all_disk_num_unique_id_infos_in_windows_os, set_lun_info_for_disk_info, check_is_support_new_location_restore, \
    check_storage_device_access, check_lun_metro_pair_status, check_is_support_new_location_restore_windows
from oracle.common.windows_common import get_oracle_version, create_dir, create_pfile_from_spfile
from oracle.schemas.oracle_schemas import SubJob
from oracle.services.storage.storage_v6_manager import StorageV6Manager
from oracle.common.common import parse_backup_path, write_tmp_json_file, get_env_variable

# Seconds to pause while waiting for the LunOpTool subprocess to start/settle.
SLEEP_TIME = 5


class OracleStorageBackupService(object):
    def __init__(self, pid, job_id, params, backup_params=None):
        """Oracle backup service based on storage LUN snapshots.

        :param pid: request id, used for env-variable lookups and RPC reporting
        :param job_id: id of the backup job
        :param params: full parameter dict delivered by the agent framework
        :param backup_params: optional pre-parsed backup parameters
        """
        self._job_id = job_id
        self._pid = pid
        self._param_dict = params
        self._backup_param = backup_params
        self._job_dict = self._param_dict.get("job", {})
        self._job_param = self._job_dict.get("jobParam", {})
        self._job_extend_info = self._job_dict.get("extendInfo", {})
        self._protect_obj = self._job_dict.get("protectObject", {})
        self._protect_obj_extend_info = self._protect_obj.get("extendInfo", {})
        self._protect_obj_auth_extend_info = self._protect_obj.get("auth", {}).get("extendInfo", {})
        self._protect_env = self._job_dict.get("protectEnv", {})
        self._concurrent_requests = self._job_dict.get("extendInfo", {}).get("concurrent_requests", '3')
        # Repository paths; filled in by get_repository() below.
        self._data_area = ''
        self._log_area = ''
        self._meta_area = ''
        self._cache_area = ''
        self._query_progress_interval = 15
        self.get_repository()

        # Database / ASM connection details; populated later, outside __init__.
        self.db_instance = ""
        self.db_user = ""
        self.db_password = ""
        self.db_name = ""
        self.level = ""
        self.db_install_user = ""
        self.asm_id_name = ""
        self.oracle_home = ""
        self.oracle_base = ""
        self.oracle_group = ""
        self.asm_user = ""
        # RAC cluster when the protected object's subType says so.
        self.db_is_cluster = True \
            if self._protect_obj.get("subType", "") == OracleDataBaseType.ORACLE_CLUSTER else False
        self.is_enc_bk = 0
        # RMAN / backup bookkeeping fields used by the backup flow.
        self.backup_path_list = ""
        self.ora_version = ""
        self.ora_pre_version = ""
        self.ora_pre_version_extend = ""
        self.main_backup_path = ""
        self.rman_enc_section = ""
        self.log_is_backed_up = ""
        self.result_file = ""
        self.result_content = ""
        self.resetlogs_id = ""
        self.additional = ""
        self.open_mode = ""
        self.login_db_params = {}
        self.run_rman_params = {}

    def get_disk_lun_wwn_infos(self, is_asm, need_ocr_disk):
        """Collect disk/WWN info for the current OS, cache it, and return it."""
        on_windows = platform.system().lower() == "windows"
        collector = self.get_disk_wwn_info_in_windows_os if on_windows else self.get_disk_wwn_info_in_linux_os
        wwn_infos, disk_infos = collector(is_asm, need_ocr_disk)
        # Persist the result so later sub-jobs can reuse it without re-scanning disks.
        payload = {
            'wwn_infos': json.dumps(wwn_infos),
            'disk_infos': json.dumps(disk_infos),
            'is_support_new_loc_restore': not need_ocr_disk
        }
        cache_file = os.path.join(self._cache_area, f'wwn_disk_infos_{self._job_id}')
        write_tmp_json_file(cache_file, self._job_id, payload)
        return wwn_infos, disk_infos

    def get_disk_wwn_info_in_linux_os(self, is_asm, need_ocr_disk):
        """Return (wwn list, disk info list) for the database disks on Linux."""
        # Disk names look like /dev/sda1.
        if is_asm:
            disk_infos = get_asm_disk_info_in_linux_os(self.login_db_params, self.asm_user, need_ocr_disk,
                                                       self.db_is_cluster)
        else:
            disk_infos = get_no_asm_disk_info_in_linux_os(self.login_db_params)
        logger.info(f"disk_infos:{disk_infos}")
        # Map each disk to its LUN WWN as reported by the first configured storage.
        storage = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))[0]
        disk_wwn_infos = get_all_lun_wwn_info_in_linux_os(storage)
        logger.info(f"disk_wwn_infos {disk_wwn_infos}")
        wwn_info = []
        for entry in disk_infos:
            for mapping in disk_wwn_infos:
                if entry.get('disk_path') == mapping.disk_path:
                    wwn_info.append(mapping.wwn)
                    entry['wwn'] = mapping.wwn
        return wwn_info, disk_infos

    def get_disk_wwn_info_in_windows_os(self, is_asm, need_ocr_disk):
        """Return (wwn list, disk info list) for the database disks on Windows."""
        if is_asm:
            # ASM: resolve oracle's disk numbers via the ASM helper tool.
            disk_infos = get_disk_info_by_asm_tool_in_windows_os(self.login_db_params, need_ocr_disk)
        else:
            # Non-ASM: resolve disk numbers from the database files.
            disk_infos = get_disk_info_no_asm_in_windows_os(self.login_db_params)
        logger.info(f"disk_infos:{disk_infos}")
        # Map each disk number to its unique id (WWN).
        disk_num_wwn_infos = get_all_disk_num_unique_id_infos_in_windows_os()
        logger.info(f"disk_num_wwn_infos:{disk_num_wwn_infos}")
        wwn_info = []
        for entry in disk_infos:
            for mapping in disk_num_wwn_infos:
                if entry.get('disk_num') == mapping.num:
                    wwn_info.append(mapping.wwn)
                    entry['wwn'] = mapping.wwn
        return wwn_info, disk_infos

    def get_storage_device_index(self, storages):
        """Return the index of the storage device to use, preferring the cached choice."""
        cache_file = os.path.join(self._cache_area, f'storage_index_{self._job_id}')
        storage_index = read_tmp_json_file(cache_file).get('storage_index', -1)
        logger.info(f'get_storage_device from cache {storage_index}')
        if storage_index == -1:
            # No cached choice yet: pick the first currently-accessible storage, if any.
            reachable = self.get_all_access_storage_device_index(storages)
            if reachable:
                storage_index = reachable[0]
        logger.info(f'job id: {self._job_id}, get_storage_device {storage_index}')
        self.save_storage_device(storage_index)
        return storage_index

    def get_all_access_storage_device_index(self, storages):
        """Return the indexes of all storages in *storages* that are currently usable.

        A single storage only needs a connectivity check; a HyperMetro pair
        additionally needs a healthy pair status on each device.
        """
        if not storages:
            return []
        access_storage_index = []
        wwn_infos, disk_infos, _ = self.get_wwn_disk_infos()
        if len(wwn_infos) == 0 or len(disk_infos) == 0:
            return access_storage_index
        if len(storages) == 1:
            # A single storage only needs a connectivity check.
            password = json.loads(get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[0]
            # check_storage_device_access returns a (ret, device_manager) tuple, which is
            # always truthy; unpack it so a failed access attempt is actually detected.
            ret, _ = check_storage_device_access(storages[0], password, self._pid)
            if ret:
                access_storage_index.append(0)
            return access_storage_index
        # HyperMetro setup: verify the pair status on every storage device.
        for index, storage in enumerate(storages):
            password = json.loads(get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[index]
            if self.check_hyper_metro_storage_status(wwn_infos, storage, password):
                access_storage_index.append(index)
        logger.info(f"get_all_access_storage_device {access_storage_index}")
        return access_storage_index

    def check_hyper_metro_storage_status(self, wwn_infos, storage, password):
        """Return True when *storage* is reachable and check_lun_metro_pair_status passes for its LUNs."""
        ok, device_manager = check_storage_device_access(storage, password, self._pid)
        if not ok:
            logger.warning("check_metro_pair_storage_status device_manager failed")
            return False
        ok, lun_infos = StorageV6Manager.query_lun_infos_by_wwn(wwn_infos, device_manager)
        if not ok:
            logger.warning("check_metro_pair_storage_status query_lun_infos_by_wwn failed")
            return False
        ok, pairs = StorageV6Manager.query_lun_hyper_metro_pair(device_manager)
        if not ok:
            logger.warning("check_metro_pair_storage_status query_lun_hyper_metro_pair failed")
            return False
        return check_lun_metro_pair_status(lun_infos, pairs)

    def check_last_backup_storage_is_same(self):
        """Check whether an accessible storage matches the one used by the previous copy.

        :return: (matched, storage_index); index is -1 when no storage is accessible
        """
        self.prepare_params()
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        access_storage_index = self.get_all_access_storage_device_index(storages)
        if not access_storage_index:
            # None of the storage devices is usable.
            logger.error(f"select_storages is null.")
            return False, -1
        previous_data_copy = get_previous_copy_info(self._protect_obj,
                                                    [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY,
                                                     RpcParamKey.DIFF_COPY], self._job_id)
        last_storage_device = previous_data_copy.get("extendInfo", {}).get(ORACLEJsonConstant.EXEC_STORAGE, '')
        logger.info(f"last_storage_device {last_storage_device}.")
        for index in access_storage_index:
            ip_list = storages[index].get('ipList')
            logger.info(f"access_storage_index {ip_list}.")
            # Match in either direction: the recorded value may be a subset of ipList.
            if ip_list in last_storage_device or last_storage_device in ip_list:
                self.save_storage_device(index)
                logger.info(f"check_last_backup_storage_is_same True")
                return True, index
        self.save_storage_device(access_storage_index[0])
        logger.info(f"check_last_backup_storage_is_same False")
        return False, access_storage_index[0]

    def save_storage_device(self, storage_index):
        """Persist the chosen storage index into the cache repository for later sub-jobs."""
        file_path = os.path.join(self._cache_area, f'storage_index_{self._job_id}')
        select_storage = {'storage_index': storage_index}
        logger.info(f'save_storage_device file_path {file_path}, select_storage {select_storage}')
        write_tmp_json_file(file_path, self._job_id, select_storage)

    def backup_prerequisite(self):
        """Pre-backup step: create consistency LUN snapshots on the selected storage.

        :return: (success, error_code, message); message carries the missing LUN
                 ids when snapshot creation partially fails
        """
        logger.info(f'job id: {self._job_id}, backup prerequisite in storage snapshot')
        # Run the pre-task hook before taking snapshots.
        try:
            self.exec_pre_task()
        except Exception as exception:
            logger.error(f"exec pre task failed. exception {str(exception)}")
            return False, ErrorCode.ERROR_STORAGE_AUTH_INFO_FAILED.value, ''
        # Oracle releases before 12c must enter BEGIN BACKUP mode before snapshotting.
        if self.is_need_into_backup_mode() and not self.get_backup_mode_flag():
            self.alt_oracle_backup_mode('ALTER DATABASE BEGIN BACKUP;')
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        storage_index = self.get_storage_device_index(storages)
        if storage_index < 0:
            logger.error(f"storage_index <0")
            return False, ErrorCode.ERROR_STORAGE_AUTH_INFO_FAILED.value, ''
        wwn_infos, disk_infos, is_support_new_loc_restore = self.get_wwn_disk_infos()
        if not wwn_infos or not disk_infos:
            logger.error(f"get_disk_lun_wwn_infos failed")
            return False, ErrorCode.ERROR_CREATE_LUN_SNAPSHOTS.value, ''
        if not is_support_new_loc_restore:
            # OCR shares a LUN with data/log: warn the user, but continue the backup.
            logger.warn(f"is_support_new_loc_restore data false")
            self.report_ocr_data_has_same_lun_label()
        storage = storages[storage_index]
        password = json.loads(get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[
            storage_index]
        ret, device_manager = check_storage_device_access(storage, password, self._pid)
        if not ret:
            logger.error(f"device_manager not access")
            return False, ErrorCode.ERROR_STORAGE_AUTH_INFO_FAILED.value, ''
        # Clean up consistency snapshots left over on the storage by earlier jobs.
        self.clear_unuseful_lun_snapshot(storage, device_manager)
        ret, lun_infos = StorageV6Manager.query_lun_infos_by_wwn(wwn_infos, device_manager)
        if not ret:
            logger.error(f"query lun infos by wwn failed")
            return False, ErrorCode.ERROR_CREATE_LUN_SNAPSHOTS.value, ''
        # Verify the storage-pool capacity threshold before creating snapshots.
        ret = self.check_storage_pool_usage_ratio_before_create_snapshot(lun_infos, device_manager)
        if not ret:
            return False, ErrorCode.ERROR_CREATE_LUN_SNAPSHOTS.value, ''
        ret, snapshots = StorageV6Manager.create_lun_snapshots(
            self._protect_obj.get('id', ''), self._job_id, lun_infos, device_manager)
        if not ret or len(snapshots) != len(lun_infos):
            missing_lun_ids = self.get_missing_lun_ids(snapshots, lun_infos)
            logger.error(f"create lun snapshots failed, snapshots {len(snapshots)} lun_infos {len(lun_infos)}, "
                         f"missing_lun_ids {missing_lun_ids}")
            return False, ErrorCode.ERROR_CREATE_LUN_SNAPSHOTS.value, f"LUN ID: {missing_lun_ids}"
        self.append_snapshot_time()
        disk_infos = set_lun_info_for_disk_info(disk_infos, lun_infos, snapshots)
        # Snapshots succeeded: persist their details into the temporary (cache) repository.
        self.write_backup_info_to_file(disk_infos, is_support_new_loc_restore, snapshots)
        # Pre-12c releases must leave BEGIN BACKUP mode once the snapshots exist.
        if self.is_need_into_backup_mode() and self.get_backup_mode_flag():
            self.alt_oracle_backup_mode('ALTER DATABASE END BACKUP;')
        return True, 0, ''

    def get_missing_lun_ids(self, snapshots, lun_infos):
        """Return the ids of LUNs in *lun_infos* that no snapshot in *snapshots* covers."""
        missing_lun_ids = [info.lun_id for info in lun_infos
                           if all(info.lun_id != snap.parent_id for snap in snapshots)]
        logger.info(f"get_missing_lun_ids {missing_lun_ids}, {self._job_id}")
        return missing_lun_ids

    def write_backup_info_to_file(self, disk_infos, is_support_new_loc_restore, snapshots):
        """Persist snapshot, disk and spfile information into the cache repository."""
        snap_file = os.path.join(self._cache_area, f'snapshots_{self._job_id}')
        logger.info(f"snapshots is {snapshots}, file_path:{snap_file}")
        write_tmp_json_file(snap_file, self._job_id, [obj.__dict__ for obj in snapshots])
        # For a RAC cluster, record which agent produced the snapshots.
        agent_uuid = ''
        if self._protect_obj.get("subType", "") == OracleDataBaseType.ORACLE_CLUSTER:
            agent_uuid = get_current_agent_uuid(self._job_extend_info.get('agents', []))
        agent_disk_info = {
            "agent_uuid": agent_uuid, "disk_infos": json.dumps(disk_infos),
            "new_loc_restore": is_support_new_loc_restore
        }
        disk_file = os.path.join(self._cache_area, f'disk_infos_{self._job_id}')
        write_tmp_json_file(disk_file, self._job_id, agent_disk_info)
        spfile = get_spfile_path(self._pid, self.db_instance, self.db_install_user)
        spfile_file = os.path.join(self._cache_area, f'spfile_{self._job_id}')
        write_tmp_json_file(spfile_file, self._job_id, spfile)
        # Single-node ASM: also save the ASM instance spfile path when one exists.
        if self.asm_id_name and not self.db_is_cluster:
            asm_spfile = get_asm_spfile_path(self._pid, self.asm_id_name, self.asm_user)
            if asm_spfile:
                asm_file = os.path.join(self._cache_area, f'asm_spfile_{self._job_id}')
                write_tmp_json_file(asm_file, self._job_id, asm_spfile)

    def check_storage_pool_usage_ratio_before_create_snapshot(self, lun_infos, device_manager):
        """Return False (and report a failed job detail) when any storage pool exceeds the usage threshold."""
        max_usage_ratio = self._job_extend_info.get("max_storage_usage_ratio", "100") or "100"
        logger.info(f"max_storage_usage_ratio:{max_usage_ratio}")
        # One check per pool: several LUNs may share a pool, so dedupe via parent ids.
        pool_ids = {lun_info.parent_id for lun_info in lun_infos}
        for pool_id in pool_ids:
            usage_ratio, name = StorageV6Manager.query_storage_pool_capacity_usage_ratio(pool_id,
                                                                                         device_manager)
            if float(usage_ratio) <= float(max_usage_ratio):
                continue
            log_detail = LogDetail(logInfo=OracleReportLabel.ORACLE_STORAGE_USAGE_LIMIT_LABEL,
                                   logInfoParam=[name, usage_ratio, max_usage_ratio],
                                   logLevel=DBLogLevel.ERROR.value)
            job_detail = SubJobDetails(taskId=self._job_id, subTaskId="", progress=100,
                                       logDetail=[log_detail], taskStatus=SubJobStatusEnum.FAILED.value)
            if platform.system().lower() == Platform.WINDOWS:
                invoke_rpc_tool_interface(self._job_id,
                                          RpcToolInterface.REPORT_JOB_DETAIL, job_detail.dict(by_alias=True))
            else:
                report_job_details(self._job_id, job_detail)
            return False
        return True

    def report_ocr_data_has_same_lun_label(self):
        """Report a warning label telling the user OCR shares a LUN with data/log files.

        New-location restore is unsupported in that layout; the backup continues, but
        the job log carries this warning.
        """
        log_detail = LogDetail(logInfo=OracleReportLabel.STORAGE_SNAPSHOT_OCR_DATA_HAS_SAME_LUN_LABEL,
                               logInfoParam=[],
                               # Pass the enum's value, consistent with every other LogDetail here
                               # (e.g. DBLogLevel.ERROR.value in the pool-usage check).
                               logLevel=DBLogLevel.WARN.value)
        job_detail = SubJobDetails(taskId=self._job_id, subTaskId="", progress=100,
                                   logDetail=[log_detail],
                                   taskStatus=SubJobStatusEnum.RUNNING.value)
        if platform.system().lower() == Platform.WINDOWS:
            invoke_rpc_tool_interface(self._job_id,
                                      RpcToolInterface.REPORT_JOB_DETAIL, job_detail.dict(by_alias=True))
        else:
            # NOTE(review): sibling reporters pass self._job_id here — confirm _pid is intended.
            report_job_details(self._pid, job_detail)

    def get_wwn_disk_infos(self):
        """Load cached WWN/disk info, rebuilding it from the live system when absent.

        :return: (wwn_infos, disk_infos, is_support_new_loc_restore); the first two
                 come back as empty lists when their lengths disagree
        """
        cache_file = os.path.join(self._cache_area, f'wwn_disk_infos_{self._job_id}')
        cached = read_tmp_json_file(cache_file)
        wwn_infos = json.loads(cached.get('wwn_infos', '[]'))
        disk_infos = json.loads(cached.get('disk_infos', '[]'))
        is_support_new_loc_restore = cached.get('is_support_new_loc_restore')
        logger.info(f"wwn_infos:{wwn_infos}")
        if not wwn_infos and not disk_infos:
            logger.info(f"need get_disk_lun_wwn_infos")
            is_asm = bool(self.asm_id_name)
            is_support_new_loc_restore = True
            if is_asm and self._protect_obj.get("subType", "") == OracleDataBaseType.ORACLE_CLUSTER:
                is_support_new_loc_restore = self.check_is_support_new_location_restore()
            # When OCR shares an ASM disk with data/log files, new-location restore is
            # unsupported and the OCR disk has to be backed up as well.
            need_ocr_disk = not is_support_new_loc_restore
            wwn_infos, disk_infos = self.get_disk_lun_wwn_infos(is_asm, need_ocr_disk)
        if len(wwn_infos) != len(disk_infos):
            logger.error(f"disk wwn size not equal, wwn_infos is {len(wwn_infos)}, disk_infos is {len(disk_infos)}")
            return [], [], is_support_new_loc_restore
        return wwn_infos, disk_infos, is_support_new_loc_restore

    def clear_unuseful_lun_snapshot(self, storage, device_manager):
        """Delete leftover consistency snapshots on the current (and, if changed, previous) storage.

        :param storage: the storage dict selected for this backup
        :param device_manager: an authenticated device manager for *storage*
        """
        # The snapshot group referenced by the latest copy must survive; look up its id.
        pre_copy = get_previous_copy_info(
            self._protect_obj, [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY], self._job_id)
        lun_snapshots = json.loads(pre_copy.get('extendInfo', {}).get(ORACLEJsonConstant.COPY_LUN_SNAPSHOTS, '[]'))
        pre_snapshot_group_id = ''
        if lun_snapshots:
            pre_snapshot_group_id = lun_snapshots[0].get('snap_cons_group_id', '')
        self.clear_unuseful_lun_snapshot_on_storage(device_manager, pre_snapshot_group_id)
        last_storage = pre_copy.get('extendInfo', {}).get(ORACLEJsonConstant.EXEC_STORAGE, '')
        if storage.get('ipList') in last_storage or last_storage in storage.get('ipList'):
            logger.info(f"last_storage is same")
            return
        # The previous backup ran on a different storage: purge its leftovers too.
        last_device_manager = self.get_last_exec_storage_device_manager(last_storage)
        # Check for None BEFORE dereferencing: the previous storage may be unreachable now
        # (the original logged last_device_manager.storage_ip first and could raise AttributeError).
        if not last_device_manager:
            logger.info(f"last_device_info is none")
            return
        logger.info(f"last_storage {last_storage}, last_device_manager {last_device_manager.storage_ip}")
        self.clear_unuseful_lun_snapshot_on_storage(last_device_manager, '')

    def get_last_exec_storage_device_manager(self, last_storage):
        """Find and authenticate the configured storage whose ip list matches *last_storage*.

        :return: the first accessible matching device manager; otherwise whatever the
                 last access attempt returned (possibly None)
        """
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        device_manager = None
        for index, storage in enumerate(storages):
            ip_list = storage.get('ipList')
            if ip_list not in last_storage and last_storage not in ip_list:
                continue
            password = json.loads(
                get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[index]
            ret, device_manager = check_storage_device_access(storage, password, self._pid)
            if ret:
                return device_manager
        return device_manager

    def clear_unuseful_lun_snapshot_on_storage(self, device_manager, pre_snapshot_group_id):
        """Delete snapshot consistency groups of this protect object that are no longer referenced."""
        protect_group_name = f"protect_group_{self._protect_obj.get('id', '')}"
        logger.info(f"protect_group_name {protect_group_name}, pre_snapshot_group_id {pre_snapshot_group_id}")
        protect_group_info = ProtectGroupInfo()
        device_manager.query_protect_group(protect_group_name, protect_group_info)
        if not protect_group_info.protect_group_id:
            logger.info(f"protect_group_info is null")
            return
        consistency_groups = []
        device_manager.query_snapshot_consistency_groups_by_protect_group_id(protect_group_info.protect_group_id,
                                                                             consistency_groups)
        logger.info(f"consistency_groups {consistency_groups}")
        # Keep the group backing the previous copy and any group whose snapshots are still in use.
        for snap_group in consistency_groups:
            if snap_group.group_id == pre_snapshot_group_id:
                continue
            if self.is_useful_snapshot_consistency_group(snap_group, device_manager):
                continue
            logger.info(f"snapshot_consistency_group is unuseful {snap_group.group_id}")
            device_manager.delete_snapshot_consistency_group(snap_group.group_id)

    def is_useful_snapshot_consistency_group(self, snapshot_group, device_manager):
        """Return True when a *.raw backup file still references a snapshot of this group.

        Raw files in the data repository are named after the snapshot LUN WWN; a group
        whose WWN appears in some raw-file name is still in use and must not be deleted.
        """
        snapshots = []
        device_manager.query_snapshots_in_consistency_group(snapshot_group.group_id, snapshots, [])
        # Any data-area mount point holds the raw files; pick one at random.
        data_path = random.choice(self._data_area)
        raw_files = glob.glob(f"{data_path}/*.raw")
        for file in raw_files:
            filename, _ = os.path.splitext(os.path.basename(file))
            for snapshot in snapshots:
                # Log the actual values (the original messages logged a literal placeholder).
                logger.info(f"filename is {filename} snapshot wwn {snapshot.wwn}")
                if snapshot.wwn in filename:
                    logger.info(f"used_snap_wwn is {snapshot.wwn}")
                    return True
        return False

    def check_is_support_new_location_restore(self):
        """Return True when neither the data nor the log check reports a conflict."""
        if platform.system().lower() == "windows":
            data, log = check_is_support_new_location_restore_windows(self._pid, self.db_instance,
                                                                      self.db_install_user)
        else:
            data, log = check_is_support_new_location_restore(self._pid, self.db_instance,
                                                              self.db_install_user, self.asm_user)
        logger.info(f"is_support_new_loc_restore {data} {log}")
        return not (data or log)

    def is_need_into_backup_mode(self):
        """Return True for Oracle releases older than 12c, which must enter BEGIN BACKUP mode."""
        major = str(self._protect_obj_extend_info.get('version', '')).split('.')[0]
        return int(major) < 12

    def get_repository(self):
        """Resolve meta/data/log/cache repository paths from the job's repositories list."""
        repositories_info = parse_backup_path(self._job_dict.get("repositories", []))
        if not repositories_info:
            return
        self._meta_area = repositories_info.get("meta_repository", [""])[0]
        # The data repository keeps the full list of mount points.
        self._data_area = repositories_info.get("data_repository", [""])
        self._log_area = repositories_info.get("log_repository", [""])[0]
        self.get_cache_area(repositories_info)

    def get_cache_area(self, repositories_info):
        """Resolve the cache repository path, normalizing it for storage-snapshot jobs on Windows."""
        snapshot_flag = self._job_extend_info.get(ORACLEJsonConstant.STORAGE_SNAPSHOT_FLAG, '').lower()
        if snapshot_flag != 'true' or platform.system().lower() != "windows":
            self._cache_area = repositories_info.get("cache_repository", [""])[0]
            return
        # Windows cache path: \\ip\resource_id\job_id; Linux cache path: \\ip\job_id.
        # Storage-snapshot jobs share the directory between both OSes, so the
        # resource-id segment is stripped from the Windows path.
        cache_area = repositories_info.get("cache_repository", [""])[0]
        if cache_area and cache_area.split('\\')[-2] == self._protect_obj.get("id"):
            self._cache_area = cache_area.replace(self._protect_obj.get("id") + '\\', '')
            if not os.path.exists(self._cache_area):
                exec_mkdir_cmd(self._cache_area)

    def gen_sub_job(self):
        """Split the LUN snapshots across cluster nodes and build one backup sub-job per node."""
        snapshots = read_tmp_json_file(os.path.join(self._cache_area, f'snapshots_{self._job_id}'))
        logger.info(f"gen_sub_job snapshots:{snapshots}")
        snapshot_node_ids = []
        protect_node_ids = []
        for agent in self._protect_env.get('nodes'):
            protect_node_ids.append(agent.get('id'))
            if agent.get('extendInfo', {}).get('storage_snapshot_agent_flag', ''):
                snapshot_node_ids.append(agent.get('id'))
        # Prefer dedicated snapshot agents; otherwise spread over all protect-env nodes.
        sub_job_nodes = snapshot_node_ids if snapshot_node_ids else protect_node_ids
        logger.info(f"gen_sub_job sub_job_nodes:{sub_job_nodes}")
        # Round-robin: node_id -> list of indexes into `snapshots` that node backs up.
        sub_job_infos = {}
        for index in range(len(snapshots)):
            node_id = sub_job_nodes[index % len(sub_job_nodes)]
            sub_job_infos.setdefault(node_id, []).append(index)
        # One fixed-node backup sub-job per node.
        sub_jobs = [
            SubJob(jobId=self._job_id, execNodeId=node_id,
                   policy=SubJobPolicyEnum.FIXED_NODE.value,
                   jobName=BackupSubJobName.BACKUP_LUN,
                   jobInfo=f'{indexes}')
            for node_id, indexes in sub_job_infos.items()
        ]
        logger.info(f"gen_sub_job sub_jobs:{sub_jobs}")
        return [obj.dict(by_alias=True) for obj in sub_jobs]

    def get_backup_mode_flag(self):
        """Return True when the BACKUP_MODE query reports any rows (database in backup mode)."""
        status_set = get_oracle_file_paths(self._pid, self.db_instance,
                                           self.db_install_user,
                                           OracleFilePathSql.BACKUP_MODE.name)
        logger.info(f"status_set:{status_set}")
        return bool(status_set)

    def alt_oracle_backup_mode(self, cmd):
        """Execute an ALTER DATABASE BEGIN/END BACKUP statement on the protected instance.

        :param cmd: full SQL statement, e.g. 'ALTER DATABASE BEGIN BACKUP;'
        :return: True when the statement executed successfully, False otherwise
        :raises Exception: when the database login or the SQL execution itself fails
        """
        logger.info(f"Start begin alt oracle backup mode, pid: {self._pid}.")
        result, child = login_oracle_database(self._pid, self.db_instance,
                                              self.db_install_user)
        if not result:
            raise Exception(f'Login database failed.')
        try:
            exec_status, std_out = exec_sql_cmd(child, cmd, PexpectResult.ALTER_DATABASE)
        except Exception as exception:
            logger.error(f'exec cmd failed. {str(exception)}')
            raise Exception(f'exec sql cmd failed.') from exception
        finally:
            # Always terminate the spawned client session; on Windows via SIGTERM.
            if platform.system().lower() == "windows":
                child.kill(signal.SIGTERM)
            else:
                child.close()
        if not exec_status:
            logger.error(f"Failed to execute cmd: {cmd}, pid: {self._pid}.")
            return False
        return True

    def get_backup_snapshots(self, sub_job_id):
        """Return the snapshot dicts this sub-job must back up.

        The sub-job's jobInfo holds a stringified list of indexes into the cached
        snapshot list (e.g. "[0, 2]"); an empty list is returned on any parse error.
        """
        job_info = self._param_dict.get('subJob', {}).get('jobInfo', '{}')
        logger.info(f"storage_snapshot_backup {sub_job_id}, job_info: {job_info} ")
        snapshot_index = self._param_dict.get('subJob', {}).get('jobInfo', [])
        snapshot_index_list = snapshot_index.strip(']').strip('[').split(',')
        logger.info(f"storage_snapshot_backup snapshot_index_list {snapshot_index_list}")
        file_path = os.path.join(self._cache_area, f'snapshots_{self._job_id}')
        snapshots = read_tmp_json_file(file_path)
        backup_snapshots = []
        for index in snapshot_index_list:
            try:
                backup_snapshots.append(snapshots[int(index)])
            # snapshots is a list: bad indexes raise IndexError (out of range) or
            # ValueError (non-numeric token); the original only caught KeyError,
            # which a list subscript never raises.
            except (IndexError, ValueError, KeyError):
                logger.error(f"storage_snapshot_backup, snapshot index error")
                return []
        return backup_snapshots

    def storage_snapshot_backup(self, sub_job_id):
        """Back up this sub-job's LUN snapshots by driving the external LunOpTool helper.

        Launches LunOpTool, feeds it the backup description JSON over stdin once the
        process is confirmed running, then reports progress until completion.

        :param sub_job_id: id of the backup sub-job being executed
        :return: a ScriptExitCode value
        """
        file_path = os.path.join(self._cache_area, f'storage_index_{self._job_id}')
        # NOTE(review): a missing cache entry yields -1, which would index the LAST
        # storage below — presumably the prerequisite step always persisted the index; confirm.
        storage_index = read_tmp_json_file(file_path).get('storage_index', -1)
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        password = json.loads(get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[
            storage_index]
        storage = storages[storage_index]
        ret, device_manager = check_storage_device_access(storage, password, self._pid)
        if not ret:
            logger.error(f"check_storage_device_access failed")
            return ScriptExitCode.ERROR_STORAGE_AUTH_INFO_FAILED
        backup_snapshots = self.get_backup_snapshots(sub_job_id)
        if not backup_snapshots:
            logger.error(f"get_backup_snapshots failed")
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED
        logger.info(f"storage_snapshot_backup backup_snapshots {backup_snapshots}")
        # Pick the path where the snapshot backup files are generated.
        backup_path = self._data_area[random.randrange(0, len(self._data_area))]
        backup_type = int(self._job_param.get('backupType', ''))
        if backup_type not in [BackupTypeEnum.FULL_BACKUP, BackupTypeEnum.INCRE_BACKUP]:
            raise Exception(f'backup type: {backup_type} not support storage snapshot')

        # Assemble the command line for the helper tool.
        backup_type = BackupTypeStr.FULL_BACKUP if backup_type == BackupTypeEnum.FULL_BACKUP \
            else BackupTypeStr.INCRE_BACKUP

        protocol = storage.get('transport_protocol', 'iscsi')
        tool_args1 = f"{PluginPathConstant.LUN_TOOL} -j {self._job_id}_{sub_job_id} -t {backup_type} " \
                     f"-d {backup_path} -p {protocol} -c {self._concurrent_requests}"
        logger.info(f"storage_snapshot_backup {sub_job_id}, tool_args1: {tool_args1} ")
        backup_json_str = self.gen_storage_snapshot_backup_json_str(backup_path, backup_snapshots, device_manager,
                                                                    backup_type)
        tool_args2 = f"{backup_json_str}"
        tool_args = shlex.split(tool_args1)

        # Start the helper tool as a subprocess.
        logger.info(f"Start to run LunOpTool. Tool args : {tool_args}.")
        tool_process = subprocess.Popen(tool_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, encoding="utf-8")
        # Wait, then verify the tool process actually started before writing to its stdin.
        time.sleep(SLEEP_TIME)
        process_name = 'LunOpTool'
        tool_not_running = True
        for process in psutil.process_iter(['pid', 'name']):
            if process_name in process.info['name'] and backup_path in process.cmdline():
                tool_not_running = False
                logger.info(f"LunOpTool is running, process id: {process.info['pid']}.")
                time.sleep(SLEEP_TIME)
                tool_process.stdin.write(f"{tool_args2}\n")
                tool_process.stdin.flush()
                break
        if tool_not_running:
            logger.error(f'job id: {self._job_id}, backup failed, LunOpTool is not running!')
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED

        if not self.write_backup_progress():
            return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED
        # Progress reporting above blocks until the backup finishes or fails.
        return ScriptExitCode.SUCCESS

    def write_backup_progress(self):
        """Poll the LunOpTool progress file until the sub-job leaves the running states.

        :return: True when the tool finished without failure, False when it reported
                 failure or when no progress file appeared for too long (~25 min)
        """
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        status = self.query_lun_sub_job_progress(sub_job_id).get('status', LunToolStatus.REPORT_STATUS_PREP)
        poll_count = 0
        while status in (LunToolStatus.REPORT_STATUS_PREP, LunToolStatus.REPORT_STATUS_PROG):
            time.sleep(self._query_progress_interval)
            progress = self.query_lun_sub_job_progress(sub_job_id)
            if not progress and poll_count > 100:
                # Still no progress file after ~25 minutes: the lun tool is broken.
                logger.error("lun tool exec failed")
                return False
            status = progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
            poll_count += 1
        return status != LunToolStatus.REPORT_STATUS_FAIL

    def query_lun_sub_job_progress(self, sub_job_id):
        """Read the lun tool's progress file (report_<jobid>_<subjobid>.rep) for a sub-job.

        :param sub_job_id: id used to locate the *<sub_job_id>.rep file
        :return: parsed progress dict, or {} when the file is absent or corrupt
        """
        pattern = os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH, f"*{sub_job_id}.rep")
        matches = glob.glob(pattern)
        if not matches:
            logger.warn("lun sub job progress file not exist")
            return {}
        # Pull the progress values out of the helper tool's file.
        try:
            progress = read_tmp_json_file(matches[0])
        except json.JSONDecodeError:
            logger.warn("lun sub job progress data error")
            return {}
        logger.info(f"sub_job_progress:{progress}")
        return progress

    def query_backup_copy_size(self):
        """Return the copy size from the helper tool's progress file, converted to KB,
        then delete that progress file.
        """
        sub_job_id = self._param_dict.get('subJob', {}).get('subJobId', '{}')
        progress = self.query_lun_sub_job_progress(sub_job_id)
        # Convert the reported capacity to KB.
        size_kb = int(progress.get("total_capacity", '0')) / 1024
        self.delete_lun_sub_job_progress_file_by_sub_job_id(sub_job_id)
        return size_kb

    def delete_lun_sub_job_progress_file_by_sub_job_id(self, sub_job_id):
        """Remove the first progress file matching *<sub_job_id>.rep, if one exists."""
        matches = glob.glob(os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH, f"*{sub_job_id}.rep"))
        if matches and matches[0] and os.path.exists(matches[0]):
            os.remove(matches[0])

    def query_lun_tool_progress(self, status, file):
        """Poll a lun tool progress file until the tool reaches a terminal status.

        :param status: last known tool status
        :param file: path of the progress file to poll
        :return: True when polling exceeded ~30 minutes (tool stuck, must be killed),
                 False otherwise (including unreadable progress data)
        """
        terminal_statuses = (LunToolStatus.REPORT_STATUS_STOP, LunToolStatus.REPORT_STATUS_SUCC,
                             LunToolStatus.REPORT_STATUS_FAIL)
        poll_count = 0
        is_timeout = False
        while status not in terminal_statuses:
            time.sleep(self._query_progress_interval)
            try:
                progress = read_tmp_json_file(file)
            except json.JSONDecodeError:
                logger.warn("Lun sub job progress data error.")
                return is_timeout
            if poll_count > 120:
                # Not stopped after ~30 minutes: the lun tool is stuck and its
                # process must be killed by the caller.
                logger.warn("Lun tool stop failed : timeout!")
                is_timeout = True
                break
            status = progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
            logger.info(f"query_lun_tool_progress. status {status}")
            poll_count += 1
        return is_timeout

    def delete_lun_sub_job_progress_file_by_job_id(self):
        """Wait for this job's lun tool sub-jobs to stop, then clean up their files.

        Progress files are named report_<jobid>_<subjobid>.rep and are looked up by
        job id (used when aborting a whole job). Each tool is given time to stop; a
        tool that does not stop in time is killed. The abort notification temp file
        is removed on every exit path that reaches cleanup.
        """
        file_list = glob.glob(os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH, f"report_{self._job_id}*"))
        if not file_list:
            logger.warn(f"No report file!")
            self._remove_abort_files()
            return

        for file in file_list:
            try:
                sub_job_progress = read_tmp_json_file(file)
            except json.JSONDecodeError:
                logger.warn(f"Lun sub job progress data error.")
                return
            status = sub_job_progress.get('status', LunToolStatus.REPORT_STATUS_PREP)
            is_timeout = self.query_lun_tool_progress(status, file)
            if is_timeout:
                logger.warn(f"query_lun_tool_progress timeout.")
                self.kill_lun_tool_process()
                break
            logger.info(f"lun_tool_progress stop success, file {file}")

        # Remove the abort notification temp file.
        self._remove_abort_files()

    def _remove_abort_files(self):
        """Delete abort_oracle_backup_job_<jobid> notification files, if any exist."""
        abort_files = glob.glob(os.path.join(PluginPathConstant.LUN_SUB_JOB_PROGRESS_PATH,
                                             f"abort_oracle_backup_job_{self._job_id}"))
        for file in abort_files:
            if file and os.path.exists(file):
                os.remove(file)

    def kill_lun_tool_process(self):
        """Kill every LunOpTool process started for this job id.

        A process can exit or become inaccessible between ``psutil.pids()`` and the
        ``Process``/``cmdline()`` calls; previously that raised (NoSuchProcess /
        AccessDenied) and aborted the whole scan, leaving other tool processes
        alive. Such per-process errors are now skipped.
        """
        logger.info(f"kill_lun_tool_process start")
        for pid in psutil.pids():
            try:
                process = psutil.Process(pid)
                cmd = process.cmdline()
                # Find the lun tool process belonging to this job and kill it.
                if 'LunOpTool' in cmd and self._job_id in cmd:
                    process.kill()
                    logger.info(f"lun_tool_process has been terminated, job_id: {self._job_id}, pid: {pid}.")
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                continue

    def gen_storage_snapshot_backup_json_str(self, backup_path, backup_snapshots, device_manager, backup_type):
        """Build the compact JSON parameter string handed to LunOpTool over stdin.

        :param backup_path: data-area directory holding/receiving the copy files
        :param backup_snapshots: snapshot dicts describing the LUNs to back up
        :param device_manager: storage access info (ip/port/credentials)
        :param backup_type: BackupTypeStr value for this run
        :return: JSON string with all spaces stripped
        """
        logger.info(f"device_manager.storage_ip:{device_manager.storage_ip}")
        lun_entries = []
        for snapshot_info in backup_snapshots:
            entry = {
                'parentLunId': snapshot_info.get('parent_id'),
                'parentLunWWN': snapshot_info.get('parent_wwn'),
                'curSnapshotInfo': {
                    'storageSnapId': snapshot_info['snapshot_id'],
                    'snapshotName': snapshot_info['name'],
                    'snapshotWwn': snapshot_info['wwn'],
                    'status': snapshot_info['running_status']
                },
            }
            if backup_type == BackupTypeStr.INCRE_BACKUP:
                # Incremental backup: rename the previous copy files so they carry
                # the current {snapshot.wwn}.raw name.
                entry['preSnapshotInfo'] = self.get_incremental_backup_param(
                    snapshot_info, backup_path, snapshot_info.get('parent_wwn'))
            lun_entries.append(entry)
        storage = {
            'ip': device_manager.storage_ip,
            'port': int(device_manager.storage_port),
            'username': device_manager.storage_username,
            'password': device_manager.storage_password,
            'enableCert': '0',
            'storageType': 'dorado',
            'lunList': lun_entries
        }
        return json.dumps({'storages': [storage]}).replace(' ', '')

    def get_incremental_backup_param(self, snapshot_info, backup_path, parent_lun_wwn):
        """Resolve the previous-snapshot info needed for an incremental LUN backup.

        Finds the previous copy's snapshot for this LUN, then renames the previous
        {parent_wwn}_{pre_wwn}.raw/.bitmap files so they carry the current snapshot
        wwn (the tool writes the increment onto the renamed base files). When the
        files named after the previous copy's metadata are absent, falls back to
        whatever copy file the data area actually contains.

        :param snapshot_info: current snapshot dict for one LUN
        :param backup_path: data-area directory holding the copy files
        :param parent_lun_wwn: wwn of the parent LUN
        :return: dict describing the previous snapshot for the tool's JSON input
        :raises Exception: when no previous snapshot or copy file can be found
        """
        pre_copy = get_previous_copy_info(
            self._protect_obj, [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY], self._job_id)
        # Fix: default to '[]' (as at the other COPY_LUN_SNAPSHOTS read in this file)
        # so a copy without snapshot info reaches the explicit "no pre snap info"
        # error below instead of raising JSONDecodeError on json.loads(" ").
        pre_snapshots = pre_copy.get('extendInfo', {}).get(ORACLEJsonConstant.COPY_LUN_SNAPSHOTS, '[]')
        pre_snapshots_list = json.loads(pre_snapshots)
        logger.info(f"pre_snapshots_list:{pre_snapshots_list}")
        pre_snap = {}
        for snap in pre_snapshots_list:
            if snap.get('parent_id') == snapshot_info.get('parent_id'):
                pre_snap = snap
                break
        if not pre_snap:
            raise Exception(f'job id: {self._job_id}, no pre snap info')
        pre_snap_wwn = pre_snap.get('wwn', '')
        pre_wwn_path = backup_path + f"/{parent_lun_wwn}_{pre_snap_wwn}.raw"
        pre_bitmap_path = backup_path + f"/{parent_lun_wwn}_{pre_snap_wwn}.bitmap"
        pre_snap_param = {
            'storageSnapId': pre_snap.get('snapshot_id', ''),
            'snapshotName': pre_snap.get('name', ''),
            'snapshotWwn': pre_snap.get('wwn', ''),
            'status': pre_snap.get('running_status', '')
        }
        # If no copy file matches the pre_copy metadata, use the copy file actually
        # delivered in the data area instead.
        if not os.path.exists(pre_wwn_path):
            logger.warn(f"pre_wwn_path not exits {pre_wwn_path}")
            pre_snap_wwn = self.get_pre_snap_wwn_from_data_path(backup_path, parent_lun_wwn)
            if not pre_snap_wwn:
                raise Exception(f'job id: {self._job_id}, pre_snap_wwn is null')
            pre_wwn_path = backup_path + f"/{parent_lun_wwn}_{pre_snap_wwn}.raw"
            pre_bitmap_path = backup_path + f"/{parent_lun_wwn}_{pre_snap_wwn}.bitmap"
            pre_snap_param = {
                'storageSnapId': '',
                'snapshotName': '',
                'snapshotWwn': pre_snap_wwn,
                'status': ''
            }
        current_wwn_path = backup_path + f"/{parent_lun_wwn}_{snapshot_info['wwn']}.raw"
        current_bitmap_path = backup_path + f"/{parent_lun_wwn}_{snapshot_info['wwn']}.bitmap"
        logger.info(f"pre_wwn_path:{pre_wwn_path}, current_wwn_path {current_wwn_path}")
        os.rename(pre_wwn_path, current_wwn_path)
        os.rename(pre_bitmap_path, current_bitmap_path)
        return pre_snap_param

    def get_pre_snap_wwn_from_data_path(self, data_path, parent_lun_wwn):
        """Scan *.raw copy files in *data_path* for one belonging to *parent_lun_wwn*.

        Copy files are named {parent_wwn}_{snapshot_wwn}.raw; the snapshot-wwn part
        of the first matching, well-formed name is returned.

        :param data_path: directory to scan
        :param parent_lun_wwn: wwn of the parent LUN to match
        :return: the previous snapshot wwn, or '' when no matching file exists
        """
        pre_snap_wwn = ''
        for file in glob.glob(f"{data_path}/*.raw"):
            filename, _ = os.path.splitext(os.path.basename(file))
            # Idiomatic membership test instead of calling __contains__ directly;
            # also guard the split so a matching but malformed name (no '_') is
            # skipped instead of raising IndexError.
            if parent_lun_wwn in filename:
                parts = filename.split('_')
                if len(parts) > 1:
                    pre_snap_wwn = parts[1]
                    break
        logger.info(f"get_pre_snap_wwn_from_data_path, {pre_snap_wwn}")
        return pre_snap_wwn

    def delete_pre_lun_in_backup_post_job(self):
        """Delete the previous backup's snapshot consistency group.

        The group id comes from the previous copy's COPY_LUN_SNAPSHOTS metadata
        (visible on the v6 device under LUN group list -> details -> protection).
        """
        pre_copy = get_previous_copy_info(
            self._protect_obj, [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY], self._job_id)
        lun_snapshots = json.loads(pre_copy.get('extendInfo', {}).get(ORACLEJsonConstant.COPY_LUN_SNAPSHOTS, '[]'))
        if not lun_snapshots:
            return
        first_snapshot = lun_snapshots[0]
        logger.debug(f"snapshot:{first_snapshot}")
        group_id = first_snapshot.get('snap_cons_group_id', '')
        logger.info(f"delete pre lun snapshot_consistency_group_id:{group_id}")
        self.common_delete_lun_snapshots(group_id)

    def delete_current_lun_in_backup_post_job(self):
        """Delete the consistency group of the current storage-snapshot backup.

        Reads the snapshot list cached for this job; does nothing when the cache
        is missing or empty.
        """
        file_path = os.path.join(self._cache_area, f'snapshots_{self._job_id}')
        lun_snapshots = read_tmp_json_file(file_path)
        # Fix: the cached value is indexed like a list elsewhere in this file, so
        # the old `== {}` check never matched an empty list and lun_snapshots[0]
        # raised IndexError. A plain truthiness check covers both shapes.
        if not lun_snapshots:
            logger.info(f"snapshots_{self._job_id} is empty")
            return
        snapshot = lun_snapshots[0]
        if not snapshot:
            logger.info(f"snapshots_{self._job_id} is empty")
            return
        snapshot_consistency_group_id = snapshot.get('snap_cons_group_id', '')
        logger.info(f"delete current lun snapshot_consistency_group_id:{snapshot_consistency_group_id}")
        self.common_delete_lun_snapshots(snapshot_consistency_group_id)

    def get_current_lun_snapshot(self):
        """Return the first snapshot cached for this job, or {} when none exist."""
        file_path = os.path.join(self._cache_area, f'snapshots_{self._job_id}')
        cached_snapshots = list(read_tmp_json_file(file_path))
        if not cached_snapshots:
            return {}
        first_snapshot = cached_snapshots[0]
        logger.debug(f"current lun snapshots:{first_snapshot}")
        return first_snapshot

    def delete_current_lun_in_backup_post_job_by_abort_job(self, snapshot):
        """Delete the current snapshot consistency group after an aborted job.

        No-op when *snapshot* is empty. Deleting must wait until the helper tool
        has finished removing its own LUN group, or the deletion errors out.
        """
        logger.info("start delete current lun by abort job")
        if not snapshot:
            return
        # Wait for the helper tool's lun-group deletion to complete first.
        time.sleep(90)
        group_id = snapshot.get('snap_cons_group_id', '')
        logger.info(f"delete current lun snapshot_consistency_group_id:{group_id}")
        self.common_delete_lun_snapshots(group_id)

    def exec_pre_task(self):
        """Run the storage-snapshot pre-task: save DB metadata, SCN info and, for
        cluster databases, the pfile.

        :raises Exception: when any preparation step fails
        """
        logger.info(f'job id: {self._job_id}, exec storage snapshot')
        self.prepare_params()
        # Persist database metadata alongside the copy.
        if self.backup_additional_info() != ScriptExitCode.SUCCESS:
            logger.error(f"Backup additional info failed")
            raise Exception("Backup additional info failed")
        if self.prepare_scn_info() != ScriptExitCode.SUCCESS:
            logger.error(f"Execute backup archivelog pre failed, pid:{self._job_id}.")
            raise Exception("backup archivelog pre failed")
        is_cluster = self._protect_obj.get("subType", "") == OracleDataBaseType.ORACLE_CLUSTER
        if is_cluster and not self.backup_pfile():
            logger.error(f"Execute backup pfile failed, pid:{self._job_id}.")
            raise Exception("backup pfile failed")

    def prepare_scn_info(self):
        """Record the minimum datafile SCN and its timestamp for archive-log backup.

        Writes "scn systime timestamp" into additional/scn_dbf_max and a JSON
        result file into the cache area.

        :return: ScriptExitCode.SUCCESS, or ERROR_PARAM_INVALID when the SCN query fails
        """
        logger.info(f'job id: {self._job_id}, start to backup archivelog pre')
        scn_time_info = get_scn_and_time(self._pid, self._job_id, self.db_instance, self.db_install_user)
        if not scn_time_info:
            logger.error(f"Min data file scn is invalid, pid:{self._job_id}.")
            return ScriptExitCode.ERROR_PARAM_INVALID

        scn, systime = scn_time_info[0], scn_time_info[1]
        timestamp = add_unix_timestamp(systime)

        scn_dbf_max_content = f"{scn} {systime} {timestamp}\n"
        logger.info(f"scn_dbf_max_content is {scn_dbf_max_content}")
        write_file(os.path.join(self.additional, "scn_dbf_max"), scn_dbf_max_content)

        result_content = {"last_backup_scn": scn, "timeStamp": timestamp, "resetlogs_id": self.resetlogs_id}
        logger.info(f"result_content is {result_content}")
        write_tmp_json_file(os.path.join(self._cache_area, f'result_content{self._job_id}'),
                            self._job_id, result_content)
        return ScriptExitCode.SUCCESS

    def backup_additional_info(self):
        """Persist database metadata (dbinfo, env_file) into the copy's additional dir.

        :return: ScriptExitCode.SUCCESS, ERROR_PARAM_INVALID when the DB info query
                 fails, or ERROR_ORACLE_NOARCHIVE_MODE when archiving is disabled
        """
        logger.info(f'job id: {self._job_id}, start to backup additional info')
        self.additional = os.path.join(self.main_backup_path, "additional")
        if not os.path.exists(self.additional):
            logger.info(f"create additional dir: {self.additional}")
            if platform.system().lower() == "windows":
                create_dir(self._job_id, self.additional, self.asm_id_name, self.oracle_home)
            else:
                os.mkdir(self.additional, 0o700)
                # Fix: the original ran "chmod -R root <dir>", which is an invalid
                # chmod invocation ("root" is not a mode) and always failed;
                # changing ownership to root is the evident intent here.
                execute_cmd(f"chown -R root {self.additional}")
        login_params = {
            'pid': self._pid,
            'instance_name': self.db_instance,
            'db_install_user': self.db_install_user,
            'auth_pwd': f"job_protectObject_auth_authPwd_{self._pid}",
            'auth_key': f"job_protectObject_auth_authKey_{self._pid}"
        }
        base_info = get_database_info(login_params, self._job_id)
        if not base_info:
            logger.error(f"Get database base info failed, pid:{self._job_id}")
            return ScriptExitCode.ERROR_PARAM_INVALID

        [log_mode, db_id, uniq_name, self.open_mode, incarnation_number, self.resetlogs_id] = base_info
        # Storage-snapshot backup requires the database to be in ARCHIVELOG mode.
        if log_mode != ArchiveLogMode.ARCHIVELOG.value:
            logger.error(f"Archive Mode=No Archive Mode, check archive mode failed, pid: {self._job_id}.")
            return ScriptExitCode.ERROR_ORACLE_NOARCHIVE_MODE

        logger.info(f"Write backup metadata info, pid: {self._job_id}.")
        # Semicolon-separated record consumed at restore time.
        db_info = f"{db_id};{uniq_name};{self.db_instance};{self.asm_user};{self.asm_id_name};{self.db_is_cluster};" \
                  f"{self.ora_pre_version_extend};{incarnation_number};{self.resetlogs_id};{self.ora_version}"
        write_file(os.path.join(self.additional, 'dbinfo'), db_info)

        oracle_info = f"ORACLE_BASE={self.oracle_base}\nORACLE_HOME={self.oracle_home}"
        write_file(os.path.join(self.additional, 'env_file'), oracle_info)
        return ScriptExitCode.SUCCESS

    def backup_pfile(self):
        """Create a pfile from the spfile under <main_backup_path>/pfile.

        :return: True on success, False otherwise
        """
        logger.info(f"backup_pfile, {self._job_id}.")
        pfile_path = os.path.join(self.main_backup_path, "pfile")
        if platform.system().lower() == "windows":
            if not os.path.exists(pfile_path):
                os.makedirs(pfile_path)
            return create_pfile_from_spfile(self.login_db_params, pfile_path, self.db_name) == ScriptExitCode.SUCCESS
        exec_mkdir_cmd(pfile_path)
        # The Oracle install user must own the directory so sqlplus can write the pfile.
        exec_lchown_dir_recursively(pfile_path, self.db_install_user, self.oracle_group)
        return create_pfile_from_spfile_linux(self._pid, self.db_instance, self.db_install_user, pfile_path,
                                              self.db_name)

    def append_snapshot_time(self):
        """Append the snapshot timestamp to the dbinfo metadata file.

        :raises Exception: when the SCN/time query fails
        """
        scn_time_info = get_scn_and_time(self._pid, self._job_id, self.db_instance, self.db_install_user)
        if not scn_time_info:
            logger.error(f"Min data file scn is invalid, pid:{self._job_id}.")
            raise Exception("query time failed")
        timestamp = add_unix_timestamp(scn_time_info[1])
        dbinfo_path = os.path.join(self.additional, "dbinfo")
        if platform.system().lower() == "windows":
            # On Windows the dbinfo file may be in the locale's preferred encoding.
            encoding = locale.getdefaultlocale()[1]
            dbinfo_content = read_result_file(dbinfo_path, encoding=encoding).strip()
        else:
            dbinfo_content = read_result_file(dbinfo_path).strip()
        delete_file(os.path.join(self.additional, 'dbinfo'))
        write_file(os.path.join(self.additional, 'dbinfo'), f"{dbinfo_content};{timestamp}")

    def prepare_params(self):
        """Populate backup parameters (database identity, users, paths, encryption,
        versions) from self._backup_param, and build the rman/sqlplus login dicts
        used by the rest of the backup flow.
        """
        self.db_instance = self._backup_param.get('InstanceName')
        self.db_user = self._backup_param.get('UserName')
        self.db_password = self._backup_param.get('Password')
        self.db_name = self._backup_param.get('AppName')
        self.level = self._backup_param.get('Level')
        self.db_install_user = self._backup_param.get('OracleInstallUser')
        self.asm_id_name = self._backup_param.get('ASMInstanceName')
        self.oracle_home = self._backup_param.get('OracleHome')
        self.oracle_base = self._backup_param.get('OracleBase')
        self.oracle_group = self._backup_param.get('OracleGroup')
        self.asm_user = self._backup_param.get('ASMUserName')
        enc_algo = self._backup_param.get('EncAlgo')
        enc_key = self._backup_param.get('EncKey')
        if enc_algo and enc_key:
            # Both algorithm and key supplied: turn on RMAN backup encryption.
            # NOTE(review): is_enc_bk / rman_enc_section are only assigned in this
            # branch; presumably the unencrypted defaults come from __init__ — verify.
            self.is_enc_bk = 1
            self.rman_enc_section = f"configure encryption for database on;\n" \
                                    f"configure encryption algorithm '{enc_algo}';\n" \
                                    f"set encryption on identified by \"{enc_key}\" only;\n" \
                                    f"set decryption identified by \"{enc_key}\";\n"
            logger.info(f"Backup database will enc, pid:{self._job_id}.")
        # Pick a random data-area directory as the main backup destination.
        self.main_backup_path = self._data_area[random.randrange(0, len(self._data_area))]
        if platform.system().lower() == "windows":
            [self.ora_version, self.ora_pre_version, self.ora_pre_version_extend] = get_oracle_version(self._pid)
            # SQL filter matching archive logs written under this backup path.
            self.log_is_backed_up = f"{LOG_IS_VALID} and name like '{self.main_backup_path}\\log\\arch_%'"
        else:
            user = self.db_install_user
            [self.ora_version, self.ora_pre_version, self.ora_pre_version_extend] = get_linux_oracle_version(user)
            self.log_is_backed_up = f"{LOG_IS_VALID} and name like '{self.main_backup_path}/log/arch_%'"

        # Common parameters for logging in to rman/oracle.
        self.run_rman_params = {
            'is_enc_bk': self.is_enc_bk,
            'instance_name': self.db_instance,
            'db_user': self.db_user,
            'db_password': self.db_password,
            'rman_enc_section': self.rman_enc_section,
            'db_install_user': self.db_install_user
        }

        self.login_db_params = {
            'pid': self._pid,
            'instance_name': self.db_instance,
            'db_user': self.db_user,
            'db_password': self.db_password,
            'db_install_user': self.db_install_user
        }
        logger.info(f"ora_version is {self.ora_version}")

    def common_delete_lun_snapshots(self, snapshot_consistency_group_id):
        """Delete a lun snapshot consistency group on the protected storage device.

        Builds the device access info from the cached storage index and the
        password environment variable, then asks StorageV6Manager to remove the
        group. Temporary cert/crl files are cleaned up even when deletion raises.

        :param snapshot_consistency_group_id: id of the consistency group to delete
        """
        file_path = os.path.join(self._cache_area, f'storage_index_{self._job_id}')
        storage_index = read_tmp_json_file(file_path).get('storage_index', -1)
        logger.info(f'delete lun snapshots, storage_index: {storage_index}')
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        logger.info(f'delete lun snapshots, storages: {storages}')
        password = json.loads(get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[
            storage_index]
        data = storages[storage_index]
        cert_path, crl_path = write_cert_2_file(data.get('certification'), data.get('revocationList'), self._pid)
        try:
            storage = DeviceInfo(data.get('ipList'), int(data.get('port')), data.get('username'),
                                 password, data.get('enableCert'), cert_path, crl_path, False)
            logger.info(f'delete lun snapshots, snapshot_consistency_group_id: {snapshot_consistency_group_id}')
            StorageV6Manager.delete_lun_snapshots(snapshot_consistency_group_id, storage)
        finally:
            # Fix: previously the temp cert/crl files leaked when deletion raised.
            delete_file(cert_path)
            delete_file(crl_path)

    def check_backup_lun_is_same_with_pre(self):
        """Check that the LUNs to back up match the previous copy (same wwns, same sizes).

        :return: True when the current LUN set and sizes equal the previous copy's,
                 False on any mismatch or when the storage device cannot be queried
        """
        logger.info("check_backup_lun_is_same_with_pre")
        self.prepare_params()
        wwn_infos, disk_infos, _ = self.get_wwn_disk_infos()
        pre_copy = get_previous_copy_info(
            self._protect_obj, [RpcParamKey.FULL_COPY, RpcParamKey.INCREMENT_COPY], self._job_id)
        pre_disk_infos = json.loads(pre_copy.get('extendInfo', {}).get(ORACLEJsonConstant.COPY_DISK_INFOS, '[]'))
        logger.info(f"wwn_infos {wwn_infos}, pre_disk_infos {pre_disk_infos}")
        if len(pre_disk_infos) != len(wwn_infos):
            logger.error(f"pre len is {len(pre_disk_infos)}, cur len {len(wwn_infos)}")
            return False
        pre_wwn_size = {}
        for disk_info in pre_disk_infos:
            pre_wwn = disk_info.get('local_wwn')
            if pre_wwn not in wwn_infos:
                logger.error(f"{pre_wwn} not in {wwn_infos} ")
                return False
            pre_wwn_size[pre_wwn] = disk_info.get('disk_size')
        storages = json.loads(self._protect_obj_auth_extend_info.get("storages", "[]"))
        storage_index = self.get_storage_device_index(storages)
        storage = storages[storage_index]
        password = json.loads(get_env_variable(f"job_protectObject_auth_extendInfo_storagesPwd_{self._pid}"))[
            storage_index]
        ret, device_manager = check_storage_device_access(storage, password, self._pid)
        if not ret:
            logger.error(f"device_manager not access")
            return False
        ret, lun_infos = StorageV6Manager.query_lun_infos_by_wwn(wwn_infos, device_manager)
        # Fix: the query result flag was previously ignored; a failed query with an
        # empty lun_infos list would fall through and report a spurious match.
        if not ret:
            logger.error("query_lun_infos_by_wwn failed")
            return False
        for lun_info in lun_infos:
            pre_lun_size = pre_wwn_size.get(lun_info.wwn)
            # capacity/2 converts the device-reported units to the stored disk_size
            # units — presumably 512-byte sectors to KB; TODO confirm against device API
            curr_lun_size = int(int(lun_info.capacity) / 2)
            if pre_lun_size != curr_lun_size:
                logger.error(f"wwn is {lun_info.wwn}, pre_size {pre_lun_size}, curr_size {curr_lun_size}")
                return False
        return True
