#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import locale
import os
import time

from oracle import logger
from common.common import read_result_file
from common.env_common import get_install_head_path
from common.file_common import delete_file
from oracle.common.backup_common import get_last_log_scn_info

from oracle.common.constants import ScriptExitCode, LOG_IS_VALID, RETURN_INVALID, ArchiveLogMode, PluginPathConstant, \
    OracleTruncateLog, OracleDataBaseType
from oracle.common.windows_common import write_tmp_file, redirect_backup_path, check_oracle_install_type, \
    get_asm_instance, get_oracle_version, check_sqlplus_status, check_rman_status, get_real_instance_name, create_dir, \
    get_database_info, execute_windows_rman_cmd, get_datafile_mini_scn, get_datafile_info, create_pfile_from_spfile, \
    cross_check_backup_dbf, cross_check_archive_log, build_backup_database_sql, \
    add_unix_timestamp, move_pfile_to_stmp, get_dbfiles, get_online_redo_log_info, create_empty_file, \
    generate_additional_info, get_dir_file_info, change_archive_log_unavailable, delete_archive_log, set_spfile_flag, \
    get_archivelog_current_range, get_all_archivelog_range, build_backup_archive_log_sql, backup_windows_log, \
    get_last_log_scn, get_database_node_list, create_archive_log, check_archive_log_lost, get_archive_log_num, \
    execute_windows_sqlplus_cmd, set_db_silence_sql
from oracle.services.backup.windows_backup_func import check_standby_database_sequence


class OracleWindowsBackupService(object):
    """Drive Oracle data-file and archive-log backups on Windows via RMAN/sqlplus helper wrappers.

    The service is constructed per backup job; state accumulated across the
    backup steps (open mode, resetlogs id, result-file content, ...) lives on
    the instance and is consumed by later steps in a fixed call order.
    """

    def __init__(self, pid, job_id, params):
        """Cache job identity and unpack the backup request parameters.

        :param pid: plugin process id (only passed through to get_oracle_version)
        :param job_id: backup job id; used in log lines and temp-file names
        :param params: dict of request parameters supplied by the backup framework
        """
        self._job_id = job_id
        self._pid = pid
        self.params = params
        self.ora_version = ""
        self.ora_pre_version = ""
        self.ora_pre_version_extend = ""
        self.db_is_cluster = 0
        self.db_user = self.params.get('UserName')
        self.db_password = self.params.get('Password')
        self.archive = self.params.get('LogPath')
        self.db_uuid = self.params.get('DBUUID')
        self.sub_type = self.params.get('sub_type')
        # For a PDB backup the container (parent) database name is used as db_name.
        if self.sub_type == OracleDataBaseType.ORACLE_PDB:
            self.db_name = self.params.get('ParentName')
        else:
            self.db_name = self.params.get('AppName')
        self.last_backup_scn = self.params.get('LastBackupScn')
        self.last_backup_reset_logs_id = self.params.get('LastBackupResetLogsId')
        self.ip_port_info = self.params.get('IpPortInfo').split(";")
        self.channels = int(self.params.get('Channel'))
        self.qos = self.params.get('Qos')
        self.node_number = self.params.get('NodeNumber')
        self.truncate_log = self.params.get('truncateLog')
        self.backup_path_list = self.params.get('DataPath')
        self.asm_id_name = self.params.get('ASMInstanceName')
        self.level = self.params.get('Level')
        self.oracle_home = self.params.get('OracleHome')
        self.oracle_base = self.params.get('OracleBase')
        self.asm_user = self.params.get('ASMUserName')
        self.resetlogs_id = ''
        self.open_mode = ''
        self.from_scn = ''
        # 1 when RMAN encryption is enabled (both EncAlgo and EncKey supplied).
        self.is_enc_bk = 0
        self.log_num = 0
        self.dbf_info = ''
        self.additional = ''
        self.backup_tmp = ''
        self.rman_enc_section = ''
        self.log_is_backed_up = ''
        self.main_backup_path = ''
        self.db_instance = self.params.get('InstanceName')
        self.result_file = ''
        self.result_content = ''
        self.login_db_params = {}
        self.run_rman_params = {}
        self.start_with_spfile_flag = True
        # String flag ("false"/"true"), written verbatim into the result file.
        self.check_archive_log_lost = "false"

    def get_build_data_backup_params(self):
        """Assemble the parameter dict consumed by build_backup_database_sql.

        NOTE(review): the 'pid' key actually carries the job id (self._job_id),
        mirroring how helpers elsewhere in this class are invoked.
        """
        return {
            'pid': self._job_id,
            'datafile_info': self.dbf_info,
            'backup_path_list': self.backup_path_list,
            'db_name': self.db_name,
            'level': self.level,
            'additional': self.additional,
            'ora_pre_version': self.ora_pre_version,
            'is_enc_bk': self.is_enc_bk,
            'qos': self.qos,
            'channels': self.channels,
            'db_is_cluster': self.db_is_cluster,
            'node_number': self.node_number,
            'ip_port_info': self.ip_port_info,
            'main_backup_path': self.main_backup_path,
            'backup_tmp': self.backup_tmp,
            'open_mode': self.open_mode,
            'oracle_home': self.oracle_home,
            'asm_instance': self.asm_id_name
        }

    def get_build_log_backup_params(self):
        """Assemble the parameter dict consumed by build_backup_archive_log_sql."""
        return {
            'is_enc_bk': self.is_enc_bk,
            'ora_pre_version': self.ora_pre_version,
            'qos': self.qos,
            'log_num': self.log_num,
            'channels': self.channels,
            'archive': self.archive,
            'resetlogs_id': self.resetlogs_id,
            'open_mode': self.open_mode,
            'from_scn': self.from_scn,
            'db_is_cluster': self.db_is_cluster,
            'node_number': self.node_number,
            'ip_port_info': self.ip_port_info
        }

    def get_dbf_info_content(self, file_path):
        """Reformat the datafile query result at *file_path* (whitespace-separated
        columns) into semicolon-separated lines and write it to
        <additional>/dbfiles.

        NOTE(review): lines with fewer than 4 columns would raise IndexError;
        presumably the upstream SQL result always yields 4+ columns — confirm.
        """
        dbf_content = ""
        with open(file_path, 'r') as file:
            for line in file:
                dbf_arr = line.split()
                if len(dbf_arr):
                    dbf_content += f"{dbf_arr[0]};{dbf_arr[1]};{dbf_arr[2]};{dbf_arr[3]}\n"

        db_files_path = os.path.join(self.additional, "dbfiles")
        write_tmp_file(db_files_path, dbf_content)

    def get_log_file_content(self, file_path):
        """Reformat the online-redo-log query result at *file_path* into
        "col0;col1;" lines and write it to <additional>/logfiles.

        NOTE(review): assumes each non-empty line has at least 2 columns — confirm.
        """
        log_file_content = ""
        with open(file_path, 'r') as file:
            for line in file:
                log_file_arr = line.split()
                if len(log_file_arr):
                    log_file_content += f"{log_file_arr[0]};{log_file_arr[1]};\n"

        log_files_path = os.path.join(self.additional, "logfiles")
        write_tmp_file(log_files_path, log_file_content)

    def init_login_params(self):
        """Populate the sqlplus login dict and the RMAN run dict reused by
        all subsequent helper calls in this job."""
        self.login_db_params = {
            'pid': self._job_id,
            'instance_name': self.db_instance,
            'db_user': self.db_user,
            'db_password': self.db_password,
            'oracle_home': self.oracle_home
        }

        self.run_rman_params = {
            'is_enc_bk': self.is_enc_bk,
            'instance_name': self.db_instance,
            'db_user': self.db_user,
            'db_password': self.db_password,
            'rman_enc_section': self.rman_enc_section
        }

    def backup_data_windows(self):
        """Entry point for a data (level) backup: prepare paths, check the host,
        then back up data files, control files and additional metadata in order.

        :return: ScriptExitCode.SUCCESS or the first failing step's code.
        """
        ret_code = self.prepare_backup()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Execute prepare backup failed, pid:{self._job_id}.")
            return ret_code

        enc_algo = self.params.get('EncAlgo')
        enc_key = self.params.get('EncKey')

        # Encryption is enabled only when BOTH algorithm and key are provided;
        # the key is embedded verbatim in the generated RMAN script section.
        if enc_algo and enc_key:
            self.is_enc_bk = 1
            self.rman_enc_section = f"configure encryption for database on;\n" \
                                    f"configure encryption algorithm '{enc_algo}';\n" \
                                    f"set encryption on identified by \"{enc_key}\" only;\n" \
                                    f"set decryption identified by \"{enc_key}\";\n"
            logger.info(f"Backup database will enc, pid:{self._job_id}.")
        # check host environment begin
        # Initialize the common rman/oracle login parameters.
        self.init_login_params()
        ret_code = self.check_host_environment()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Check host environment failed, pid:{self._job_id}.")
            return ret_code
        logger.info(f"Check cluster [db_is_cluster={self.db_is_cluster}], db_instance={self.db_instance}.")

        logger.info(f"PID={self._job_id};DBUUID={self.db_uuid};DBINSTANCE={self.db_instance};DBNAME={self.db_name};"
                    f"DBUSER={self.db_user};ASMInst={self.asm_id_name};ASMUSER={self.asm_user};"
                    f"IN_ORACLE_HOME={self.oracle_home};BACKUP_PATH_LIST={self.backup_path_list};"
                    f"ARCHIVE={self.archive};LEVEL={self.level};CHANNELS={self.channels};QOS={self.qos};"
                    f"ENCALGO={enc_algo};IPPORTINFO={self.ip_port_info};NODENUMBER={self.node_number}.")

        # prepare data backup
        ret_code = self.prepare_data_backup()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Prepare data backup failed, pid:{self._job_id}.")
            return ret_code
        # backup data file
        ret_code = self.backup_data_file()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Backup data file failed, pid:{self._job_id}.")
            return ret_code
        # backup control file
        ret_code = self.backup_control_file()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Backup control file failed, pid:{self._job_id}.")
            return ret_code
        # backup additional file
        ret_code = self.backup_additional_file()
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Backup additional file failed, pid:{self._job_id}.")
            return ret_code
        logger.info(f"Do backup success, pid: {self._job_id}.")
        return ScriptExitCode.SUCCESS

    def prepare_backup(self):
        """Resolve backup paths, wait for the main backup path to appear
        (up to 4 retries of 3s), and validate/default the channel count.

        :return: SUCCESS, ERROR_PARAM_INVALID (missing path) or RETURN_INVALID
                 (channels > 254).
        """
        self.start_with_spfile_flag = set_spfile_flag(self._job_id, self.db_instance, self.db_user, self.db_password)
        self.backup_path_list = redirect_backup_path(self.backup_path_list)
        # First entry of the semicolon-separated path list is the primary target.
        self.main_backup_path = self.backup_path_list.split(";")[0]
        self.additional = f"{self.main_backup_path}\\additional"
        self.backup_tmp = f"{self.main_backup_path}\\tmp"
        logger.info(f"Get backup_path_list: {self.backup_path_list}, main_backup_path: {self.main_backup_path}, "
                    f"pid: {self._job_id}.")
        # Check whether main_backup_path exists (retry, the mount may lag).
        cnt = 1
        while cnt < 5:
            if not os.path.exists(self.main_backup_path):
                logger.error(f"Data path is invalid, pid:{self._job_id}.")
                time.sleep(3)
            else:
                break
            cnt += 1
        if not os.path.exists(self.main_backup_path):
            logger.error(f"Data path is invalid, pid:{self._job_id}.")
            return ScriptExitCode.ERROR_PARAM_INVALID
        # Filter used later to select archive logs already backed up to this path.
        self.log_is_backed_up = f"{LOG_IS_VALID} and name like '{self.main_backup_path}\\log\\arch_%'".upper()
        if self.channels == 0:
            self.channels = 4
            logger.warning(f"Setting channels number to {self.channels} by default.")
        if self.channels > 254:
            logger.error("Channel params is invalid.")
            return RETURN_INVALID
        return ScriptExitCode.SUCCESS

    def check_host_environment(self):
        """Verify the host: install type (cluster/ASM), Oracle version, sqlplus
        and RMAN availability, and resolve the real instance name.

        NOTE(review): get_oracle_version is called with self._pid while most
        helpers use self._job_id — confirm this asymmetry is intended.
        """
        logger.info(f"Enter function Check host environment, pid:{self._job_id}.")
        self.db_is_cluster = check_oracle_install_type()
        if self.db_is_cluster > 0:
            # Cluster install requires a reachable ASM instance.
            self.asm_id_name = get_asm_instance(self._job_id, "OracleASMService")
            if not self.asm_id_name:
                logger.error(f"Have no asm instance, pid={self._job_id}.")
                return RETURN_INVALID
        [self.ora_version, self.ora_pre_version, self.ora_pre_version_extend] = get_oracle_version(self._pid)
        if not check_sqlplus_status() or not check_rman_status():
            return RETURN_INVALID
        self.db_instance = get_real_instance_name(self.login_db_params, self.db_is_cluster, self.db_name)
        if not self.db_instance:
            logger.error(f"Database real instance name is invalid, pid:{self._job_id}.")
            return RETURN_INVALID
        logger.info(f"Check cluster [db_is_cluster={self.db_is_cluster}], db_instance={self.db_instance}.")
        return ScriptExitCode.SUCCESS

    def prepare_data_backup(self):
        """Create the target directory tree, verify ARCHIVELOG mode, and write
        the dbinfo / env_file metadata under the additional directory.

        :return: SUCCESS, the helper's failure code, or
                 ERROR_ORACLE_NOARCHIVE_MODE when the DB is not in archive mode.
        """
        logger.info(f"Enter function prepare data backup, pid:{self._job_id}.")
        create_dir(self._job_id, self.backup_path_list, self.asm_id_name, self.oracle_home)
        create_dir(self._job_id, self.backup_tmp, self.asm_id_name, self.oracle_home)
        create_dir(self._job_id, self.archive, self.asm_id_name, self.oracle_home)
        create_dir(self._job_id, self.additional, self.asm_id_name, self.oracle_home)
        create_dir(self._job_id, os.path.join(self.additional, 'dbs'), self.asm_id_name, self.oracle_home)
        create_dir(self._job_id, os.path.join(self.main_backup_path, 'log'), self.asm_id_name, self.oracle_home)

        ret_code, base_info = get_database_info(self._job_id, self.db_instance, self.db_user, self.db_password)
        if ret_code != ScriptExitCode.SUCCESS or not base_info:
            logger.error(f"Get database base info failed, pid:{self._job_id}")
            return ret_code

        [log_mode, db_id, uniq_name, self.open_mode, incarnation_number, self.resetlogs_id] = base_info
        if log_mode != ArchiveLogMode.ARCHIVELOG.value:
            logger.error(f"Archive Mode=No Archive Mode, check archive mode failed, pid: {self._job_id}.")
            return ScriptExitCode.ERROR_ORACLE_NOARCHIVE_MODE

        logger.info(f"Write backup metadata info, pid: {self._job_id}.")
        db_info = f"{db_id};{uniq_name};{self.db_instance};{self.asm_user};{self.asm_id_name};{self.db_is_cluster};" \
                  f"{self.ora_pre_version_extend};{incarnation_number};{self.resetlogs_id};{self.ora_version}"
        write_tmp_file(os.path.join(self.additional, 'dbinfo'), db_info)

        oracle_info = f"ORACLE_BASE={self.oracle_base}\nORACLE_HOME={self.oracle_home}"
        write_tmp_file(os.path.join(self.additional, 'env_file'), oracle_info)

        return ScriptExitCode.SUCCESS

    def build_backup_sql(self, backup_database_sql, data_min_scn):
        """Generate the RMAN data-backup script, then strip the spfile backup
        line when the instance was not started from an spfile."""
        data_backup_params = self.get_build_data_backup_params()
        build_backup_database_sql(backup_database_sql, data_min_scn, data_backup_params)
        if not self.start_with_spfile_flag:
            with open(backup_database_sql, "r") as file:
                lines = file.readlines()
            # Keep only the lines that do not contain the spfile backup command.
            new_lines = [line for line in lines if "backup spfile format" not in line]
            # Write the filtered lines back, overwriting the original script.
            with open(backup_database_sql, "w") as file:
                file.writelines(new_lines)

    def backup_data_file(self):
        """Run the RMAN data-file backup: collect SCN/datafile info, create a
        pfile, crosscheck, build and execute the backup script, then record
        the log-lost check and max-SCN metadata.

        Temp .sql/.txt files are deleted on both success and failure paths.
        """
        ret_code, data_min_scn = get_datafile_mini_scn(self._job_id, self.db_instance, self.db_user, self.db_password)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Get datafile mini scn failed, pid:{self._job_id}.")
            return ret_code
        if not data_min_scn:
            logger.error(f"Min data file scn is invalid, pid:{self._job_id}.")
            return ScriptExitCode.ERROR_PARAM_INVALID

        self.dbf_info = f"{PluginPathConstant.WINDOWS_TMP_PATH}/get_datafile_info_rst_{self._job_id}.sql"
        ret_code = get_datafile_info(self._job_id, self.ora_pre_version, self.db_user, self.db_password,
                                     instance_name=self.db_instance, tmp_rst=self.dbf_info)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Get database data file info failed, pid:{self._job_id}.")
            delete_file(self.dbf_info)
            return ret_code

        ret_code = create_pfile_from_spfile(self.login_db_params, self.main_backup_path, self.db_name,
                                            self.start_with_spfile_flag)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Create pfile failed, pid:{self._job_id}")
            return ret_code

        # Crosscheck stale datafile/archivelog backup records before backing up.
        cross_check_backup_dbf(self._job_id, self.run_rman_params, 0, self.db_name)
        cross_check_archive_log(self._job_id, self.run_rman_params, 0)

        backup_database_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/backup_database_sql_{self._job_id}.sql"
        backup_database_rst = f"{PluginPathConstant.WINDOWS_TMP_PATH}/backup_database_rst_{self._job_id}.sql"
        self.build_backup_sql(backup_database_sql, data_min_scn)

        start_time = time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime(time.time()))
        logger.info("Start running RMAN to backup")
        ret_code = execute_windows_rman_cmd(self._job_id, self.run_rman_params, 1, backup_database_sql,
                                            backup_database_rst)
        if ret_code != ScriptExitCode.SUCCESS:
            # Decode the RMAN result file with the platform default encoding.
            encoding = locale.getdefaultlocale()[1]
            logger.error(f"Backup database-{self.db_instance} failed, error_code:{ret_code}, pid:{self._job_id}, "
                         f"error is:{read_result_file(backup_database_rst, encoding=encoding)}")
            delete_file(backup_database_sql)
            delete_file(backup_database_rst)
            delete_file(self.dbf_info)
            return ret_code

        end_time = time.strftime('%Y-%m-%d_%H:%M:%S', time.localtime(time.time()))
        logger.info(f"Backup start time is {start_time} end time is {end_time}, pid:{self._job_id}.")
        delete_file(backup_database_sql)
        delete_file(backup_database_rst)
        logger.info(f"Backup database-{self.db_instance} success, pid:{self._job_id}.")
        ret_code = self.check_backup_archive_log_lost(data_min_scn)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"check_backup_archive_log_lost failed, pid:{self._job_id}")
            return ret_code
        self.backup_scn_dbf_max_content()
        return ScriptExitCode.SUCCESS

    def backup_scn_dbf_max_content(self):
        """Record the last archived-log SCN/time: build the result-file content
        and write the scn_dbf_max metadata file under the additional dir."""
        last_log_scn, last_log_time = get_last_log_scn_info(self.login_db_params, self.log_is_backed_up,
                                                            self._job_id)
        last_log_timestamp = add_unix_timestamp(last_log_time)

        self.result_file = f"{PluginPathConstant.WINDOWS_TMP_PATH}/result_tmp{self._job_id}"
        self.result_content = f"last_backup_scn;{last_log_scn}\n" \
                              f"timeStamp;{last_log_timestamp}\n" \
                              f"resetlogs_id;{self.resetlogs_id}\n" \
                              f"check_archive_log_lost;{self.check_archive_log_lost}\n"

        scn_dbf_max_content = f"{last_log_scn} {last_log_time} {last_log_timestamp}\n"
        write_tmp_file(os.path.join(self.additional, "scn_dbf_max"), scn_dbf_max_content)
        logger.info(f"Databackuprst;{last_log_scn};{last_log_time};{last_log_timestamp};{self.resetlogs_id}.")

    def check_backup_archive_log_lost(self, from_scn):
        """Check all DB nodes for lost archive logs in [from_scn, current min scn].

        A detected loss does NOT fail the backup: it only flips
        self.check_archive_log_lost to "true" for the result file.
        Delegates to the module-level check_backup_archive_log_lost function.

        NOTE(review): the ret from get_database_node_list is not checked
        before node_list is used — confirm intended.
        """
        ret_code, end_scn = get_datafile_mini_scn(self._job_id, self.db_instance, self.db_user, self.db_password)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Get datafile mini scn failed, pid:{self._job_id}.")
            return ret_code
        ret, node_list = get_database_node_list(self._job_id, self.db_instance, self.db_user, self.db_password)
        cmd_param = {
            'from_scn': from_scn,
            'end_scn': end_scn,
            'main_backup_path': self.main_backup_path,
            'resetlogs_id': self.resetlogs_id
        }
        ret_code = check_backup_archive_log_lost(self.login_db_params, cmd_param, node_list)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"check_backup_archive_log_lost failed.")
            self.check_archive_log_lost = "true"
        return ScriptExitCode.SUCCESS

    def backup_control_file(self):
        """Record control files, spfile (when started from spfile), Data Guard
        config, datafile/tablespace info and online-redo-log info into the
        additional directory.

        :return: SUCCESS or RETURN_INVALID on any recording failure.
        """
        ret_code = move_pfile_to_stmp(self._job_id, self.main_backup_path, self.db_name)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Move pfile to stmp failed.")
            return RETURN_INVALID

        self.result_content += f"pfile;tempfile_{self._job_id}\n" \
                               f"BackupLevel;{self.level}\n"
        logger.info(f"pfile: tempfile_{self._job_id}, BackupLevel: {self.level}, pid:{self._job_id}.")

        logger.info(f"Recording database control files, pid: {self._job_id}.")
        # get_dbfiles mode 0 = control files, 1 = spfile, 2 = Data Guard config.
        ret_code, control_file_info = get_dbfiles(self._job_id, self.db_instance, self.db_user, self.db_password, 0)
        if ret_code != ScriptExitCode.SUCCESS or not control_file_info:
            logger.error(f"Get control file info failed, pid:{self._job_id}")
            return RETURN_INVALID
        write_tmp_file(f"{self.additional}\\ctrlfiles", control_file_info)

        logger.info(f"Recording database spfile, pid: {self._job_id}.")
        if self.start_with_spfile_flag:
            ret_code, spfile_info = get_dbfiles(self._job_id, self.db_instance, self.db_user, self.db_password, 1)
            if ret_code != ScriptExitCode.SUCCESS or not spfile_info:
                logger.error(f"Get spfile file info failed. pid:{self._job_id}.")
                return RETURN_INVALID
            write_tmp_file(f"{self.additional}\\spfile", spfile_info)

        logger.info(f"Recording database Data Guard configuration file, pid: {self._job_id}.")
        ret_code, dg_controlfile_info = get_dbfiles(self._job_id, self.db_instance, self.db_user, self.db_password, 2)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Get data guard conf file info failed, pid:{self._job_id}.")
            return RETURN_INVALID
        write_tmp_file(f"{self.additional}\\dataguardconffiles", dg_controlfile_info)
        logger.info(f"Recording database data files and tablespace, pid:{self._job_id}.")
        db_files_path = os.path.join(self.additional, "dbfiles")
        if os.path.exists(db_files_path):
            delete_file(db_files_path)
        self.get_dbf_info_content(self.dbf_info)
        delete_file(self.dbf_info)

        logger.info(f"Recording database online log info, pid: {self._job_id}.")
        tmp_rst = f"{PluginPathConstant.WINDOWS_TMP_PATH}/get_online_log_info_rst_{self._job_id}.txt"
        ret_code = get_online_redo_log_info(self._job_id, self.db_instance, self.db_user, self.db_password, tmp_rst)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Get database online log file info failed, pid: {self._job_id}.")
            delete_file(tmp_rst)
            return RETURN_INVALID

        log_files_path = os.path.join(self.additional, "logfiles")
        if os.path.exists(log_files_path):
            delete_file(log_files_path)
        self.get_log_file_content(tmp_rst)
        delete_file(tmp_rst)

        return ScriptExitCode.SUCCESS

    def backup_additional_file(self):
        """Copy additional info, build the file list, flush the result file, and
        mark the archive logs unavailable so other jobs cannot reuse them.

        NOTE(review): self.level comes from params.get('Level') and is never
        cast to int, so `self.level == 0` may never match a string value —
        confirm the framework supplies an int here.
        """
        logger.info(f"Copy some additional info, pid: {self._job_id}.")
        params = {
            'oracle_home': self.oracle_home,
            'db_name': self.db_name,
            'db_instance': self.db_instance,
            'asm_id_name': self.asm_id_name
        }
        generate_additional_info(self._job_id, self.additional, params)
        if self.level == 0 and not os.path.exists(f"{self.additional}\\first_backup_success"):
            create_empty_file(f"{self.additional}\\first_backup_success")
            logger.info(f"Create first backup success file success, pid:{self._job_id}.")

        file_list = f"{PluginPathConstant.WINDOWS_TMP_PATH}/filelist{self._job_id}"
        get_dir_file_info(self._job_id, self.main_backup_path, file_list)
        self.result_content += f"filelist;filelist{self._job_id}\n"
        logger.info(f"Filelist: filelist{self._job_id}.")
        write_tmp_file(self.result_file, self.result_content)

        # Mark the archive log records unavailable so other backup jobs won't use them.
        ret_code = change_archive_log_unavailable(self._job_id, self.run_rman_params, 1, self.archive, "data")
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Change archive log unavailable failed, pid:{self._job_id}.")
            return ret_code
        return ScriptExitCode.SUCCESS

    def set_backup_log_parameters(self):
        """Log the log-backup parameters, default/validate the channel count and
        set up the RMAN encryption section (same rules as the data backup)."""
        logger.info(
            f"PID={self._job_id},DBINSTANCE={self.params.get('InstanceName')},DBNAME={self.db_name},"
            f"DBUUID={self.db_uuid},DBUSER={self.db_user},DBCHANNEL={self.channels},ARCHIVE={self.archive},"
            f"QOS={self.qos},TRUNCATELOG={self.truncate_log},ENCALGO={self.params.get('EncAlgo')},"
            f"LAST_BACKUP_SCN={self.last_backup_scn},LAST_BACKUP_RESET_LOGS_ID={self.last_backup_reset_logs_id},"
            f"IPPORTINFO={self.ip_port_info},NODENUMBER={self.node_number}")

        if self.channels == 0:
            self.channels = 4
            logger.warning(f"Setting channels number to {self.channels} by default.")

        if self.channels > 254:
            logger.error("Channel params is invalid.")
            return RETURN_INVALID

        enc_algo = self.params.get('EncAlgo')
        enc_key = self.params.get('EncKey')
        if enc_algo and enc_key:
            self.is_enc_bk = 1
            self.rman_enc_section = f"configure encryption for database on;\n" \
                                    f"configure encryption algorithm '{enc_algo}';\n" \
                                    f"set encryption on identified by \"{enc_key}\" only;\n" \
                                    f"set decryption identified by \"{enc_key}\";\n"
            logger.info(f"Backup database will enc.")
        return ScriptExitCode.SUCCESS

    def backup_log_check_host_environment(self):
        """Host checks for the log backup path.

        :return: (ret_code, log_is_backed_up, db_instance). On failure the two
                 string values may be empty.
        NOTE(review): the name-like filter here uses forward slashes while the
        data-backup filter uses backslashes — confirm both match what Oracle
        records on Windows.
        """
        log_is_backed_up = ""
        db_instance = ""
        self.db_is_cluster = check_oracle_install_type()
        [self.ora_version, self.ora_pre_version, self.ora_pre_version_extend] = get_oracle_version(self._pid)
        if not check_sqlplus_status() or not check_rman_status():
            return RETURN_INVALID, log_is_backed_up, db_instance

        # Initialize the common rman/oracle login parameters.
        self.init_login_params()

        db_instance = get_real_instance_name(self.login_db_params, self.db_is_cluster, self.db_name)
        if not db_instance:
            logger.error(f"DB real instance name is invalid.")
            return RETURN_INVALID, log_is_backed_up, db_instance
        logger.info(f"Check cluster [db_is_cluster={self.db_is_cluster}], db_instance={db_instance}.")

        ret_code, base_info = get_database_info(self._job_id, db_instance, self.db_user, self.db_password)
        if ret_code != ScriptExitCode.SUCCESS or not base_info:
            logger.error(f"Get database base info failed.")
            return ret_code, log_is_backed_up, db_instance

        [log_mode, db_id, uniq_name, self.open_mode, incarnation, self.resetlogs_id] = base_info
        if log_mode != ArchiveLogMode.ARCHIVELOG.value:
            logger.error(f"Archive Mode=No Archive Mode, check archive mode failed.")
            return ScriptExitCode.ERROR_ORACLE_NOARCHIVE_MODE, log_is_backed_up, db_instance
        create_dir(self._job_id, os.path.join(self.archive, f"resetlogs_id{self.resetlogs_id}"), self.asm_id_name,
                   self.oracle_home)
        log_is_backed_up = f"{LOG_IS_VALID} and name like '{self.archive}/resetlogs_id{self.resetlogs_id}/arch_%'"
        return ScriptExitCode.SUCCESS, log_is_backed_up, db_instance

    def backup_log_prepare_for_backup(self, log_is_backed_up, db_instance):
        """Determine from_scn (framework-supplied or queried), force a log
        switch when the DB is writable, verify no logs are lost, count the logs
        to back up and check primary/standby sequence consistency.

        :return: SUCCESS or the specific failure code of the failing check.
        """
        # from scn
        logger.info(f"Begin to exec SQL Get From Scn")
        if self.last_backup_scn:
            # A resetlogs-id mismatch means the DB was opened with RESETLOGS
            # since the last backup: incremental log chain is broken.
            if str(self.last_backup_reset_logs_id) != str(self.resetlogs_id):
                logger.error(f"Last_backup_resetlogs_id({self.last_backup_reset_logs_id}) not match current "
                             f"resetlogs_id({self.resetlogs_id}), backup data first.")
                return ScriptExitCode.ERROR_ORACLE_FIRST_BACKUP_FAILED
            self.from_scn = self.last_backup_scn
            logger.info(f"Last_backup_scn={self.from_scn}")
        else:
            ret, last_log_info = get_last_log_scn(self.login_db_params, "", log_is_backed_up)
            if ret != ScriptExitCode.SUCCESS:
                logger.error(f"Get Last archive log failed")
                return RETURN_INVALID
            self.from_scn = last_log_info[0]
            logger.info(f"Last backup archive log is {self.from_scn}.")

        ret, node_list = get_database_node_list(self._job_id, db_instance, self.db_user, self.db_password)
        if ret != ScriptExitCode.SUCCESS:
            logger.error(f"Get database node list failed")
            return RETURN_INVALID
        # TODO: manual archive to produce archive logs (skip on read-only standby).
        if self.open_mode != "READONLY" and self.open_mode != "READONLYWITHAPPLY":
            ret_code = create_archive_log(self._job_id, db_instance, self.db_user, self.db_password)
            if ret_code != ScriptExitCode.SUCCESS:
                logger.error(f"Create archive log failed, pid:{self._job_id}.")
                return ret_code
        ret_code = check_archive_log_lost(self.login_db_params, self.from_scn, self.resetlogs_id, node_list)
        if ret_code != ScriptExitCode.SUCCESS:
            logger.error(f"Can not exec log backup must be a full backup.")
            return ret_code
        backup_log_path = get_install_head_path().upper()
        logger.info(f"Get backup log path: {backup_log_path}, pid: {self._job_id}.")
        self.log_num = get_archive_log_num(self.login_db_params, self.from_scn, backup_log_path)
        ret = check_standby_database_sequence(self.login_db_params, self.oracle_home)
        if ret != ScriptExitCode.SUCCESS:
            logger.error(f"The max sequence of primary and standby database is not consistent!")
            return ScriptExitCode.ERROR_PRIMARY_STANDBY_DIFFERENT
        logger.info(f"Success prepare log for backup, log_num={self.log_num}, pid={self._job_id}.")

        return ScriptExitCode.SUCCESS

    def after_backup_log_file(self):
        """Post-log-backup bookkeeping: record the backed-up archive-log range,
        write the result file, remove the stale from-scn directory and mark
        the archive log records unavailable."""
        # get archive log range
        ret = get_archivelog_current_range(self.login_db_params, self.archive, self.resetlogs_id, self.from_scn)
        if ret != ScriptExitCode.SUCCESS:
            logger.error("get_archivelog_current_range failed! ret = %d, pid = %s", ret, self._job_id)
            return ret

        log_backup_rst = get_all_archivelog_range(self._job_id, self.archive)
        result_file = f"{PluginPathConstant.WINDOWS_TMP_PATH}/result_tmp{self._job_id}"
        write_tmp_file(result_file, log_backup_rst)

        remove_path = f"{self.archive}\\{self.last_backup_scn}"
        delete_file(remove_path)

        # Mark the archive log records unavailable so other backup jobs won't use them.
        ret = change_archive_log_unavailable(self._job_id, self.run_rman_params, 1, self.archive, "log")
        if ret != ScriptExitCode.SUCCESS:
            logger.error("change_archive_log_unavailable failed! ret = %d, pid = %s", ret, self._job_id)
            return ret
        logger.info("Do log backup success! pid = %s", self._job_id)
        return ScriptExitCode.SUCCESS

    def backup_log_windows(self):
        """Entry point for an archive-log backup: parameter setup, host checks,
        preparation, RMAN log backup, then post-backup bookkeeping.

        :return: ScriptExitCode.SUCCESS or the first failing step's code.
        """
        ret = self.set_backup_log_parameters()
        if ret != ScriptExitCode.SUCCESS:
            return RETURN_INVALID

        ret, log_is_backed_up, db_instance = self.backup_log_check_host_environment()
        if ret != ScriptExitCode.SUCCESS:
            return ret

        ret = self.backup_log_prepare_for_backup(log_is_backed_up, db_instance)
        if ret != ScriptExitCode.SUCCESS:
            return ret

        logger.info(f"Building RMAN Log backup script, pid：{self._job_id}.")
        log_backup_params = self.get_build_log_backup_params()
        log_backup_sql_cmd = build_backup_archive_log_sql(log_backup_params)

        logger.info(f"Running RMAN to backup archive logs, pid：{self._job_id}.")
        ret_code = backup_windows_log(self._job_id, log_backup_sql_cmd, self.run_rman_params, enc_type=1)
        if ret_code != ScriptExitCode.SUCCESS:
            return ret_code
        logger.info(f"Backup database-{db_instance} archive log success, pid：{self._job_id}.")

        ret = self.after_backup_log_file()
        if ret != ScriptExitCode.SUCCESS:
            return ret
        return ScriptExitCode.SUCCESS


def _read_sequence_and_scn_result(tmp_rst):
    """Parse the sqlplus spool file produced by get_sequence_and_scn.

    Each data line is expected to hold three whitespace-separated columns:
    sequence#, first_change#, next_change#. Lines with fewer than three
    tokens (blank lines, stray headers) are skipped instead of raising.

    :param tmp_rst: path of the spool/result file to read.
    :return: (sequence_list, first_change_list) as lists of int.
    """
    sequence_list = []
    first_change_list = []
    # locale.getpreferredencoding(False) is the documented replacement for
    # the deprecated locale.getdefaultlocale()[1] system-encoding lookup.
    encoding = locale.getpreferredencoding(False)
    with open(tmp_rst, 'r', encoding=encoding) as file_read:
        for line in file_read.read().splitlines():
            logger.info(f"line = {line}")
            fields = line.split()
            if len(fields) < 3:
                # Skip noise lines rather than crash on int() / indexing.
                continue
            sequence_list.append(int(fields[0].strip()))
            first_change_list.append(int(fields[1].strip()))
    return sequence_list, first_change_list


def check_backup_archive_log_lost(params, cmd_param, node_list):
    """Check that no archive log is missing before running a log backup.

    For each node (thread#) the archived-log range is queried via sqlplus;
    a node passes when its logs cover from_scn (first log's first_change#
    is not beyond from_scn) and the sequence numbers are consecutive.
    Succeeds as soon as one node passes; fails when every node has a gap.

    :param params: job params (pid, instance_name, db_user, db_password).
    :param cmd_param: query params (from_scn, resetlogs_id, end_scn,
        main_backup_path); cmd_param["node"] is set per iteration.
    :param node_list: list of thread numbers to examine.
    :return: ScriptExitCode.SUCCESS when some node has a complete log
        chain, the sqlplus error code on query failure, otherwise
        ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED.
    """
    pid = params.get("pid", "")
    instance_name = params.get("instance_name", "")
    db_user = params.get("db_user", "")
    from_scn = cmd_param.get("from_scn", "")
    logger.info(f"check_backup_archive_log_lost, pid: {pid}, instance name: {instance_name}, user name: {db_user}.")
    tmp_sql = f"{PluginPathConstant.WINDOWS_TMP_PATH}/get_sequence_and_scn_{pid}.sql"
    tmp_rst = f"{PluginPathConstant.WINDOWS_TMP_PATH}/get_sequence_and_scn_rst_{pid}.txt"

    # try/finally consolidates the tmp-file cleanup that was previously
    # duplicated on every exit path.
    try:
        for node in node_list:
            cmd_param["node"] = node
            ret_code = get_sequence_and_scn(params, tmp_sql, tmp_rst, cmd_param)
            if ret_code != ScriptExitCode.SUCCESS:
                logger.info("CheckLogLost failed, ret = %d; pid = %s", ret_code, pid)
                return ret_code
            sequence_list, first_change_list = _read_sequence_and_scn_result(tmp_rst)
            if not first_change_list:
                logger.error(f"node {node} has log lost. first_change_list is null. Backup failed.")
                continue
            first_change = first_change_list[0]
            logger.info(f"first_change = {first_change}, from_scn = {from_scn}")
            if int(first_change) > int(from_scn):
                logger.error(f"node {node} has log lost. first_change {first_change} > from_scn {from_scn}. Backup failed.")
                continue

            if not check_int_list_is_consecutive(sequence_list):
                logger.error(f"sequence_list {sequence_list} not consecutive. Backup failed.")
                continue
            logger.info(f"log_is_full")
            return ScriptExitCode.SUCCESS
        return ScriptExitCode.ERROR_SCRIPT_EXEC_FAILED
    finally:
        delete_file(tmp_sql)
        delete_file(tmp_rst)


def check_int_list_is_consecutive(int_list):
    """Return True when int_list is consecutive ascending integers.

    Each element must equal the previous element plus one. An empty list
    is trivially consecutive (the original raised IndexError on []).

    :param int_list: list of ints (e.g. archive-log sequence numbers).
    :return: True when consecutive (or empty), False otherwise.
    """
    if not int_list:
        # Guard: int_list[0] on an empty list raised IndexError before.
        return True
    start = int_list[0]
    return all(value == start + offset for offset, value in enumerate(int_list))


def get_sequence_and_scn(params, tmp_sql, tmp_rst, cmd_param):
    """Spool sequence#/first_change#/next_change# of one node's archived
    logs in the (from_scn, end_scn] range into tmp_rst via sqlplus.

    Writes a temporary SQL*Plus script (SPOOL + query + exit), silences it
    with set_db_silence_sql, then runs it with execute_windows_sqlplus_cmd.

    :param params: job params (pid, instance_name, db_user, db_password).
    :param tmp_sql: path of the temporary SQL script to create.
    :param tmp_rst: path of the spool/result file the query writes to.
    :param cmd_param: node (thread#), resetlogs_id, from_scn, end_scn,
        main_backup_path used to build the v$archived_log query.
    :return: exit code of execute_windows_sqlplus_cmd.
    """
    pid = params.get("pid", "")
    instance_name = params.get("instance_name", "")
    db_user = params.get("db_user", "")
    db_password = params.get("db_password", "")

    node = cmd_param.get("node", "")
    resetlogs_id = cmd_param.get("resetlogs_id", "")
    from_scn = cmd_param.get("from_scn", "")
    end_scn = cmd_param.get("end_scn", "")
    main_backup_path = cmd_param.get("main_backup_path", "")

    # NOTE(review): values are interpolated directly into the SQL text; they
    # appear to come from internal job parameters, not end-user input —
    # confirm upstream before exposing this path to external callers.
    sql_cmd = f"select to_char(sequence#), to_char(first_change#), to_char(next_change#), name from v$archived_log " \
              f"where name like '{main_backup_path}\\log\\arch_%' and THREAD#={node} and RESETLOGS_ID={resetlogs_id} " \
              f"and (deleted = 'NO') and (ARCHIVED='YES') and (STATUS='A') and next_change# > {from_scn} " \
              f"and first_change# <= {end_scn} order by first_change#;".upper()
    # Bug fix: "SPOOL OFF" and "exit" were implicitly concatenated without a
    # newline, emitting the invalid script line "SPOOL OFFexit".
    content = f"SPOOL {tmp_rst}\n" \
              f"set linesize 999\n" \
              f"COL name FORMAT a500\n" \
              f"COL DB_UNIQUE_NAME FORMAT a20\n" \
              f"{sql_cmd}\n" \
              f"SPOOL OFF\n" \
              f"exit"
    write_tmp_file(tmp_sql, content)
    set_db_silence_sql(tmp_sql)
    temp_params = {
        'is_silence': 1,
        'db_user': db_user,
        'db_password': db_password
    }
    ret_code = execute_windows_sqlplus_cmd(pid, temp_params, tmp_sql, tmp_rst, instance_name)
    return ret_code
