#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import re
import time

from common import common
from common.common_models import CopyInfoRepModel
from common.const import BackupTypeEnum, RepositoryDataTypeEnum, CMDResult, CopyDataTypeEnum, RepoProtocalType, Encoding
from common.logger import Logger
from common.number_const import NumberConst
from db2.backup.util.db2_backup_util import Db2BackupUtil
from db2.comm.const import Db2JsonConstant, Db2Const
from db2.comm.db2_cmd import get_lang_value
from db2.comm.db2_exception import ErrCodeException
from db2.comm.error_code import Db2ErrCode
from db2.comm.models.backup_models import LogBackupParam
from db2.comm.util.common_util import Db2CommonUtil

LOGGER = Logger().get_logger(filename="db2.log")


class DpfDbBackupService:
    """Backup service for DB2 DPF (Database Partitioning Feature) clusters.

    Provides online full/incremental/differential data backups across all
    partitions, archive-log backups, copy information reporting, abort
    handling for running backups, and post-job repository cleanup.
    """

    def __init__(self, pid, job_id, sub_job_id, param_util):
        """
        :param pid: process id of the current request, used only for log tracing
        :param job_id: main backup job id
        :param sub_job_id: sub job id of the current step
        :param param_util: helper object that reads the job parameter file
            (repository paths, credentials, backup type, ...)
        """
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self.param_util = param_util

    def exec_full_backup_db(self, db_name):
        """Run an online full backup of ``db_name`` on all nodes, including logs.

        :return: (True, backup image timestamp) on success
        :raise ErrCodeException: when the backup command fails
        """
        LOGGER.info(f'Full database backup started. database_name: {db_name} pid: {self._pid}, job_id: {self._job_id}, '
                    f'sub_job_id: {self._sub_job_id}')
        user_name = self.param_util.get_database_user_name(Db2JsonConstant.JOB_PROTECTENV_NODES_0_AUTH_AUTHKEY)
        real_full_path = self.pre_backup_path(user_name, db_name, "full")
        # Check user_name as well, for consistency with exec_diff_backup_db:
        # both values are interpolated into the shell command below.
        Db2CommonUtil.check_injection(user_name, db_name)
        backup_cmd = f'su - {user_name} -c \'db2 backup db {db_name} on all nodes online to {real_full_path}' \
                     f' include logs\''
        return self.execute_backup(db_name, backup_cmd)

    def exec_increment_backup_db(self, db_name):
        """Run an online incremental (delta) backup of ``db_name`` on all nodes.

        :return: (True, backup image timestamp) on success
        :raise ErrCodeException: when the backup command fails
        """
        LOGGER.info(f'Increment database backup started. database_name: {db_name} pid: {self._pid}, job_id: '
                    f'{self._job_id}, sub_job_id: {self._sub_job_id}')
        user_name = self.param_util.get_database_user_name(Db2JsonConstant.JOB_PROTECTENV_NODES_0_AUTH_AUTHKEY)
        real_increment_path = self.pre_backup_path(user_name, db_name, "incr")
        # Check user_name as well, for consistency with exec_diff_backup_db.
        Db2CommonUtil.check_injection(user_name, db_name)
        backup_cmd = f"su - {user_name} -c 'db2 backup db {db_name} on all nodes online incremental delta " \
                     f"to {real_increment_path} include logs'"
        return self.execute_backup(db_name, backup_cmd)

    def exec_diff_backup_db(self, db_name):
        """Run an online cumulative incremental (differential) backup of ``db_name``.

        :return: (True, backup image timestamp) on success
        :raise ErrCodeException: when the backup command fails
        """
        LOGGER.info(f'Individual database backup started. database_name: {db_name} pid: {self._pid}, job_id: '
                    f'{self._job_id}, sub_job_id: {self._sub_job_id}')
        user_name = self.param_util.get_database_user_name(Db2JsonConstant.JOB_PROTECTENV_NODES_0_AUTH_AUTHKEY)
        diff_path = self.pre_backup_path(user_name, db_name, "diff")
        Db2CommonUtil.check_injection(user_name, db_name)
        backup_cmd = f"su - {user_name} -c 'db2 backup db {db_name} on all nodes online incremental " \
                     f"to {diff_path} include logs'"
        return self.execute_backup(db_name, backup_cmd)

    def execute_backup(self, db_name, backup_cmd):
        """Execute a db2 backup command and parse the backup image timestamp.

        :raise ErrCodeException: RUNNING_BACKUP_TASK_EXISTS when another backup
            already holds the database (SQL2048N, reason code 6);
            FAILED_EXECUTE_COMMAND on any other failure.
        :return: (True, backup image timestamp)
        """
        LOGGER.info(f"Execute backup command: {backup_cmd}.")
        ret_code, std_out, std_err = common.execute_cmd(backup_cmd)
        # SQL2048N reason code 6: a backup task is already running on the db.
        if "SQL2048N" in std_out and 'Reason code: "6"' in std_out:
            raise ErrCodeException(Db2ErrCode.RUNNING_BACKUP_TASK_EXISTS, db_name)
        # Use the shared CMDResult constant instead of a magic '0' literal,
        # consistent with execute_abort_sh below.
        if ret_code != CMDResult.SUCCESS.value or "errors" in std_out:
            LOGGER.error(f'Failed to execute the backup command. return code: {ret_code}, std_out: {std_out}, std_err:'
                         f'{std_err}, pid: {self._pid}, job_id: {self._job_id}, sub_job_id: {self._sub_job_id}')
            raise ErrCodeException(Db2ErrCode.FAILED_EXECUTE_COMMAND, *[backup_cmd, std_out])
        bak_img_timestamp = Db2BackupUtil.parse_backup_image_timestamp(std_out)
        LOGGER.info(f'The backup of the database is complete. database_name: {db_name}, '
                    f'backup image timestamp: {bak_img_timestamp}, pid: {self._pid},'
                    f' job_id: {self._job_id}, sub_job_id: {self._sub_job_id}')
        return True, bak_img_timestamp

    def pre_backup_path(self, user_name, db_name, backup_type):
        """Create (if needed) and return the per-job backup directory.

        The directory is created under the data repository, named
        ``<backup_type>_<job_id>``, and chowned to the DB2 instance user so
        the db2 backup process can write into it.
        """
        LOGGER.info(f'Database backup started. database_name: {db_name} pid: {self._pid}, job_id: '
                    f'{self._job_id}, sub_job_id: {self._sub_job_id}')
        data_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.DATA_REPOSITORY)
        backup_path = os.path.join(data_path, f'{backup_type}_{self._job_id}')
        Db2CommonUtil.check_injection(backup_path)
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
            user_id = Db2CommonUtil.get_os_user_id_by_os_user(user_name)
            group_id = Db2CommonUtil.get_group_id_by_os_user(user_name)
            os.lchown(backup_path, user_id, group_id)
        LOGGER.info(f'Get copy path:{backup_path}, pid: {self._pid}, job_id: {self._job_id}, '
                    f'sub_job_id: {self._sub_job_id}')
        return backup_path

    def query_copy_info_handle(self):
        """Build the copy repository info list for reporting the new copy.

        The remote path is extended with the per-job directory created by
        :meth:`pre_backup_path` (``<full|incr|diff>_<job_id>``).

        :return: list with one CopyInfoRepModel for the data repository
        """
        pre_fix_map = {
            BackupTypeEnum.FULL_BACKUP.value: "full",
            BackupTypeEnum.INCRE_BACKUP.value: "incr",
            BackupTypeEnum.DIFF_BACKUP.value: "diff",
        }
        backup_type = self.param_util.get_backup_type()
        pre_fix_str = pre_fix_map.get(backup_type)
        copy_path = f'{pre_fix_str}_{self._job_id}'
        rep_copy_info = self.param_util.get_copy_rep_info(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        rep_rsp = [
            CopyInfoRepModel(
                id=rep_copy_info.get('id'),
                repositoryType=rep_copy_info.get("repositoryType"),
                isLocal=rep_copy_info.get("isLocal"), protocol=RepoProtocalType.NFS.value,
                remotePath=f"{rep_copy_info.get('remotePath')}/{copy_path}",
                remoteHost=rep_copy_info.get("remoteHost"),
                extendInfo=rep_copy_info.get('extendInfo')
            )
        ]
        return rep_rsp

    def save_backup_time_info(self, bak_img_timestamp, last_archive_log=None, copy_log_ext_info=None, end_time=None):
        """Persist backup time info to the cache repository as JSON.

        :param bak_img_timestamp: timestamp of the produced backup image
        :param last_archive_log: latest manually archived log file, optional
        :param copy_log_ext_info: extra log-copy metadata merged in, optional
        :param end_time: data backup end time, optional
        """
        backup_info_dict = {
            Db2JsonConstant.BAK_IMG_TIMESTAMP: bak_img_timestamp
        }
        if end_time:
            backup_info_dict[Db2JsonConstant.DB2_DATA_BAK_END_TIME] = end_time
        if last_archive_log:
            backup_info_dict["lastArchiveLogFile"] = last_archive_log
        if copy_log_ext_info:
            backup_info_dict.update(copy_log_ext_info)
        LOGGER.info(f"Start saving backup time info: {backup_info_dict}, job id: {self._job_id}.")
        cache_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.CACHE_REPOSITORY)
        output_file_name = f"backupTimeInfo{self._job_id}"
        output_file_path = os.path.join(cache_path, output_file_name)
        common.output_execution_result(output_file_path, backup_info_dict)
        LOGGER.info(f"Save backup time info success, file: {output_file_path}.")

    def query_backup_time_info(self):
        """Read back the backup time info written by :meth:`save_backup_time_info`.

        :return: parsed dict, or an empty dict when the file does not exist
        """
        bak_info_dict = dict()
        LOGGER.info(f"Start querying backup time info, job id: {self._job_id}.")
        cache_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.CACHE_REPOSITORY)
        output_file_name = f"backupTimeInfo{self._job_id}"
        output_file_path = os.path.join(cache_path, output_file_name)
        if not os.path.exists(output_file_path):
            LOGGER.info(f"The file used to save the backup time info does not exist, file: {output_file_path}, "
                        f"job id: {self._job_id}.")
            return bak_info_dict
        with open(output_file_path, "r", encoding=Encoding.INTERNAL_ENCODING) as tmp_file:
            bak_info_dict = json.loads(tmp_file.read())
        LOGGER.info(f"Query backup time info success, content: {bak_info_dict}, file: {output_file_path}.")
        return bak_info_dict

    def abort_database_backup_job(self, user_name, db_name):
        """Force off the running backup application of ``db_name``, if any.

        :return: True when no backup is running or it was stopped successfully;
            False when the stop command failed
        """
        Db2CommonUtil.check_injection(user_name, db_name)
        find_backup_cmd = [f"su - {user_name} -c 'db2 list application show detail'", "grep Backup", f"grep {db_name}"]
        std_out = self.execute_abort_sh(find_backup_cmd)
        if not std_out:
            # No matching backup application found: nothing to abort.
            return True
        # NOTE(review): assumes columns are single-space separated and the
        # application handle is the third field -- confirm against real
        # "db2 list application show detail" output.
        backup_info = std_out.split(" ")
        if len(backup_info) < 3:
            # Guard against IndexError on unexpected output format.
            LOGGER.error(f"Failed to parse application handle from backup info, job id: {self._job_id}.")
            return False
        Db2CommonUtil.check_injection(backup_info[2])
        stop_cmd = f"su - {user_name} -c 'db2 \"force application {backup_info[2]}\"'"
        stop_out = self.execute_abort_sh(stop_cmd)
        if not stop_out:
            return False
        # SQL0104N: the backup task no longer exists, treat as success.
        if "successfully" in stop_out or "SQL0104N" in stop_out:
            return True
        return False

    def execute_abort_sh(self, cmd_sh):
        """Execute an abort-related command (piped list or single string).

        :return: stdout of the command on success, False on failure
        """
        if isinstance(cmd_sh, list):
            ret_code, std_out, std_err = common.execute_cmd_list(cmd_sh)
        else:
            ret_code, std_out, std_err = common.execute_cmd(cmd_sh)
        if ret_code != CMDResult.SUCCESS.value or "errors" in std_out:
            LOGGER.error(f"Execute cmd error {str(std_out)}, job id: {self._job_id}.")
            return False
        return std_out

    def backup_post_job(self, backup_result):
        """Post-job cleanup: always clear the cache repository; on failure
        (``backup_result != 0``) also clear the meta and data repositories.
        """
        if backup_result != 0:
            # Backup failed: remove partially written meta/data content.
            meta_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.META_REPOSITORY)
            common.clean_dir(meta_path)
            data_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.DATA_REPOSITORY)
            common.clean_dir(data_path)
        cache_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.CACHE_REPOSITORY)
        common.clean_dir(cache_path)

    def handle_archive_log_for_log_backup(self, os_user, db_name):
        """Trigger a manual log archive and record the resulting log info.

        Activates the database, archives the current log, collects the latest
        archive log (overall and per partition), then saves the backup time
        info and deactivates the database again.
        """
        LOGGER.info(f"Handling manual archive log, os user: {os_user}, database: {db_name}.")
        cache_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.CACHE_REPOSITORY.value)
        app_info = self.param_util.get_applications()
        since_timestamp = Db2BackupUtil.build_list_his_since_timestamp_for_log_bak(
            app_info, self._job_id, self._sub_job_id, cache_path)
        Db2CommonUtil.activate_db_status(os_user, db_name)
        # Manually archive the current log file.
        Db2BackupUtil.execute_manual_archive_log(os_user, db_name)
        # Look up the newest manually archived log.
        archive_log, arch_cmd_end_time = Db2BackupUtil.get_latest_archive_log(
            os_user, db_name, since_timestamp=since_timestamp)
        log_chain = Db2CommonUtil.get_current_log_chain(os_user, db_name)
        # Query the start/end time of the newest manually archived log.
        log_param = LogBackupParam(user_name=os_user, db_name=db_name,
                                   log_chain=log_chain, since_timestamp=since_timestamp)
        log_time_tuple = Db2BackupUtil.get_arch_log_time(
            log_param, archive_log, arch_cmd_end_time=arch_cmd_end_time)
        num_list = Db2CommonUtil.get_partition_no(os_user, db_name, Db2Const.DPF_CLUSTER_TYPE)
        copy_log_ext_info = {
            Db2JsonConstant.COPY_LOG_EXT_KEY: {
                Db2JsonConstant.LOG_CHAIN_KEY: log_chain,
                Db2JsonConstant.PTN_NUM_LOG_MAP_KEY: {}
            }
        }
        # Record the latest archive log per partition for later integrity checks.
        for ptn_num in num_list:
            tmp_archive_log, _ = Db2BackupUtil.get_latest_archive_log(
                os_user, db_name, since_timestamp=since_timestamp, ptn_num=ptn_num,
                deploy_type=Db2Const.DPF_CLUSTER_TYPE)
            copy_log_ext_info[Db2JsonConstant.COPY_LOG_EXT_KEY][Db2JsonConstant.PTN_NUM_LOG_MAP_KEY][ptn_num] = \
                tmp_archive_log
        self.save_backup_time_info(log_time_tuple[1], last_archive_log=archive_log, copy_log_ext_info=copy_log_ext_info)
        Db2CommonUtil.deactivate_db_status(os_user, db_name)
        LOGGER.info(f"Handle manual archive log success, database: {db_name}.")

    def exec_log_backup_db(self, db_name, is_catalog_node):
        """Back up the archive logs of every partition hosted on this node.

        :param db_name: database to back up logs for
        :param is_catalog_node: True when running on the catalog node (the
            node responsible for recording the dependent copy info)
        :return: True on success
        :raise ErrCodeException: when the node's partitions cannot be resolved
        """
        LOGGER.info(f'Database log backup started. database_name: {db_name} pid: {self._pid}, job_id: '
                    f'{self._job_id}, sub_job_id: {self._sub_job_id}')
        user_name = self.param_util.get_database_user_name(Db2JsonConstant.JOB_PROTECTENV_NODES_0_AUTH_AUTHKEY)
        log_chain = Db2CommonUtil.get_current_log_chain(user_name, db_name)
        cache_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.CACHE_REPOSITORY.value)
        cur_node_ptn_nums = Db2CommonUtil.get_partition_no_by_hostname(user_name)
        if not cur_node_ptn_nums:
            LOGGER.error(f"The value of hostname is different from that in the configuration file.")
            raise ErrCodeException(Db2ErrCode.USER_BACKUP_ERROR, db_name)
        app_info = self.param_util.get_applications()
        since_timestamp = Db2BackupUtil.build_list_his_since_timestamp_for_log_bak(
            app_info, self._job_id, self._sub_job_id, cache_path)
        log_dict_param = LogBackupParam(user_name=user_name, db_name=db_name, log_chain=log_chain,
                                        cache_path=cache_path,
                                        cur_node_ptn_nums=cur_node_ptn_nums,
                                        is_catalog_node=is_catalog_node, since_timestamp=since_timestamp)
        ptn_log_locs_map = self.get_ptn_logs_dict_of_cur_node(log_dict_param)
        log_mount_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.LOG_REPOSITORY.value)
        # Create the NODExxxx directories in the log repository mount point.
        Db2CommonUtil.create_node_dir_for_dpf_log_backup(user_name, log_mount_path, cur_node_ptn_nums)
        log_path_list = []
        for ptn_num in cur_node_ptn_nums:
            cur_node_name = f'NODE{str(ptn_num).rjust(4, "0")}'
            tmp_archive_log_locs = ptn_log_locs_map.get(str(ptn_num), [])
            log_path_list.extend(tmp_archive_log_locs)
            self.backup_archive_logs_for_ptn(user_name, tmp_archive_log_locs, log_mount_path, cur_node_name)
        LOGGER.info(f'Database log backup success. database_name: {db_name} pid: {self._pid}, job_id: '
                    f'{self._job_id}, sub_job_id: {self._sub_job_id}')
        # Optionally delete the source logs after they were copied away.
        if self.param_util.get_delete_log() == "true":
            for log_path in log_path_list:
                if os.path.isfile(log_path):
                    Db2BackupUtil.remove_temp_file_ignore_exception(log_path)
        return True

    def get_ptn_logs_dict_of_cur_node(self, log_dict_param):
        """Resolve which archive logs each local partition must back up.

        Determines the copy the log backup depends on (latest log copy, or a
        full copy as fallback), verifies log integrity against it, and — on
        the catalog node — records the dependent copy info for copy reporting.

        :return: dict mapping partition number (str) to its log path list
        :raise ErrCodeException: when log integrity cannot be established
        """
        ptn_log_locs_map = dict()
        # Fetch the newest copy this log backup depends on.
        pre_copy_ext_info, depend_copy_type = self.get_previous_copy_ext_info(log_dict_param.cache_path,
                                                                              log_dict_param.log_chain)
        # When depending on a log copy whose log files are incomplete,
        # fall back to the previous full copy.
        if depend_copy_type == CopyDataTypeEnum.LOG_COPY.value:
            # Check log integrity against the log copy.
            is_complete, ptn_log_locs_map = self.check_integrity_of_all_need_bak_logs(log_dict_param, pre_copy_ext_info,
                                                                                      is_depend_log=True)
            if not is_complete:
                # Incomplete logs relative to the log copy: depend on full copy.
                pre_copy_ext_info, depend_copy_type = self.get_pre_full_log_ext_info(log_dict_param.cache_path,
                                                                                     log_dict_param.log_chain)
        if depend_copy_type == CopyDataTypeEnum.FULL_COPY.value:
            # Check log integrity against the full copy.
            is_complete, ptn_log_locs_map = self.check_integrity_of_all_need_bak_logs(log_dict_param, pre_copy_ext_info,
                                                                                      is_depend_log=False)
            if not is_complete:
                # Incomplete even relative to the full copy: hard error.
                raise ErrCodeException(Db2ErrCode.ERR_INVALID_LOG_COPY)
        pre_copy_bak_img_time = pre_copy_ext_info.get(Db2JsonConstant.BAK_IMG_TIMESTAMP)
        if not pre_copy_bak_img_time:
            err_msg = "The backup image timestamp of previous copy is empty when executing log backup"
            LOGGER.error(err_msg)
            raise ErrCodeException(Db2ErrCode.USER_BACKUP_ERROR, message=err_msg)
        if log_dict_param.is_catalog_node:
            # On the catalog node, persist the dependent copy info so the
            # copy report step can pick it up.
            depend_copy_end_time = pre_copy_ext_info.get(Db2JsonConstant.COPY_END_TIME) \
                if depend_copy_type == CopyDataTypeEnum.LOG_COPY.value \
                else pre_copy_ext_info.get(Db2JsonConstant.COPY_BAK_TIME)
            depend_copy_info = {
                Db2JsonConstant.LOG_BAK_DEPEND_COPY_TYPE: depend_copy_type,
                Db2JsonConstant.ASSOCIATED_COPIES: pre_copy_ext_info.get(Db2JsonConstant.ASSOCIATED_COPIES, []),
                Db2JsonConstant.COPY_END_TIME: depend_copy_end_time
            }
            self.write_depend_copy_info_for_log_bak(log_dict_param.cache_path, depend_copy_info)
        LOGGER.info(f"The copy type that log backup depends on is {depend_copy_type}, backup time: "
                    f"{pre_copy_bak_img_time}, database: {log_dict_param.db_name}.")
        return ptn_log_locs_map

    def check_integrity_of_all_need_bak_logs(self, log_dict_param: LogBackupParam, pre_copy_ext_info,
                                             is_depend_log=True):
        """
        Check the integrity of all archive logs that need to be backed up.

        :param log_dict_param: log backup parameters
        :param pre_copy_ext_info: extend info of the copy this backup depends on
        :param is_depend_log: True when depending on a log copy, False otherwise
        :return: (bool, dict) True complete / False incomplete; mapping of
            partition number to the list of logs to back up
        """
        LOGGER.info(
            f"Start checking integrity of all need backup logs, database: {log_dict_param.db_name}, "
            f"log chain: {log_dict_param.log_chain}, "
            f"is depend log: {is_depend_log}.")
        cache_path = self.param_util.get_repositories_path(RepositoryDataTypeEnum.CACHE_REPOSITORY)
        # Read the archive-log info recorded during the archive step.
        bak_info_dict = self.query_backup_time_info()
        ptn_log_locs_map = dict()
        for ptn_num in log_dict_param.cur_node_ptn_nums:
            tmp_stop_log = bak_info_dict.get(Db2JsonConstant.COPY_LOG_EXT_KEY, {}) \
                .get(Db2JsonConstant.PTN_NUM_LOG_MAP_KEY, {}).get(ptn_num)
            if not tmp_stop_log:
                err_msg = f"The archive log of partition {ptn_num} is empty when executing log backup"
                LOGGER.error(err_msg)
                raise ErrCodeException(Db2ErrCode.USER_BACKUP_ERROR, message=err_msg)
            tmp_pre_copy_stop_log = pre_copy_ext_info.get(Db2JsonConstant.COPY_LOG_EXT_KEY, {}) \
                .get(Db2JsonConstant.PTN_NUM_LOG_MAP_KEY, {}).get(ptn_num)
            log_dict_param.deploy_type = Db2Const.DPF_CLUSTER_TYPE
            tmp_archive_log_locs = Db2CommonUtil.get_need_backup_logs_for_partition(log_dict_param,
                                                                                    tmp_pre_copy_stop_log,
                                                                                    tmp_stop_log,
                                                                                    ptn_num=ptn_num,
                                                                                    is_depend_log=is_depend_log)
            LOGGER.info(
                f"Partition {ptn_num} need backup logs: {tmp_archive_log_locs}, database: {log_dict_param.db_name}.")
            # Verify that every required log file actually exists on disk.
            is_completed = Db2CommonUtil.check_file_path_has_existed(tmp_archive_log_locs)
            LOGGER.info(f"The partition {ptn_num} archive logs integrity: {is_completed}.")
            self.write_ptn_log_integrity_info(ptn_num, is_completed, cache_path, is_depend_log)
            if not is_completed:
                return False, dict()
            ptn_log_locs_map[str(ptn_num)] = tmp_archive_log_locs
        db_ptn_nums = Db2CommonUtil.get_database_partitionnums(log_dict_param.user_name, log_dict_param.db_name).split(
            ",")
        check_result = self.check_all_log_integrity(cache_path, db_ptn_nums, is_depend_log)
        if not check_result:
            LOGGER.warning(f"Check integrity of all need backup logs fail, database: {log_dict_param.db_name}.")
            return False, dict()
        LOGGER.info(f"Check integrity of all need backup logs success, database: {log_dict_param.db_name}.")
        return True, ptn_log_locs_map

    def write_ptn_log_integrity_info(self, ptn_num, is_completed, cache_path, is_depend_log):
        """Write the integrity marker file for one partition.

        The marker's file name encodes job id, dependency type, partition
        number and integrity result; other nodes poll for these markers in
        :meth:`check_all_log_integrity`.
        """
        LOGGER.info(f"Creating partition log integrity file, job id: {self._job_id}.")
        depend_tag = Db2Const.LOG_INTEGRITY_DEPEND_LOG_TAG if is_depend_log else Db2Const.LOG_INTEGRITY_DEPEND_DATA_TAG
        rec_f_name = f"{self._job_id}_{depend_tag}_{ptn_num}_{is_completed}"
        Db2CommonUtil.check_injection(rec_f_name)
        rec_f_path = os.path.join(cache_path, rec_f_name)
        common.touch_file(rec_f_path)
        LOGGER.info(f"Create partition log integrity file: {rec_f_name} success.")

    def check_all_log_integrity(self, cache_path, db_ptn_nums, is_depend_log):
        """Poll for the integrity markers of all partitions in the database.

        :return: True when every partition reported complete logs within the
            timeout; False on an incomplete report or timeout
        """
        LOGGER.info(f"Checking archive log integrity on all partitions: {db_ptn_nums}, job id: {self._job_id}")
        cur_time = int(time.time())
        # Poll timeout: 900 s (15 min).
        timeout_time = cur_time + 900
        ptn_num_set = set(db_ptn_nums)
        existed_ptn_num_set = set()
        depend_tag = Db2Const.LOG_INTEGRITY_DEPEND_LOG_TAG if is_depend_log else Db2Const.LOG_INTEGRITY_DEPEND_DATA_TAG
        log_integrity_end_reg = r"(\d{1,4})_(True|False)$"
        log_integrity_reg = f"{self._job_id}_{depend_tag}_{log_integrity_end_reg}"
        while int(time.time()) <= timeout_time:
            for f_name in os.listdir(cache_path):
                mat_ret = re.search(log_integrity_reg, f_name)
                if not mat_ret:
                    continue
                ptn_num, is_completed = mat_ret.groups()[0], "True" == mat_ret.groups()[1]
                if not is_completed:
                    LOGGER.warning(f"The archive log on partition {ptn_num} is incompleted.")
                    return False
                existed_ptn_num_set.add(ptn_num)
            LOGGER.info(f"Check archive log integrity on all partitions, existed partitions: {existed_ptn_num_set}.")
            if ptn_num_set == existed_ptn_num_set:
                LOGGER.info("Check archive log integrity on all partitions success.")
                return True
            time.sleep(NumberConst.TEN)
        LOGGER.warning(f"Check archive log integrity on all partitions timeout, "
                       f"existed partitions: {existed_ptn_num_set}.")
        return False

    def write_depend_copy_info_for_log_bak(self, cache_path, depend_copy_info_dict):
        """Write the dependent-copy info of this log backup to the cache repo."""
        LOGGER.info(f"Writing log backup dependent copy info: {depend_copy_info_dict}, job id: {self._job_id}.")
        depend_copy_info_f_name = f"{self._job_id}_{Db2Const.LOG_BAK_DEPEND_COPY_FILE_TAG}"
        Db2CommonUtil.check_injection(depend_copy_info_f_name)
        depend_copy_info_f_path = os.path.join(cache_path, depend_copy_info_f_name)
        common.output_execution_result(depend_copy_info_f_path, depend_copy_info_dict)
        LOGGER.info(f"Write log backup dependent copy info success, job id: {self._job_id}.")

    def read_depend_copy_info_for_log_bak(self, cache_path):
        """Read the dependent-copy info of this log backup from the cache repo.

        :return: parsed dict, or an empty dict when the file does not exist
        """
        LOGGER.info(f"Reading log backup dependent copy info, job id: {self._job_id}.")
        depend_copy_info_f_name = f"{self._job_id}_{Db2Const.LOG_BAK_DEPEND_COPY_FILE_TAG}"
        depend_copy_info_f_path = os.path.join(cache_path, depend_copy_info_f_name)
        if not os.path.exists(depend_copy_info_f_path):
            LOGGER.warning(f"The {depend_copy_info_f_name} does not exists.")
            return dict()
        Db2CommonUtil.check_path_list(depend_copy_info_f_path)
        with open(depend_copy_info_f_path, "r", encoding=Encoding.INTERNAL_ENCODING) as tmp_file:
            depend_copy_info_dict = json.loads(tmp_file.read())
        LOGGER.info(f"Read log backup dependent copy info: {depend_copy_info_dict} success, job id: {self._job_id}.")
        return depend_copy_info_dict

    def backup_archive_logs_for_ptn(self, user_name, tmp_archive_log_locs, log_mount_path, cur_node_name):
        """Copy one partition's archive logs into the log repository.

        Each log path is expected to contain a ``/<cur_node_name>/`` segment;
        the portion after that segment is copied with ``cp --parents`` so the
        relative directory layout is preserved under the node's target dir.
        """
        Db2CommonUtil.check_os_user_with_ex(user_name)
        tgt_path = os.path.realpath(os.path.join(log_mount_path, cur_node_name))
        node_name_start_reg = r"(.+/"
        node_name_end_reg = r"/)"
        node_name_reg = f"{node_name_start_reg}{cur_node_name}{node_name_end_reg}"
        for tmp_log_loc in tmp_archive_log_locs:
            match_ret = re.match(node_name_reg, tmp_log_loc)
            if not match_ret:
                LOGGER.error(f"The archive log path: {tmp_log_loc} is invalid.")
                continue
            tmp_node_path = match_ret.groups()[0]
            tmp_no_node_path = tmp_log_loc.split(tmp_node_path)[1]
            # Bugfix: the inner double quote opened after `-c` was previously
            # never closed, producing a malformed shell command.
            cp_cmd = f'su - {user_name} -c "cd {tmp_node_path} && cp --parents -f {tmp_no_node_path} {tgt_path} ' \
                     f'&& echo result=$?"'
            Db2CommonUtil.execute_echo_result_command(cp_cmd, encoding=get_lang_value(user_name))
        LOGGER.info(f"backup_archive_logs_for_ptn success, job id: {self._job_id}.")

    def get_previous_copy_ext_info(self, cache_path, cur_log_chain):
        """Get the extend info of the copy this log backup depends on.

        Prefers the newest log copy whose log chain matches the current one;
        otherwise falls back to the newest full copy.

        :return: (extend info dict, CopyDataTypeEnum value of the copy)
        """
        LOGGER.info(f"Start to get previous copy info on log backup, current log chain: {cur_log_chain}.")
        app_type = self.param_util.get_applications()
        # Log backup: first look for the newest log copy.
        latest_log_copy_info = Db2BackupUtil.query_pre_copy_info_by_rpc_tool(
            app_type, self._job_id, self._sub_job_id, cache_path, [CopyDataTypeEnum.LOG_COPY.value])
        # A newest log copy exists.
        if latest_log_copy_info:
            latest_log_copy_ext_info = latest_log_copy_info.get("extendInfo", {})
            copy_log_chain = latest_log_copy_ext_info.get(Db2JsonConstant.COPY_LOG_EXT_KEY, {}) \
                .get(Db2JsonConstant.LOG_CHAIN_KEY)
            LOGGER.info(f"Get previous log backup copy info success, copy log chain: {copy_log_chain}, "
                        f"current log chain: {cur_log_chain}.")
            # The log copy's log chain matches the current log chain.
            if cur_log_chain and cur_log_chain == copy_log_chain:
                return latest_log_copy_ext_info, CopyDataTypeEnum.LOG_COPY.value
            LOGGER.warning(f"Current log chain: {cur_log_chain} is inconsistent with log copy "
                           f"log chain: {copy_log_chain}.")
        # No usable log copy (missing or log-chain mismatch):
        # fall back to the dependent full copy.
        return self.get_pre_full_log_ext_info(cache_path, cur_log_chain)

    def get_pre_full_log_ext_info(self, cache_path, cur_log_chain):
        """Get the extend info of the newest full copy for log backup.

        :raise ErrCodeException: when no full copy exists, or its log chain
            does not match the current one
        :return: (extend info dict, CopyDataTypeEnum.FULL_COPY.value)
        """
        app_type = self.param_util.get_applications()
        latest_full_copy_info = Db2BackupUtil.query_pre_copy_info_by_rpc_tool(
            app_type, self._job_id, self._sub_job_id, cache_path, [CopyDataTypeEnum.FULL_COPY.value])
        # No full copy exists to depend on.
        if not latest_full_copy_info:
            LOGGER.error(f"Get previous full backup copy info failed, job id: {self._job_id}.")
            raise ErrCodeException(Db2ErrCode.NOT_EXIT_WAL_BACKUP_FILE_AND_SNAPSHOT_BACKUP)
        latest_full_copy_ext_info = latest_full_copy_info.get("extendInfo", {})
        copy_log_chain = latest_full_copy_ext_info.get(Db2JsonConstant.COPY_LOG_EXT_KEY, {}) \
            .get(Db2JsonConstant.LOG_CHAIN_KEY)
        LOGGER.info(f"Get previous full backup copy info success, extend info: {latest_full_copy_ext_info}, "
                    f"full copy log chain: {copy_log_chain}, current log chain: {cur_log_chain}.")
        # The full copy's log chain differs from the current log chain.
        if cur_log_chain and cur_log_chain != copy_log_chain:
            LOGGER.warning(f"Current log chain: {cur_log_chain} is inconsistent with full copy "
                           f"log chain: {copy_log_chain}.")
            raise ErrCodeException(Db2ErrCode.ERR_RESTORED)
        return latest_full_copy_ext_info, CopyDataTypeEnum.FULL_COPY.value
