#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import platform
import re
import time

from common.common import is_clone_file_system
from common.const import CopyDataTypeEnum
from common.file_common import check_path_validity
from common.logger import Logger
from common.number_const import NumberConst
from common.util.exec_utils import exec_ln_cmd
from db2.backup.util.db2_backup_util import Db2BackupUtil
from db2.comm.const import Db2CommonConst, Db2JsonConstant
from db2.comm.db2_cmd import get_lang_value
from db2.comm.models.restore_models import LogRestoreParam
from db2.comm.util.common_util import Db2CommonUtil
from db2.comm.util.dpf_util import DpfUtil
from db2.comm.util.param_util import Db2ParamUtil

# Module-level logger shared by this restore service; output goes to db2.log.
LOGGER = Logger().get_logger(filename="db2.log")


class DpfDbRestoreService:
    """Restore services for a DB2 DPF (Database Partitioning Feature) cluster.

    Builds ``su - <user> -c "db2_all db2 restore ..."`` / ``db2 rollforward``
    shell commands for full, incremental, differential and log (point-in-time)
    copy restores, and delegates their execution to ``DpfUtil``.
    """

    @staticmethod
    def exec_full_restore_db(os_user, db_name, param_dict):
        """Restore a DPF cluster database from a full copy.

        :param os_user: OS user of the target DB2 instance
        :param db_name: target database name
        :param param_dict: task parameter dictionary
        """
        cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(param_dict)
        tmp_log_dir = DpfUtil.create_tmp_log_dir(cache_path, os_user)
        copies = Db2ParamUtil.parse_copies(param_dict)
        full_copy_path = Db2ParamUtil.get_full_copy_path_when_restore(param_dict)
        if is_clone_file_system(param_dict):
            Db2CommonUtil.modify_path_permissions(full_copy_path)
        full_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.FULL_COPY.value)[0]
        full_bak_time = Db2ParamUtil.handle_backup_image_timestamp_of_copy(full_copy)
        new_log_path = DpfUtil.create_new_log_path_for_restore_db(os_user, db_name, param_dict)
        Db2CommonUtil.check_os_user_with_ex(os_user)
        Db2CommonUtil.check_injection(db_name, str(full_bak_time))
        Db2CommonUtil.check_path_list(full_copy_path, tmp_log_dir)
        if Db2ParamUtil.is_new_location_restore(param_dict):
            src_db_name = Db2ParamUtil.get_db_name_of_db_copy(param_dict)
            # Guard the interpolated source database name against command
            # injection, consistent with the incr/diff/log restore flows.
            Db2CommonUtil.check_injection(src_db_name)
            db_install_path = Db2CommonUtil.get_db_info_dict(os_user).get(db_name, {}).get("localDatabaseDirectory", "")
            db_path_cmd = DpfDbRestoreService.build_restore_dbpath_param(db_install_path)
            full_restore_db_cmd = f'su - {os_user} -c "db2_all db2 restore db {src_db_name} from {full_copy_path} ' \
                                  f'taken at {full_bak_time}{db_path_cmd} ' \
                                  f'into {db_name} logtarget {tmp_log_dir} newlogpath {new_log_path} ' \
                                  f'without prompting"'
        else:
            full_restore_db_cmd = f'su - {os_user} -c "db2_all db2 restore db {db_name} from {full_copy_path} ' \
                                  f'taken at {full_bak_time} logtarget {tmp_log_dir} newlogpath {new_log_path} ' \
                                  f'without prompting"'
        Db2CommonUtil.disconnect_db_application(os_user, db_name)
        Db2CommonUtil.deactivate_db_status(os_user, db_name)
        DpfUtil.execute_restore_db(full_restore_db_cmd)
        DpfDbRestoreService._rollforward_to_end_of_backup(os_user, db_name, tmp_log_dir)

    @staticmethod
    def exec_incr_restore_db(os_user, db_name, param_dict):
        """Restore a DPF cluster database from an incremental copy chain.

        :param os_user: OS user of the target DB2 instance
        :param db_name: target database name
        :param param_dict: task parameter dictionary
        """
        cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(param_dict)
        tmp_log_dir = DpfUtil.create_tmp_log_dir(cache_path, os_user)
        copies = Db2ParamUtil.parse_copies(param_dict)
        job_id = param_dict.get(Db2JsonConstant.JOB, {}).get(Db2JsonConstant.JOB_ID)
        full_copy_path = Db2ParamUtil.get_full_copy_path_for_incr_and_diff_restore(copies, job_id=job_id)
        incr_copy_paths = Db2ParamUtil.get_incr_copy_paths(copies, job_id=job_id)
        all_copy_paths = [full_copy_path] + incr_copy_paths
        full_copy_path = DpfDbRestoreService.build_full_copy_path(all_copy_paths, cache_path, full_copy_path,
                                                                  param_dict)
        # Soft-link the incremental copy files into the full copy directory so
        # the whole chain can be restored "from" a single location.
        for tmp_incr_copy_path in incr_copy_paths:
            DpfDbRestoreService._link_copy_files_to_dir(tmp_incr_copy_path, full_copy_path)
        full_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.FULL_COPY.value)[0]
        all_incr_copies = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.INCREMENT_COPY.value)
        # Manual restore order: last incremental -> full -> every incremental
        # from the first to the last.
        manual_restore_copies = [all_incr_copies[-1], full_copy] + all_incr_copies
        DpfDbRestoreService._exec_manual_incremental_restores(os_user, db_name, param_dict, manual_restore_copies,
                                                             full_copy_path, tmp_log_dir)
        DpfDbRestoreService._rollforward_to_end_of_backup(os_user, db_name, tmp_log_dir)

    @staticmethod
    def exec_diff_restore_db(os_user, db_name, param_dict):
        """Restore a DPF cluster database from a differential copy.

        :param os_user: OS user of the target DB2 instance
        :param db_name: target database name
        :param param_dict: task parameter dictionary
        """
        cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(param_dict)
        tmp_log_dir = DpfUtil.create_tmp_log_dir(cache_path, os_user)
        copies = Db2ParamUtil.parse_copies(param_dict)
        job_id = param_dict.get(Db2JsonConstant.JOB, {}).get(Db2JsonConstant.JOB_ID)
        full_copy_path = Db2ParamUtil.get_full_copy_path_for_incr_and_diff_restore(copies, job_id=job_id)
        diff_copy_path = Db2ParamUtil.get_diff_copy_path(copies, job_id=job_id)
        full_copy_path = DpfDbRestoreService.build_full_copy_path((full_copy_path, diff_copy_path), cache_path,
                                                                  full_copy_path, param_dict)
        # Soft-link the differential copy files into the full copy directory.
        DpfDbRestoreService._link_copy_files_to_dir(diff_copy_path, full_copy_path)
        full_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.FULL_COPY.value)[0]
        diff_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.DIFF_COPY.value)[0]
        # Manual restore order: differential -> full -> differential.
        manual_restore_copies = [diff_copy, full_copy, diff_copy]
        DpfDbRestoreService._exec_manual_incremental_restores(os_user, db_name, param_dict, manual_restore_copies,
                                                             full_copy_path, tmp_log_dir)
        DpfDbRestoreService._rollforward_to_end_of_backup(os_user, db_name, tmp_log_dir)

    @staticmethod
    def _link_copy_files_to_dir(src_copy_path, tgt_dir):
        """Soft-link every regular file under src_copy_path into tgt_dir."""
        for tmp_f_n in os.listdir(src_copy_path):
            tmp_abs_path = os.path.realpath(os.path.join(src_copy_path, tmp_f_n))
            if os.path.isfile(tmp_abs_path):
                Db2CommonUtil.create_soft_link(tmp_abs_path, os.path.join(tgt_dir, tmp_f_n))

    @staticmethod
    def _exec_manual_incremental_restores(os_user, db_name, param_dict, manual_restore_copies, full_copy_path,
                                          tmp_log_dir):
        """Run the ordered "restore db ... incremental" commands of a manual chain.

        Shared by the incremental and differential restore flows; only the
        last command of the chain extracts logs (logtarget).
        """
        src_db_name = Db2ParamUtil.get_db_name_of_db_copy(param_dict)
        is_new_loc = Db2ParamUtil.is_new_location_restore(param_dict)
        db_install_path = ""
        if is_new_loc:
            db_install_path = Db2CommonUtil.get_db_info_dict(os_user).get(db_name, {}).get("localDatabaseDirectory", "")
        db_path_cmd = DpfDbRestoreService.build_restore_dbpath_param(db_install_path)
        new_log_path = DpfUtil.create_new_log_path_for_restore_db(os_user, db_name, param_dict)
        Db2CommonUtil.disconnect_db_application(os_user, db_name)
        Db2CommonUtil.check_os_user_with_ex(os_user)
        Db2CommonUtil.check_injection(db_name)
        Db2CommonUtil.check_path_list(tmp_log_dir, full_copy_path)
        Db2CommonUtil.deactivate_db_status(os_user, db_name)
        restore_num = len(manual_restore_copies)
        for idx, tmp_restore_copy in enumerate(manual_restore_copies):
            # Extract logs only on the last restore of the chain.
            log_target_cmd = f"logtarget {tmp_log_dir} " if idx == (restore_num - 1) else ""
            tmp_copy_bak_time = Db2ParamUtil.handle_backup_image_timestamp_of_copy(tmp_restore_copy)
            Db2CommonUtil.check_injection(str(tmp_copy_bak_time))
            if is_new_loc:
                tmp_restore_db_cmd = f'su - {os_user} -c "db2_all db2 restore db {src_db_name} incremental ' \
                                     f'from {full_copy_path} taken at {tmp_copy_bak_time}' \
                                     f'{db_path_cmd} into {db_name} {log_target_cmd}' \
                                     f'newlogpath {new_log_path} ' \
                                     f'without prompting"'
                Db2CommonUtil.check_injection(src_db_name)
            else:
                tmp_restore_db_cmd = f'su - {os_user} -c "db2_all db2 restore db {db_name} incremental ' \
                                     f'from {full_copy_path} taken at {tmp_copy_bak_time} ' \
                                     f'{log_target_cmd}newlogpath {new_log_path} without prompting"'
            DpfUtil.execute_restore_db(tmp_restore_db_cmd)

    @staticmethod
    def _rollforward_to_end_of_backup(os_user, db_name, tmp_log_dir):
        """Roll the restored database forward to end of backup using the extracted logs."""
        DpfUtil.check_rollforward_pending_after_exec_restore_db(os_user, db_name)
        # "\\(" keeps the literal backslash of the original command while
        # avoiding the invalid "\(" escape sequence (SyntaxWarning on 3.12+).
        rollforward_db_cmd = f'su - {os_user} -c "db2 rollforward database {db_name} to end of backup and complete ' \
                             f'overflow log path \\({tmp_log_dir}\\) NORETRIEVE"'
        DpfUtil.execute_rollforward_db(rollforward_db_cmd)

    @staticmethod
    def build_full_copy_path(all_copy_paths, cache_path, full_copy_path, param_dict):
        """Prepare the directory the RESTORE command reads from.

        On a clone file system the copy directories are used in place (after
        fixing permissions); otherwise the full copy files are soft-linked
        into the cache path, which becomes the restore source.

        :return: the directory to use as the restore "from" path
        """
        if is_clone_file_system(param_dict):
            for tmp_copy_path in all_copy_paths:
                Db2CommonUtil.modify_path_permissions(tmp_copy_path)
        else:
            for db_file in os.listdir(full_copy_path):
                # Create soft links into the cache directory.
                exec_ln_cmd(os.path.join(full_copy_path, db_file), os.path.join(cache_path, db_file))
            full_copy_path = cache_path
        return full_copy_path

    @staticmethod
    def handle_archive_log_of_log_copy(src_copy_path, tgt_copy_path):
        """Merge a log copy: link its per-node log files into the target copy.

        :param src_copy_path: source log copy directory
        :param tgt_copy_path: target copy directory containing NODE**** dirs
        """
        if not os.path.exists(src_copy_path) or not os.path.isdir(src_copy_path):
            LOGGER.warning("Source copy path does not exists or is not directory.")
            return
        if not os.path.exists(tgt_copy_path) or not os.path.isdir(tgt_copy_path):
            LOGGER.warning("Target copy path does not exists or is not directory.")
            return
        for root, _, files in os.walk(src_copy_path):
            root_last_dir_name = str(root).split("/")[-1]
            # Skip any leaf directory that is not a "NODE0000"-style node dir.
            if not re.match(r"^NODE[\d]{4}$", root_last_dir_name):
                continue
            tgt_tmp_node_dir = os.path.realpath(os.path.join(tgt_copy_path, root_last_dir_name))
            if not os.path.isdir(tgt_tmp_node_dir):
                LOGGER.warning(f"Target log node path: {tgt_tmp_node_dir} is not directory.")
                continue
            for tmp_file in files:
                tmp_src_file = os.path.realpath(os.path.join(root, tmp_file))
                Db2CommonUtil.create_soft_link(tmp_src_file, os.path.join(tgt_tmp_node_dir, tmp_file))

    @staticmethod
    def backup_arch_log_to_target_dir(os_user, src_copy_path, tgt_path):
        """Copy the archive logs of a log copy into the target directory.

        :param os_user: OS user used to run the copy command
        :param src_copy_path: source log copy directory (contains NODE**** dirs)
        :param tgt_path: destination directory
        """
        LOGGER.info(f"Start copying log copy: {src_copy_path} to target path: {tgt_path} ...")
        if not os.path.exists(src_copy_path) or not os.path.isdir(src_copy_path):
            LOGGER.warning("Source copy path does not exists or is not directory.")
            return
        if not os.path.exists(tgt_path) or not os.path.isdir(tgt_path):
            LOGGER.warning("Target copy path does not exists or is not directory.")
            return
        is_aix = platform.system() == 'AIX'
        Db2CommonUtil.check_os_user_with_ex(os_user)
        Db2CommonUtil.check_injection(src_copy_path, tgt_path)
        # Refuse symlinked or relative ("../", "./") paths — both could
        # redirect the copy outside the intended directories.
        if os.path.islink(src_copy_path):
            LOGGER.warning(f"src_copy_path:{src_copy_path} islink")
            return
        if os.path.islink(tgt_path):
            LOGGER.warning(f"tgt_path:{tgt_path} islink")
            return
        if ('../' in src_copy_path) or ('./' in src_copy_path):
            LOGGER.warning(f"src_copy_path:{src_copy_path} not valid")
            return
        if ('../' in tgt_path) or ('./' in tgt_path):
            LOGGER.warning(f"tgt_path:{tgt_path} not valid")
            return
        LOGGER.info("path check complete")
        for i in os.listdir(src_copy_path):
            # Skip any entry that is not a "NODE0000"-style node dir.
            if not re.match(r"^NODE[\d]{4}$", i):
                continue
            tmp_copy_node_path = os.path.realpath(os.path.join(src_copy_path, i))
            if not os.path.isdir(tmp_copy_node_path):
                LOGGER.warning(f"The copy node path: {tmp_copy_node_path} is not directory.")
                continue
            if is_aix:
                # AIX cp lacks --parents; use the dedicated helper.
                Db2BackupUtil.copy_dir_for_aix(os_user, src_copy_path, tgt_path, i)
                continue
            cp_cmd = f'su - {os_user} -c "cd {src_copy_path} && cp --parents -rf {i} {tgt_path} && echo result=$?"'
            Db2CommonUtil.check_injection(str(i))
            Db2CommonUtil.execute_echo_result_command(cp_cmd, encoding=get_lang_value(os_user))
        LOGGER.info(f"Copy log copy: {src_copy_path} to target path: {tgt_path} success.")

    @staticmethod
    def exec_log_restore_db(os_user, db_name, param_dict):
        """Point-in-time restore of a DPF cluster database from log copies.

        :param os_user: OS user of the target DB2 instance
        :param db_name: target database name
        :param param_dict: task parameter dictionary
        :return: a PIT restore result constant (see handle_point_in_time_recovery)
        :raises Exception: when the target timestamp or log copy paths are missing
        """
        restore_timestamp = Db2ParamUtil.get_restore_timestamp(param_dict)
        LOGGER.info(f"The recovery target timestamp is {restore_timestamp} when executing point-in-time recovery.")
        if not restore_timestamp:
            raise Exception("The recovery target timestamp is empty when executing point-in-time recovery")
        # Roll forward to the given time using log copies; the timestamp must
        # be converted to the "YYYY-mm-dd-HH.MM.SS" local-time format.
        restore_time = Db2CommonUtil.convert_pitr_timestamp_to_local_time(restore_timestamp)
        copies = Db2ParamUtil.parse_copies(param_dict)
        job_id = param_dict.get(Db2JsonConstant.JOB, {}).get(Db2JsonConstant.JOB_ID)
        log_copy_paths = Db2ParamUtil.get_log_copy_paths(copies, job_id=job_id)
        if not log_copy_paths:
            LOGGER.error("The list of log copy paths is empty.")
            raise Exception("The list of log copy paths is empty")
        LOGGER.info(f"The paths of log copies are {log_copy_paths}.")
        full_copy_path = Db2ParamUtil.get_full_copy_path_when_restore(param_dict)
        all_copy_paths = [full_copy_path] + log_copy_paths
        if is_clone_file_system(param_dict):
            for tmp_copy_path in all_copy_paths:
                Db2CommonUtil.modify_path_permissions(tmp_copy_path)
        Db2CommonUtil.deactivate_db_status(os_user, db_name)
        try:
            full_restore_db_cmd = DpfDbRestoreService.get_restore_cmd(os_user, db_name, param_dict, full_copy_path)
            restore_param = LogRestoreParam(user_name=os_user, db_name=db_name,
                                            full_restore_db_cmd=full_restore_db_cmd, restore_time=restore_time)
            return DpfDbRestoreService.handle_point_in_time_recovery(restore_param, param_dict)
        except Exception as ex:
            # Deliberately broad: any failure of the first attempt triggers
            # one retry with recomputed recovery parameters.
            LOGGER.exception("Execute point in time recovery failed, retry ...")
            full_restore_db_cmd = DpfDbRestoreService.get_restore_cmd(os_user, db_name, param_dict, full_copy_path)
            return DpfDbRestoreService.handle_retry_pit_recovery(os_user, db_name, full_restore_db_cmd, param_dict, ex)

    @staticmethod
    def get_restore_cmd(os_user, db_name, param_dict, full_copy_path):
        """Build the full-copy RESTORE command used before rolling forward.

        :return: the complete "su - ... db2_all db2 restore db ..." command
        """
        cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(param_dict)
        tmp_log_dir = DpfUtil.create_tmp_log_dir(cache_path, os_user)
        copies = Db2ParamUtil.parse_copies(param_dict)
        full_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.FULL_COPY.value)[0]
        full_bak_img_time = Db2ParamUtil.handle_backup_image_timestamp_of_copy(full_copy)
        src_db_name = Db2ParamUtil.get_db_name_of_db_copy(param_dict)
        new_log_path = DpfUtil.create_new_log_path_for_restore_db(os_user, db_name, param_dict)
        Db2CommonUtil.check_os_user_with_ex(os_user)
        Db2CommonUtil.check_injection(str(full_bak_img_time), db_name)
        Db2CommonUtil.check_path_list(full_copy_path, tmp_log_dir)
        if Db2ParamUtil.is_new_location_restore(param_dict):
            db_install_path = Db2CommonUtil.get_db_info_dict(os_user).get(db_name, {}).get("localDatabaseDirectory", "")
            db_path_cmd = DpfDbRestoreService.build_restore_dbpath_param(db_install_path)
            Db2CommonUtil.check_injection(src_db_name)
            return f'su - {os_user} -c "db2_all db2 restore db {src_db_name} from {full_copy_path} ' \
                   f'taken at {full_bak_img_time}{db_path_cmd} ' \
                   f'into {db_name} logtarget {tmp_log_dir} newlogpath {new_log_path} ' \
                   f'without prompting"'
        else:
            return f'su - {os_user} -c "db2_all db2 restore db {db_name} from {full_copy_path} ' \
                   f'taken at {full_bak_img_time} logtarget {tmp_log_dir} newlogpath {new_log_path} ' \
                   f'without prompting"'

    @staticmethod
    def handle_retry_pit_recovery(os_user, db_name, full_restore_db_cmd, param_dict, ex):
        """Retry point-in-time recovery after a failed first attempt.

        :param ex: the exception from the failed attempt; re-raised when the
            database stays in restore-pending state even after cancelling
        """
        last_ts_time = DpfUtil.get_last_commit_transaction_time(os_user, db_name)
        time.sleep(NumberConst.TEN)
        # If "Restore pending" is YES, cancel the rollforward and re-check;
        # if it is still YES the database cannot be recovered further.
        restore_pending_val = Db2CommonUtil.get_restore_pending_val_of_db(os_user, db_name)
        if str(restore_pending_val).upper() == "YES":
            DpfUtil.exec_cancel_rollforward_db(os_user, db_name)
            restore_pending_val = Db2CommonUtil.get_restore_pending_val_of_db(os_user, db_name)
            if str(restore_pending_val).upper() == "YES":
                LOGGER.error(f"The restore pending value of database: {db_name} is yes, can't recover anymore.")
                raise ex
        copies = Db2ParamUtil.parse_copies(param_dict)
        full_copy = Db2ParamUtil.get_copies_by_copy_type(copies, CopyDataTypeEnum.FULL_COPY.value)[0]
        full_bak_end_time = Db2ParamUtil.get_backup_end_time_of_copy(full_copy)
        LOGGER.info(f"The last committed transaction time is {last_ts_time}, full copy end time "
                    f"is {full_bak_end_time}.")
        # Convert to plain full-copy recovery when the last committed
        # transaction does not go past the end of the full copy (and
        # "restore to end of log" was not requested).
        convert_full = bool(not Db2ParamUtil.is_restore_end_of_log(param_dict) and full_bak_end_time
                            and DpfDbRestoreService.parse_time_hold_to_sec(last_ts_time) <= full_bak_end_time)
        if convert_full:
            LOGGER.info("The last committed transaction time is less than or equal to full copy end time, "
                        "need to convert to full recovery.")
        restore_param = LogRestoreParam(user_name=os_user, db_name=db_name,
                                        full_restore_db_cmd=full_restore_db_cmd, restore_time=last_ts_time,
                                        convert_full=convert_full)
        return DpfDbRestoreService.handle_point_in_time_recovery(restore_param, param_dict, retry=True)

    @staticmethod
    def build_restore_dbpath_param(db_install_path):
        """Return the " to <path>" RESTORE clause, or "" when no path is given."""
        return f" to {db_install_path}" if db_install_path else ""

    @staticmethod
    def parse_time_hold_to_sec(input_time):
        """
        Parse a time string: drop the separator characters and keep up to seconds.
        :param input_time: point in time, e.g. '2023-01-01-10.01.01.000000'
        :return: processed time, e.g. '20230101100101'
        """
        replaced_time = str(input_time).replace("-", "").replace(".", "")
        # Keep the 14 digits covering year/month/day/hour/minute/second.
        return replaced_time[:14]

    @staticmethod
    def handle_point_in_time_recovery(restore_param: "LogRestoreParam", param_dict,
                                      retry=False):
        """
        Handle point-in-time recovery.
        :param restore_param: recovery parameters
        :param param_dict: task parameter dictionary
        :param retry: whether this is a retried recovery
        :return: a PIT restore result constant, None, or '' depending on path
        """
        cache_path = Db2ParamUtil.get_cache_mount_path_for_restore(param_dict)
        tmp_log_dir = DpfUtil.create_tmp_log_dir(cache_path, restore_param.user_name)
        Db2CommonUtil.disconnect_db_application(restore_param.user_name, restore_param.db_name)
        # First restore the full copy the logs depend on.
        DpfUtil.execute_restore_db(restore_param.full_restore_db_cmd)
        log_arch_meth_val = Db2CommonUtil.get_log_arch_meth_val_of_db(restore_param.user_name, restore_param.db_name)
        if log_arch_meth_val and os.path.isdir(log_arch_meth_val):
            inst_name = Db2ParamUtil.get_tgt_inst_name_when_restore_db(param_dict)
            arch_log_path = os.path.realpath(os.path.join(log_arch_meth_val, inst_name, restore_param.db_name))
            # Empty the archive directory after the restore.
            DpfUtil.create_empty_dir(restore_param.user_name, arch_log_path)
        DpfUtil.check_rollforward_pending_after_exec_restore_db(restore_param.user_name, restore_param.db_name)
        Db2CommonUtil.check_path_list(tmp_log_dir)
        if restore_param.convert_full:
            # Converted to plain full-copy recovery.
            rollforward_db_cmd = f'su - {restore_param.user_name} ' \
                                 f'-c "db2 rollforward database {restore_param.db_name} to end of backup and ' \
                                 f'complete overflow log path \\({tmp_log_dir}\\)"'
            Db2CommonUtil.check_os_user_with_ex(restore_param.user_name)
            Db2CommonUtil.check_injection(restore_param.db_name)
            DpfUtil.execute_rollforward_db(rollforward_db_cmd)
            return Db2CommonConst.PIT_RESTORE_SUCCESS_WITH_MIN_TIME
        # Copy the log copy files into the target database archive directory.
        LOGGER.info("Start merging db2 log copies ...")
        copies = Db2ParamUtil.parse_copies(param_dict)
        job_id = param_dict.get(Db2JsonConstant.JOB, {}).get(Db2JsonConstant.JOB_ID)
        log_copy_paths = Db2ParamUtil.get_log_copy_paths(copies, job_id=job_id)
        for tmp_copy_path in log_copy_paths:
            DpfDbRestoreService.backup_arch_log_to_target_dir(restore_param.user_name, tmp_copy_path, tmp_log_dir)
        LOGGER.info("Merge db2 log copies success.")
        retry_to_end = (retry and Db2ParamUtil.is_restore_end_of_log(param_dict))
        if DpfDbRestoreService.is_end_of_log(copies, param_dict) or retry_to_end:
            rollforward_db_cmd = f'su - {restore_param.user_name} ' \
                                 f'-c "db2 rollforward database {restore_param.db_name} ' \
                                 f'to end of logs ' \
                                 f'and complete overflow log path \\({tmp_log_dir}\\) NORETRIEVE"'
            DpfUtil.execute_rollforward_db(rollforward_db_cmd)
            return Db2CommonConst.PIT_RESTORE_SUCCESS_WITH_END_OF_LOGS if retry_to_end else None
        else:
            rollforward_db_cmd = f'su - {restore_param.user_name} ' \
                                 f'-c "db2 rollforward database {restore_param.db_name} to ' \
                                 f'{restore_param.restore_time} USING LOCAL TIME and complete ' \
                                 f'overflow log path \\({tmp_log_dir}\\) NORETRIEVE"'
            Db2CommonUtil.check_injection(str(restore_param.restore_time))
            DpfUtil.execute_rollforward_db(rollforward_db_cmd)
            return Db2CommonConst.PIT_RESTORE_SUCCESS_WITH_MIN_TIME if retry else ''

    @staticmethod
    def is_end_of_log(copies: list, param_dict):
        """Return True when the requested restore time equals the last copy's log end time."""
        end_time_of_copy = Db2ParamUtil.get_log_end_time_of_copy(copies[-1])
        if not end_time_of_copy:
            return False
        restore_timestamp = Db2ParamUtil.get_restore_timestamp(param_dict)
        LOGGER.info(f"End time of copy:{end_time_of_copy}, restore_time:{restore_timestamp}")
        return str(restore_timestamp) == str(end_time_of_copy)
