#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import glob
import json
import os
import random
import shutil
import sqlite3
import stat
import threading
import time
import re
from datetime import datetime
from pathlib import Path

from common.common import output_result_file
from common.common_models import SubJobDetails, LogDetail, SubJobModel
from common.const import ParamConstant, SubJobStatusEnum, RepositoryDataTypeEnum, ExecuteResultEnum, BackupJobResult, \
    SubJobPolicyEnum
from common.logger import SENSITIVE_WORDS
from common.file_common import exec_lchown
from common.util.exec_utils import exec_mkdir_cmd, exec_cp_cmd, exec_overwrite_file
from goldendb.schemas.glodendb_schemas import ActionResponse
from tpops.gaussdb.common.const import SubJobType, GaussSubJobName, EnvName, GaussBackupStatus, \
    GaussDBCode, LogLevel, ErrorCode, RoachConstant, RpcParamKey, LastCopyType, GaussBackupType, \
    BackupTypeEnum, TaskTypeEnum, SubJobPriorityEnum, GaussLogBackupStatus, JobInfo, PermissionNode, UserInfo, \
    BusinessConfigType, VERSION, GaussDBRdsErrorCode, QUERY_JOB_INTERVAL, MAX_QUERY_JOB_TIMES
from tpops.gaussdb.common.gaussdb_common import report_job_details, \
    get_std_in_variable, set_backup_name, extract_ip, read_file, \
    get_agent_roach_host_and_port, invoke_rpc_tool_interface, aggregate_single_copy_object_data, \
    exec_rc_tool_cmd, write_progress_file_with_status_and_speed, get_all_db_files, set_permisson, set_user_and_group, \
    get_mount_path, merge_time_body_info
from tpops.gaussdb.common.safe_get_information import ResourceParam
from tpops.gaussdb.common.tpops_gaussdb_exception import GaussDBException
from tpops.gaussdb.handle.resource.resource_info import ResourceInfo
from tpops.gaussdb.handle.exec_base import ExecBase
from tpops.gaussdb.common.log import log


class BackUp(ExecBase):

    def __init__(self, pid, job_id, sub_job_id, data, json_param):
        """Initialize the backup context from the parsed task parameter object.

        :param pid: process/task id, used for stdin-variable lookup and result reporting
        :param job_id: main job id
        :param sub_job_id: sub-job id (may be empty for main-job stages)
        :param data: raw data forwarded to ExecBase
        :param json_param: parsed task parameter dict; must be non-empty
        :raises Exception: when json_param is empty
        """
        super().__init__(pid, job_id, sub_job_id, data)
        log.info("Start to init backup param")
        if not json_param:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        if not self._job_id:
            log.warn(f"self._job_id000: {self._job_id}")
        if not self._sub_job_id:
            log.warn(f"_sub_job_id: {self._sub_job_id}")
        self._json_param_object = json_param
        self._concrete_object_db = "backupkey.db"
        # Copy id of the first entry in job.copy; "" when absent
        self._copy_id = self._json_param_object.get("job", {}).get("copy", [])[0].get("id", "")
        self._end_point = self._json_param_object.get("job", {}).get("protectEnv", {}).get("endpoint", "")
        # IP of this host, matched against the comma-separated endpoint list
        self._host_ip = self.get_cur_task_ip()
        self._logdetail = None
        self._err_info = {}
        # Seconds between progress polls in write_backup_progress / write_log_backup_progress
        self._query_progress_interval = 30
        self._instance_id = self._json_param_object.get("job", {}).get("protectObject", {}).get("id", "")
        self._sub_job_name = ""
        self.backup_type = self._json_param_object.get("job", {}).get("jobParam", {}).get("backupType")
        if not self.backup_type:
            log.error(f"self.backup_type: {self.backup_type}")
        self._cache_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.CACHE_REPOSITORY)
        self._meta_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.META_REPOSITORY)
        # Log backup writes into the log repository; other types use the data repository
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self._data_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.LOG_REPOSITORY)
            self._data_area_list = []
        else:
            self._data_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.DATA_REPOSITORY)
            self._data_area_list = self.get_repository_path_list(json_param, RepositoryDataTypeEnum.DATA_REPOSITORY)
        self._job_status = SubJobStatusEnum.RUNNING
        self._log_backup_part_success_flag = False
        self._backup_status = GaussBackupStatus.RUNNING
        self._err_code = 0
        # Rebind the env-name constants so the IAM credential is read per-task from stdin
        EnvName.IAM_USERNAME = "job_protectEnv_auth_authKey"
        EnvName.IAM_PASSWORD = "job_protectEnv_auth_authPwd"
        self.user_name = get_std_in_variable(f"{EnvName.IAM_USERNAME}_{pid}")
        self._db_name = ""
        self._backupkey_name = ""
        self._detail_message = ""
        # Assemble the resource-access request endpoint
        self._extend_info = self._json_param_object.get("job", {}).get("protectEnv", {}).get("extendInfo", {})
        self._business_addr = self._extend_info.get("pmAddress", "")
        self._business_port = self._extend_info.get("pmPort", "")
        self._address = f"https://{self._business_addr}:{self._business_port}"
        self.client_crt = self._extend_info.get("clientCrt", "")
        self._fun_inst = ResourceInfo(pid, self._address)

    @staticmethod
    def get_params_by_key(param, json_const):
        param = param.get("job", {}).get("protectObject", {}).get("extendInfo", {}).get(json_const, "")
        if not param:
            log.error(f"Get param protectObject_extendInfo_json_const failed.")
        return param

    @staticmethod
    def set_error_response(response):
        """Stamp *response* with the generic failure code (code and body_err)."""
        failure_code = GaussDBCode.FAILED.value
        response.code = failure_code
        response.body_err = failure_code

    @staticmethod
    def read_param_file(file_path):
        """
        解析参数文件
        :return:
        """
        if not os.path.isfile(file_path):
            raise Exception(f"File:{file_path} not exist")
        try:
            with open(file_path, "r", encoding='UTF-8') as f_content:
                json_dict = json.loads(f_content.read())
        except Exception as ex:
            raise Exception("parse param file failed") from ex
        return json_dict

    @staticmethod
    def constuct_clean_log(original_log: str):
        """Mask sensitive substrings in a log message.

        :param original_log: string to sanitize
        :return: the string with the inner characters of every sensitive-word
                 match replaced by '*'
        """
        danger_index = set()
        for sens_word in SENSITIVE_WORDS:
            # Entries without '%' are exact full-word values, not patterns to scan for
            if sens_word.find("%") == -1:
                continue
            # Keep the first and last character of each match for readability;
            # sensitive words are at least 3 chars, so start+1 .. end-1 is valid.
            # Bug fix: the original checked `if not matches` on the finditer
            # iterator, which is always truthy — that dead check is removed.
            for match in re.finditer(sens_word.replace("%", ""), original_log, re.I):
                danger_index.update(range(match.start() + 1, match.end() - 1))

        # '*' replaces masked characters so the framework's security scan passes
        return ''.join('*' if index in danger_index else content
                       for index, content in enumerate(original_log))

    def get_cache_path(self):
        return self._cache_area

    def backup_prerequisite_progress(self):
        """Report prerequisite-job progress based on marker files in the cache repo.

        COMPLETED/100 when BackupPrerequisiteProgress exists, RUNNING/0 when it
        does not, FAILED/0 when BackupPrerequisiteFailProgress exists.
        @return:
        """
        log.info("step 1-4 start to upload backup prepare job progress ")
        done_marker = os.path.join(self._cache_area, "BackupPrerequisiteProgress")
        fail_marker = os.path.join(self._cache_area, "BackupPrerequisiteFailProgress")
        if os.path.exists(done_marker):
            job_status, progress = SubJobStatusEnum.COMPLETED.value, 100
        else:
            log.info("backup_prerequisite_progress is running")
            job_status, progress = SubJobStatusEnum.RUNNING.value, 0
        # A failure marker overrides whatever was decided above
        if os.path.exists(fail_marker):
            log.info("backup_prerequisite_progress is fail")
            job_status, progress = SubJobStatusEnum.FAILED.value, 0
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress, logDetail=self._logdetail)
        output_result_file(self._pid, output.dict(by_alias=True))
        log.info("step 1-4 end to upload backup prepare job progress ")

    def backup_post_job_progress(self):
        """Report post-job progress based on marker files in the cache repo.

        COMPLETED/100 when BackupPostProgress exists, RUNNING/0 when it does
        not, FAILED/0 when BackupPostProgressFailProgress exists.
        @return:
        """
        log.info(f"Start to upload backup post job progress...")
        job_status = SubJobStatusEnum.COMPLETED.value
        file_path = os.path.join(self._cache_area, "BackupPostProgress")
        fail_file_path = os.path.join(self._cache_area, "BackupPostProgressFailProgress")
        progress = 100
        if not os.path.exists(file_path):
            # Bug fix: the log text previously said "backup_prerequisite_progress"
            # (copy-paste from the prerequisite reporter)
            log.info("backup_post_job_progress is running")
            job_status = SubJobStatusEnum.RUNNING.value
            progress = 0
        if os.path.exists(fail_file_path):
            log.info("backup_post_job_progress is fail")
            job_status = SubJobStatusEnum.FAILED.value
            progress = 0
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress, logDetail=self._logdetail)
        output_result_file(self._pid, output.dict(by_alias=True))

    def backup_pre_job(self):
        """
        Prerequisite job:
        1. create the required directories
        2. write taskInfo_{hostKey}.txt under cache repo /tmp/copy_id/
        3. write dws_cacheInfo.txt under DataBackup /stmp/
        4. create the host DB database
        Then, for non-full backups, merge the previous copies' backupkey.db
        sqlite data into the current copy.
        @return: True (failures inside helpers are logged, not raised here)
        """
        log.info("step2-3 start to backup_pre_job")
        log.info(f"json_param_object: {self._json_param_object}")
        log.info(f"meta_area: {self._meta_area}")
        log.info(f"cache_area: {self._cache_area}")
        log.info(f"data_area: {self._data_area}")
        self.create_path_for_backup()
        self.save_business_config()
        self.create_host_db()
        self.check_archived_time()

        # Full backup skips the sqlite merge in the prerequisite job
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Persist the backup filesystem relations into the meta repository
            self.save_repo_relations_to_meta()
            log.info("success to save_repo_relations_to_meta")
            return True

        # Merge backupkey.db: increment backup merges the previous copy's db here
        last_full_copy_id = self.get_last_full_copy_id()
        cache_path_parent = Path(self._cache_area).parent
        last_backup_record = os.path.join(cache_path_parent, last_full_copy_id, "meta", last_full_copy_id,
                                          "restoreMergeSqlite")

        # Upgrade compatibility: fall back to reading from the logdb directory
        if not os.path.exists(last_backup_record):
            past_copy_id = self.get_last_copy_id()
            mount_path_parent = Path(self._cache_area).parent
            last_backup_record = os.path.join(mount_path_parent, past_copy_id, "meta", past_copy_id, "logdb")
            log.info(f"Merge past backup source path not exist")

        self.merge_past_backup_xbsa_table(last_backup_record)
        log.info("step2-3 end to merge past sqlite")

        # Non-log backup is done at this point
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            return True

        # Read the archived-to time recorded by earlier log backups
        backup_key_file_archived_time = os.path.join(cache_path_parent, "archived_time")
        log.info(f"get backup_key_file_archived_time {backup_key_file_archived_time}")
        if os.path.exists(backup_key_file_archived_time) and os.path.exists(last_backup_record):
            archived_time = read_file(backup_key_file_archived_time)
            log.info(f"get archived_time: {archived_time}")
        else:
            # First log backup: only the latest full backup is associated
            # (keeps patch-scenario compatibility)
            return True

        full_copy_id_and_timestamp_map_file = os.path.join(cache_path_parent, "full_copy_id_and_timestamp_maps")
        copy_id_timestamp_map_list = []
        if os.path.exists(full_copy_id_and_timestamp_map_file):
            copy_id_timestamp_map_list = read_file(full_copy_id_and_timestamp_map_file)
            log.info(f"Find copy id and timestamp maps: {copy_id_timestamp_map_list}")
        else:
            log.info("Not find copy id and timestamp maps")
            return True

        if len(copy_id_timestamp_map_list) <= 1:
            # Only one full backup done before: return. In the upgrade scenario
            # the max-restorable time is first advanced to the post-upgrade full backup.
            return True

        copy_id_list = []
        copy_id_timestamp_map_list.reverse()

        # Collect copy ids (newest first) down to the full backup just before
        # the max-restorable (archived) time
        for copy_id_timestamp_map in copy_id_timestamp_map_list:
            timestamp = copy_id_timestamp_map.get("time", "")
            copy_id = copy_id_timestamp_map.get("copy_id", "")
            log.info(f"Get timestamp: {timestamp}, copy id: {copy_id}")
            copy_id_list.append(copy_id)
            # NOTE(review): the log text says "later than", but the loop stops when
            # the full-copy timestamp compares EARLIER than archived_time — confirm intent.
            if timestamp < archived_time:
                log.info(f"The full backup timestamp is later than the archived timestamp")
                break

        log.info(f"get copy_id_list {copy_id_list}")
        if len(copy_id_list) <= 1:
            # Only one associated copy: its sqlite merge already happened above
            return True

        last_full_copy_id = self.get_last_full_copy_id()
        for copy_id in copy_id_list:
            # The current full copy was merged already; skip it
            if copy_id == last_full_copy_id:
                continue

            # Merge backupkey.db of every copy associated with this log backup
            source_path = os.path.join(cache_path_parent, copy_id, "meta", copy_id, "restoreMergeSqlite")
            if not os.path.exists(source_path):
                # Patch-upgrade scenario: only the last full backup's sqlite is merged
                log.info(f"Source db path not exist: {source_path}")
                continue
            log.info(f"Source db path exist: {source_path}")
            self.merge_past_backup_xbsa_table(source_path)

        # Record the full-copy ids this log backup depends on; after patching,
        # one full backup + one log backup is needed for this logic to take effect
        log_associate_full_copy_id_path = os.path.join(self._cache_area, "log_associate_full_copy_id")
        exec_overwrite_file(log_associate_full_copy_id_path, copy_id_list)
        log.info(f"Write log associated copy id {copy_id_list} to {log_associate_full_copy_id_path}")
        return True

    def check_archived_time(self):
        """Reset the archived-time markers when the database archive slot restarted.

        Only log backup needs this: after an archive restart (space overflow,
        restore, or manual slot restart) the backup_key must be recomputed from
        the last full backup, so the cached time files are removed.
        """
        if self.backup_type != BackupTypeEnum.LOG_BACKUP.value:
            log.info("Only log backup need check archived time")
            return

        # Detect the restart-archive flag from the SLA policy attached to this task
        backup_sla = self._json_param_object.get("job", {}).get("extendInfo", {}).get("backupTask_sla")
        backup_policy = json.loads(backup_sla).get("policy_list")[0]
        log.info(f"Get backup policy {backup_policy}")
        restart_archive_flag = backup_policy.get("ext_parameters").get("restart_archive", False)
        log.info(f"Get restart archive flag {restart_archive_flag}")
        if not restart_archive_flag:
            return

        # Drop both cached time files so the next backup_key computation starts
        # from the last full backup
        parent_dir = Path(self._cache_area).parent
        show_archived_time_path = os.path.join(parent_dir, "show_archived_time")
        archived_time_path = os.path.join(parent_dir, "archived_time")
        if os.path.exists(show_archived_time_path):
            log.info(f"start to remove show archived time file: {show_archived_time_path}")
            os.remove(show_archived_time_path)
        if os.path.exists(archived_time_path):
            log.info(f"start to remove archived time file: {archived_time_path}")
            os.remove(archived_time_path)

    def merge_past_backup_xbsa_table(self, source_path):
        """Merge the xbsa object table (backupkey.db) from *source_path* into this copy."""
        log.info(f"Merge past backup use source path {source_path}")

        # Destination of the aggregated xbsa object table for the current copy
        target_path = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        if not os.path.exists(target_path) and not exec_mkdir_cmd(target_path):
            return
        # Increment backup merges the previous copy's backupkey.db here
        try:
            merged_ok = aggregate_single_copy_object_data(target_path, source_path, self.get_repo_list(), True)
        except Exception as err:
            log.error(f"Aggregate object data failed. main task:{self._job_id}, err: {err}")
            return
        if not merged_ok:
            log.error(f"Aggregate object data failed. main task:{self._job_id}")
            return

    def save_repo_relations_to_meta(self):
        """Persist data-repository (type 1) filesystem relations into the meta repo."""
        repositories = self._json_param_object.get("job", {}).get("repositories", [])
        relations = []
        for repo in repositories:
            # Only data repositories (repositoryType == 1) are recorded
            if repo.get("repositoryType") != 1:
                continue
            extend_info = repo.get("extendInfo")
            relations.append({
                'oldEsn': extend_info.get("esn"),
                'oldFsId': extend_info.get("fsId"),
                # First segment of the remote path is the filesystem name
                'oldFsName': repo.get("remotePath", "").strip("/").split("/")[0],
                'role': repo.get("role")
            })
        repo_relations_path = os.path.join(self._meta_area, 'meta', self._copy_id, 'repoRelations')
        exec_overwrite_file(repo_relations_path, relations)
        log.info(f"save_repo_relations_to_meta {relations} to {repo_relations_path}")

    def is_increment_copy(self):
        """Return True for every backup type except full backup."""
        backup_type = self.backup_type
        log.info(f"get backupType: {backup_type}")
        return backup_type != BackupTypeEnum.FULL_BACKUP

    def create_path_for_backup(self):
        """Create the directories needed during backup and fix their ownership.

        @return: True when every directory exists (or was created), False otherwise
        """
        required_dirs = (
            # meta repo /meta/copy_id/objectmeta
            (os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta'),
             self.change_meta_permission),
            # cache repo /meta/copy_id/objectmeta
            (os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta"),
             self.change_cache_meta_permission),
            # cache repo /tmp/copy_id/speed (per-host speed files live below it)
            (os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed'),
             self.change_cache_tmp_permisson),
        )
        for dir_path, apply_permission in required_dirs:
            if not os.path.exists(dir_path) and not exec_mkdir_cmd(dir_path):
                return False
            apply_permission(dir_path)
        return True

    def create_path_for_cur_agent(self):
        """Create the per-host directories this agent needs during backup.

        @return: True when every directory exists (or was created), False otherwise
        """
        per_host_dirs = (
            # meta repo /meta/copy_id/objectmeta/host_ip
            (os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta', self._host_ip),
             self.change_meta_permission),
            # cache repo /tmp/copy_id/speed/host_ip
            (os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed', self._host_ip),
             self.change_cache_tmp_permisson),
        )
        for dir_path, apply_permission in per_host_dirs:
            if not os.path.exists(dir_path) and not exec_mkdir_cmd(dir_path):
                return False
            apply_permission(dir_path)
        return True

    def change_cache_tmp_permisson(self, cache_path_speed_host):
        """Set owner and mode along the cache-repo tmp/speed directory chain."""
        # (path, owner, mode); group is always rdadmin
        ownership_plan = (
            (cache_path_speed_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # cache repo /tmp/copy_id/speed
            (os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed'),
             UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # cache repo /tmp
            (os.path.join(self._cache_area, 'tmp'),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # cache repo /tmp/copy_id
            (os.path.join(self._cache_area, 'tmp', self._copy_id),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
        )
        for path, owner, mode in ownership_plan:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def change_cache_meta_permission(self, cache_path_object_host):
        """Set owner and mode along the cache-repo meta directory chain."""
        # (path, owner, mode); group is always rdadmin
        ownership_plan = (
            (cache_path_object_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # cache repo root
            (os.path.join(self._cache_area),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # cache repo /meta
            (os.path.join(self._cache_area, 'meta'),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # cache repo /meta/copy_id
            (os.path.join(self._cache_area, 'meta', self._copy_id),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # cache repo /meta/copy_id/objectmeta
            (os.path.join(self._cache_area, 'meta', self._copy_id, 'objectmeta'),
             UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        )
        for path, owner, mode in ownership_plan:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def change_meta_permission(self, meta_path_object_host):
        """Set owner and mode along the meta-repo directory chain."""
        # (path, owner, mode); group is always rdadmin
        ownership_plan = (
            (meta_path_object_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # meta repo root
            (os.path.join(self._meta_area),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # meta repo /meta
            (os.path.join(self._meta_area, 'meta'),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # meta repo /meta/copy_id
            (os.path.join(self._meta_area, 'meta', self._copy_id),
             UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            # meta repo /meta/copy_id/objectmeta
            (os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta'),
             UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        )
        for path, owner, mode in ownership_plan:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def save_task_info_to_cache(self):
        """Write cache repo /tmp/copy_id/taskInfo_{hostKey}.txt for this host.

        :return: True on success, False when the file could not be created
        """
        task_info = {
            "repositories": self.get_repo_list(),
            "taskType": TaskTypeEnum.BACKUP.value,
            "copyType": self.backup_type
        }
        log.info(f"success to get taskInfo {task_info}")
        task_path = os.path.join(self._cache_area, 'tmp', self._copy_id, f'taskInfo_{self._host_ip}.txt')
        exec_overwrite_file(task_path, task_info)
        if os.path.exists(task_path):
            # Restrict the file to read-only for rdadmin
            set_user_and_group(task_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(task_path, PermissionNode.PERMISSION_400)
            return True
        log.error(f"Create taskInfo_host_ip_txt file failed.")
        return False

    def save_cacheinfo_to_cache(self):
        """Write the per-node xbsa cacheInfo file (cache/meta repo paths).

        :return: True on success, False when the file could not be created
        """
        # Each node writes its own cacheInfo file keyed by the instance id
        cache_info = {
            "cacheRepoPath": self._cache_area, "metaRepoPath": self._meta_area, "copyId": self._copy_id,
            "taskId": self._job_id, "hostKey": self._host_ip
        }
        log.info(f"Create dws_cacheInfo: {cache_info}")
        cache_info_path = os.path.join(RoachConstant.XBSA_FILE,
                                       f"xbsa_cacheInfo_info_tpops_{self._instance_id}_xbsa_backup.txt")
        exec_overwrite_file(cache_info_path, cache_info)
        # Bug fix: verify the file exists BEFORE changing its owner/mode. The
        # original chown/chmod'ed a possibly missing file first (and this now
        # matches the order used by save_business_config).
        if not os.path.exists(cache_info_path):
            log.error("Create dws_cacheInfo.txt failed.")
            return False
        set_user_and_group(cache_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(cache_info_path, PermissionNode.PERMISSION_400)
        return True

    def get_cur_task_ip(self):
        """Pick, from the protect-env endpoint list, the IP owned by this host."""
        host_ip = extract_ip()
        log.info(f"get host_ip： {host_ip}")
        log.info(f"get end_point list: {self._end_point}")
        cur_task_ip = ""
        for candidate in self._end_point.split(','):
            if candidate in host_ip:
                # Keep scanning: the LAST matching endpoint wins, as before
                cur_task_ip = candidate
        log.info(f"get cur task ip: {cur_task_ip}")
        return cur_task_ip

    def save_business_config(self):
        """Write the business_config file (task type) into cache /tmp/copy_id.

        :return: True on success, False when the file could not be created
        """
        # Full backup and every other backup type map to different job-type markers
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            backup_file_type = BusinessConfigType.FULL_BACKUP_TYPE
        else:
            backup_file_type = BusinessConfigType.INCREMENT_BACKUP_TYPE
        business_type_info = {"jobType": backup_file_type}
        business_type_info_path = os.path.join(self._cache_area, "tmp",
                                               self._copy_id, RoachConstant.BUSINESS_CONFIG_FILE)
        exec_overwrite_file(business_type_info_path, business_type_info)
        log.info(f"success to save_business_config, jobType: {backup_file_type}")
        if not os.path.exists(business_type_info_path):
            # Bug fix: the error previously said "dws_cacheInfo.txt" — a
            # copy-paste from save_cacheinfo_to_cache; this writes business_config.
            log.error("Create business_config file failed.")
            return False
        set_user_and_group(business_type_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(business_type_info_path, PermissionNode.PERMISSION_400)
        return True

    def get_current_backup_id(self):
        """Load the DB-side backup job id recorded in the cache repository.

        :return: the recorded job id object, or {} when the file is missing
        """
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if os.path.exists(job_file):
            backup_job_id = self.read_param_file(job_file)
            log.info(f"get backup_job_id:{backup_job_id}")
            return backup_job_id
        log.error(f"copy info path not exist.")
        return {}

    def get_progress(self):
        """Query job progress from the service by the recorded backup job id.

        :return: the job-info body, or {} on any failure
        """
        backup_job_id = self.get_current_backup_id()
        if not backup_job_id:
            return {}
        log.info(f"get backup_job_id:{backup_job_id}")

        ret_body = self._fun_inst.get_job_info(backup_job_id)
        if not ret_body:
            log.error(f"Failed get job info!")
            return {}
        if "message" not in ret_body:
            return ret_body
        # A "message" key indicates a service-side error payload
        clean_message = self.constuct_clean_log(ret_body.get("message"))
        log.error(f'Exec GET error with return: {clean_message}')
        return {}

    def get_log_backup_progress(self):
        """Query the log-backup list of this instance.

        :return: the response body, or {} on failure / service-side error
        """
        log.info(f"start to get_log_backup_list")
        ret_body = self._fun_inst.get_log_backup_list(self._instance_id)
        if not ret_body:
            log.error(f"Failed get job info!")
            return {}
        if "message" not in ret_body:
            log.info(f"get_log_backup_progress ret_body {ret_body}")
            return ret_body
        # A "message" key indicates a service-side error payload
        clean_message = self.constuct_clean_log(ret_body.get("message"))
        log.error(f'Exec GET error with return: {clean_message}')
        return {}

    def get_backup_progress(self):
        """Query the (full) backup list of this instance.

        :return: the response body, or {} on failure / service-side error
        """
        # Bug fix: the log previously said "get_log_backup_list" — a copy-paste
        # from get_log_backup_progress; this method queries get_backup_list.
        log.info(f"start to get_backup_list")
        ret_body = self._fun_inst.get_backup_list(self._instance_id)
        if not ret_body:
            log.error(f"Failed get job info!")
            return {}
        if "message" in ret_body:
            # A "message" key indicates a service-side error payload
            clean_message = self.constuct_clean_log(ret_body.get("message"))
            log.error(f'Exec GET error with return: {clean_message}')
            return {}
        log.info(f"get_backup_progress ret_body {ret_body}")
        return ret_body

    def get_backup_diff_progress(self):
        """Query the differential-backup list of this instance.

        :return: the response body, or {} on failure / service-side error
        """
        ret_body = self._fun_inst.get_diff_backup_list(self._instance_id)
        if not ret_body:
            log.error(f"Failed get job info!")
            return {}
        if "message" not in ret_body:
            log.info(f"get_backup_diff_progress ret_body {ret_body}")
            return ret_body
        # A "message" key indicates a service-side error payload
        clean_message = self.constuct_clean_log(ret_body.get("message"))
        log.error(f'Exec GET error with return: {clean_message}')
        return {}

    def get_log_comm(self):
        return f"pid:{self._pid} jobId:{self._job_id} subjobId:{self._sub_job_id}"

    def write_backup_progress(self, backup_id=''):
        """Poll the service and report backup progress until the job leaves RUNNING.

        :param backup_id: DB-side backup id used to fetch the finished copy info
        :raises Exception: when the service reports a terminal non-success status
                           (the raw job body is raised as a JSON string)
        """
        # Periodically report backup progress; bail out after MAX_QUERY_JOB_TIMES
        # polls that return no "job" body
        retry_count = 0
        while self._backup_status == GaussBackupStatus.RUNNING:
            # A missing progress file may simply not have been generated yet, so
            # that is not treated as failure; progress comes from the job query.
            if retry_count >= MAX_QUERY_JOB_TIMES:
                log.error("Max retries reached for job_json is None")
                break
            progress_info = self.get_progress()
            log.info(f"Get backup progress {progress_info}")
            job_json = progress_info.get("job")
            backup_info = ''
            if job_json is None:
                log.warning("Get backup progress job_json is None, retrying...")
                retry_count += 1
                time.sleep(QUERY_JOB_INTERVAL)
                continue
            self._backup_status = job_json.get("status", "")
            if self._backup_status == GaussBackupStatus.RUNNING:
                status = SubJobStatusEnum.RUNNING
            elif self._backup_status == GaussBackupStatus.SUCCEED:
                status = SubJobStatusEnum.COMPLETED
                # Log backup: record the cn-restart condition and report restore_time
                if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
                    self.upload_log_backup_info()
                elif self.backup_type == BackupTypeEnum.FULL_BACKUP.value:
                    backups = self._fun_inst.get_backup_list(self._instance_id, backup_id).get("backups", [])
                    backup_info = backups[0] if backups else ''
                    log.info(f"Complete full backup info: {backup_info}")
                else:
                    # Differential backup: keep the info only if the newest entry
                    # matches the id we started
                    backups = self._fun_inst.get_diff_backup_list(self._instance_id, 1).get("differential_backups", [])
                    backup_info = backups[0] if backups and backups[0]['id'] == backup_id else ''
                    log.info(f"Complete diff backup info: {backup_info}")
            else:
                raise Exception(json.dumps(job_json))
            log.info(f"progress_info.job(status and progress): {job_json}")
            try:
                progress = int(progress_info.get("progress", "0"))
            except Exception:
                log.error("Failed calculate progress")
                progress = 0
            log.info(f"status：{status}   progress: {progress}")
            time.sleep(self._query_progress_interval)
            job_info = self.get_job_info()
            write_progress_file_with_status_and_speed(job_info, status, progress, True, backup_info)

    def write_log_backup_progress(self):
        """Poll the log-backup list and report progress until it leaves RUNNING.

        :raises Exception: when the newest log backup reports a terminal
                           non-success status (the raw body as a JSON string)
        """
        # Periodically report log-backup progress
        log.info(f"write_log_backup_progress {self._backup_status}")
        while self._backup_status == GaussLogBackupStatus.RUNNING:
            # Progress comes from querying the log-backup list; a missing progress
            # file may not have been generated yet, so it is not a failure.
            # NOTE(review): get_log_backup_progress() returns {} on failure, making
            # .get("backups")[0] raise TypeError here — confirm callers handle that.
            progress_info = self.get_log_backup_progress()
            job_json = progress_info.get("backups")[0]
            self._backup_status = job_json.get("status", "")
            log.info(f"get_backup_status {self._backup_status}")
            if self._backup_status == GaussLogBackupStatus.RUNNING:
                status = SubJobStatusEnum.RUNNING
            elif self._backup_status == GaussLogBackupStatus.SUCCEED:
                status = SubJobStatusEnum.COMPLETED
                # Log backup: record the cn-restart condition and report restore_time
                self.upload_log_backup_info()
            else:
                # Once the log backup task is issued, any database-side error is
                # still reported as success upstream; keep the detail for later.
                self._detail_message = job_json.get("detail_message", "")
                raise Exception(json.dumps(job_json))
            log.info(f"progress_info.job(status and progress): {job_json}")
            log.info(f"status：{status}   progress: 0")
            time.sleep(self._query_progress_interval)
            job_info = self.get_job_info()
            write_progress_file_with_status_and_speed(job_info, status, 0, True)

    def upload_log_backup_info_when_return_fail(self):
        """After a failed log backup, keep the copy and report the recoverable range.

        A database failure is translated into partial success: the error code is
        recognized, the cn-restart marker is checked/recorded, and the restorable
        time is reported; cn_build_flag decides whether the next backup goes full.
        """
        # Archive interval too large: report the copy directly, next backup goes full.
        if self._detail_message != "" and 'GAUSS-53435' in self._detail_message:
            self.deal_with_log_intervel_too_long()
            return

        log.info("Log back up fail, report part success")
        meta_source = os.path.join(self._meta_area, "meta", self._copy_id, "objectmeta")

        # No backup key produced during archiving — the database sent nothing to
        # x8000; keep the copy but exclude it from the sqlite merge.
        if not get_all_db_files(meta_source):
            log.info("Log Back up not send data to xbsa")
            no_data_flag = os.path.join(self._cache_area, "meta", self._copy_id, "no_send_data_flag")
            exec_overwrite_file(no_data_flag, {})
            log.info(f"Get sqlite info is empty, no data received, no need save copy, job id: {self._job_id}")
            return

        # No cn_build_flag file: keep the copy, report 1s of restorable time,
        # do not convert the next backup to full.
        restore_time_files = self.query_restore_time_file_relative_path(meta_source)
        log.info(f"Get restore time files: {restore_time_files}")
        if not restore_time_files:
            self.report_log_back_part_success()
            return

        time_body_infos = self.get_time_body_infos(restore_time_files)
        log.info(f"Get restore time body infos: {time_body_infos}")
        if not time_body_infos:
            self.report_log_back_part_success()
            return

        # A restorable time was found — report it normally.
        merged_info = merge_time_body_info(time_body_infos)
        log.info(f"Get restore time body info: {merged_info}")

        report_ok = self.update_restore_time(merged_info, False)
        # Persist the archived-to time for the next log backup.
        self.save_archived_time(merged_info)
        self.save_show_archived_time(merged_info)
        if not report_ok:
            log.error(f"update restore time error")

    def get_time_body_infos(self, restore_time_files):
        """Collect restore-time entries parsed from the xbsa object files.

        :param restore_time_files: list of (relative_path, file_system_name)
            tuples pointing at restorable-time files
        :return: list of parsed time-body entries (one per file with >= 3 lines)
        """
        infos = []
        for relative_path, fs_name in restore_time_files:
            # Resolve the file system's mount point and assemble the full path.
            absolute_path = get_mount_path(fs_name, self.get_repo_list()) + relative_path

            # Each file carries basic_recovery_time, start time, the end-time
            # list and the cn restart flag; fewer than 3 lines is incomplete.
            lines = self.read_xbsa_objects(absolute_path)
            if len(lines) >= 3:
                infos.append(self.get_restore_time_and_cn_build_flag(lines))
        return infos

    def deal_with_log_intervel_too_long(self):
        """Archive gap too large: report partial success, flag next backup as full."""
        self.report_log_back_part_success()
        # Mark that no data reached xbsa for this copy (skipped in sqlite merge).
        no_data_flag = os.path.join(self._cache_area, "meta", self._copy_id, "no_send_data_flag")
        exec_overwrite_file(no_data_flag, {})
        # Drop the next-backup-to-full marker beside the cache area.
        full_flag_path = os.path.join(Path(self._cache_area).parent, "nextBackupToFullFlag")
        log.info(
            f"get backup_key_file_archived_time {full_flag_path}")
        exec_overwrite_file(full_flag_path, {})

    def get_restore_time_and_cn_build_flag(self, lines):
        """Parse restore-time pairs and the cn restart flag from xbsa object lines.

        Lines containing a comma hold "<start>,<end>" timestamp pairs; any other
        non-empty line carries the cn restart flag in its second-to-last
        character (presumably because lines keep a trailing newline — TODO
        confirm against read_xbsa_objects). When the flag is set, a
        "nextBackupToFullFlag" marker is written next to the cache area so the
        next backup converts to a full backup.

        :param lines: raw lines read from the xbsa restore-time object file
        :return: list of [start_time, end_time] integer pairs
        """
        time_body_info = []
        cn_build_flag = "0"
        for line in lines:
            if line == "":
                continue
            words = line.split(",")
            if len(words) > 1:
                time_body_info.append([int(words[0]), int(words[1])])
            else:
                cn_build_flag = line[len(line) - 2]
        # BUG FIX: the flag extracted from the line is a single character
        # (string), so the original comparison against the int 1 was always
        # False and the marker file was never written. Compare string forms.
        if str(cn_build_flag) == "1":
            cache_path_parent = Path(self._cache_area).parent
            next_backup_to_full_flag_path = os.path.join(cache_path_parent, "nextBackupToFullFlag")
            log.info(
                f"get backup_key_file_archived_time {next_backup_to_full_flag_path}")
            exec_overwrite_file(next_backup_to_full_flag_path, {})
        return time_body_info

    def report_log_back_part_success(self):
        """Report the log backup as partially successful with an incomplete copy.

        The archived-to time is resolved in priority order: the
        show_archived_time file, the archived_time file, then the previous
        data copy's backup time.
        """
        parent_dir = Path(self._cache_area).parent
        show_time_file = os.path.join(parent_dir, "show_archived_time")
        time_file = os.path.join(parent_dir, "archived_time")
        log.info(f"get backup_key_file_archived_time {show_time_file}")
        if os.path.exists(show_time_file):
            archived_time = int(read_file(show_time_file))
            log.info(f"get show archived time: {archived_time}")
        elif os.path.exists(time_file):
            archived_time = int(read_file(time_file))
            log.info(f"get archived time: {archived_time}")
        else:
            copy_info = self.get_last_copy_info(LastCopyType.FULL_DIFF_COPIES)
            log.info(f"get last copy info {copy_info}")
            archived_time = int(copy_info.get("extendInfo", {}).get("backupTime"))
        time_body_info = [[archived_time, archived_time + 1]]
        log.info(f"Get time_body_info: {time_body_info}")
        # Persist the archived-to time before reporting.
        self.update_restore_time(time_body_info, False)
        self.save_show_archived_time(time_body_info)
        # Report that the current log copy is incomplete and cannot be restored.
        incomplete_detail = LogDetail(logInfo="tpops_log_backup_success_with_incomplete_copy_label",
                                      logInfoParam=[], logLevel=2)
        details = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=50,
                                logDetail=[incomplete_detail],
                                taskStatus=SubJobStatusEnum.RUNNING.value)
        report_job_details(self._job_id, details.dict(by_alias=True))

    def save_show_archived_time(self, time_body_info):
        """Persist the archived-to time; the next log archive resumes from it."""
        archived_time = int(time_body_info[-1][1])
        target = os.path.join(Path(self._cache_area).parent, "show_archived_time")
        log.info(f"get show_archived_time {target}, show_archived_time: {archived_time}")
        exec_overwrite_file(target, archived_time)

    def upload_backup_progress(self):
        """Periodically report progress read from this sub-job's progress file."""
        while self._job_status == SubJobStatusEnum.RUNNING and not self._log_backup_part_success_flag:
            log.info(f"Start to report status {self._job_status}")
            progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
            # A missing progress file may simply not be generated yet; keep
            # reporting RUNNING instead of failing.
            if not os.path.exists(progress_file):
                placeholder = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                            taskStatus=SubJobStatusEnum.RUNNING,
                                            progress=0, logDetail=self._logdetail)
                report_job_details(self._job_id, placeholder.dict(by_alias=True))
                time.sleep(self._query_progress_interval)
                continue
            with open(progress_file, "r") as f_object:
                progress_dict = json.loads(f_object.read())

            self._job_status = progress_dict.get("taskStatus")
            log.info(f"Get progress_dict in upload_backup_progress.{self._job_status}")
            log.info(f"upload_backup_progress{self.get_log_comm()}")
            if not self._job_status:
                # No status in the file: treat the task as failed, report 100%.
                log.error(f"Failed to obtain the task status.{self.get_log_comm()}")
                self._job_status = SubJobStatusEnum.FAILED
                progress_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                              taskStatus=SubJobStatusEnum.FAILED, progress=100,
                                              logDetail=self._logdetail).dict(by_alias=True)

            time.sleep(self._query_progress_interval)
            report_job_details(self._job_id, progress_dict)

    def backup_task_subjob_dict(self):
        """Map each backup sub-job name to the bound method that executes it."""
        return {
            GaussSubJobName.SUB_XBSA: self.sub_job_xbsa,
            GaussSubJobName.SUB_ROACH: self.sub_job_roach,
            GaussSubJobName.SUB_EXEC: self.sub_job_exec,
            GaussSubJobName.SUB_MERGE_DB: self.sub_merge_db,
            GaussSubJobName.SUB_QUERY_COPY: self.sub_query_copy,
        }

    def backup_task(self):
        """Run one backup sub-job while a daemon thread reports progress.

        :return: True on success; also True when a LOG backup fails (treated as
            partial success, copy kept); False otherwise.
        """
        log.info(f"backup_task start")
        job_info = self.get_job_info()
        write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.RUNNING, 0, False)
        # Start a thread that queries and reports backup progress.
        sub_job_dict = self.backup_task_subjob_dict()
        progress_thread = threading.Thread(name='pre_progress', target=self.upload_backup_progress)
        progress_thread.daemon = True
        progress_thread.start()
        # Execute the sub-job named in the task parameters.
        sub_job_name = ResourceParam.get_sub_job_name(self._json_param_object)
        log.info(f"sub_job_name : {sub_job_name}")
        if not sub_job_name:
            return False

        self._sub_job_name = sub_job_name

        try:
            ret = sub_job_dict.get(sub_job_name)()
        except Exception as err:
            log.error(f"do sub job fail: {err}")
            # BUG FIX: err.args may be empty (e.g. `raise Exception()`), in
            # which case err.args[0] raised IndexError; fall back to str(err).
            error_message = err.args[0] if err.args else str(err)
            log.info(f"get error_message: {error_message}")
            return self._handle_backup_subjob_failure(sub_job_name, progress_thread, error_message)
        if not ret:
            return self._handle_backup_subjob_failure(sub_job_name, progress_thread, None)

        progress_thread.join()
        log.info(f"backup_task end")
        return True

    def _handle_backup_subjob_failure(self, sub_job_name, progress_thread, error_message):
        """Report a failed sub-job; LOG backups count as partial success (True)."""
        log.error(f"Exec sub job {sub_job_name} failed.{self.get_log_comm()}.")
        self._err_code = ErrorCode.ERR_BACKUP_RESTORE
        log.info(f"get error_code: {self._err_code}")
        log_detail_param = [self._instance_id]
        # Only exception paths carry an error message for the log detail.
        if error_message is not None and self._err_code == ErrorCode.ERR_BACKUP_RESTORE:
            log_detail_param.append(error_message)
        self.report_job_fail_info(log_detail_param)
        # NOTE(review): elsewhere backup_type is compared to
        # BackupTypeEnum.LOG_BACKUP.value; here the enum member itself is used
        # (preserved from the original) — confirm which form backup_type holds.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self._log_backup_part_success_flag = True
            progress_thread.join()
            return True
        return False

    def report_job_fail_info(self, log_detail_param):
        """Report the sub-job as failed; log backups become partial success."""
        log.info(f"get log detail param {log_detail_param}")
        # A failed log backup counts as partial success and the copy is kept.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self.upload_log_backup_info_when_return_fail()
            task_status = SubJobStatusEnum.COMPLETED.value
        else:
            task_status = SubJobStatusEnum.FAILED.value
        fail_detail = LogDetail(logInfo="plugin_task_subjob_fail_label", logInfoParam=[self._sub_job_id],
                                logLevel=LogLevel.ERROR.value, logDetail=self._err_code,
                                logDetailParam=log_detail_param)
        report_job_details(self._pid,
                           SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                         logDetail=[fail_detail],
                                         taskStatus=task_status).dict(by_alias=True))

    def build_sub_job(self, job_priority, policy, job_name, node_id=None):
        """Build one business sub-job dict; pin it to node_id when given."""
        model = SubJobModel(jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                            jobPriority=job_priority, jobName=job_name, policy=policy, ignoreFailed=False)
        if node_id:
            model.exec_node_id = node_id
        return model.dict(by_alias=True)

    def gen_sub_job_pre(self):
        """Record the backup start time (first run only) and gather split inputs.

        :return: (protect-env nodes list, result file path for this pid)
        """
        start_time_path = os.path.join(self._cache_area, f'T{self._job_id}')
        # Resumed tasks keep the original start time; only write it once.
        if not os.path.exists(start_time_path):
            start_time = str(int((time.time())))
            exec_overwrite_file(start_time_path, start_time, json_flag=False)
            log.info(f"Success to write backup start time {start_time} to {start_time_path}.")

        log.info("step2-4 start to gen_sub_job")
        file_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._pid}")
        nodes = self._json_param_object.get("job", {}).get("protectEnv", {}).get("nodes", [])
        log.info(f"gen_sub_job get nodes")
        return nodes, file_path

    def gen_roach_job(self, sub_job_array):
        """Append the Roach sub-job (runs once per node, skipping offline ones).

        With agents selected in the SLA the job targets them; otherwise it
        targets every available agent.
        """
        sub_job_array.append(
            self.build_sub_job(SubJobPriorityEnum.JOB_PRIORITY_2,
                               SubJobPolicyEnum.EVERY_NODE_ONE_TIME_SKIP_OFFLINE.value,
                               GaussSubJobName.SUB_ROACH))

    def gen_sub_job(self):
        """Split the backup task into ordered sub-jobs and write them to file.

        Sub-jobs: set xbsa media, roach, execute, merge sqlite data and — for
        non-log backups only — report the backup copy.

        :return: True on success, False when no protect-env nodes exist.
        """
        nodes, file_path = self.gen_sub_job_pre()
        if len(nodes) == 0:
            log.error("nodes is empty")
            return False
        sub_job_array = []
        # Sub-job 1: set the backup media to xbsa.
        self.gen_sub_xbsa_job(sub_job_array)

        # Sub-job 2: roach — SLA-selected agents, or all available agents.
        self.gen_roach_job(sub_job_array)

        # Sub-job 3: execute the backup.
        sub_job_array.append(self.build_sub_job(SubJobPriorityEnum.JOB_PRIORITY_3,
                                                SubJobPolicyEnum.ANY_NODE.value,
                                                GaussSubJobName.SUB_EXEC))

        # Sub-job 4: merge the xbsa object data.
        sub_job_array.append(self.build_sub_job(SubJobPriorityEnum.JOB_PRIORITY_4,
                                                SubJobPolicyEnum.ANY_NODE.value,
                                                GaussSubJobName.SUB_MERGE_DB))

        # Sub-job 5: report the backup copy. Log backups report the copy when
        # the task completes, so no extra sub-job is needed for them.
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            # BUG FIX: the original message said "backup_type is log" inside the
            # non-log branch; corrected to match the actual condition.
            log.info(f"backup_type is not log. A backup copy needs to be reported.")
            sub_job_array.append(self.build_sub_job(SubJobPriorityEnum.JOB_PRIORITY_5,
                                                    SubJobPolicyEnum.ANY_NODE.value,
                                                    GaussSubJobName.SUB_QUERY_COPY))

        log.info(f"step2-4 Sub-task splitting succeeded. sub_job_array: {sub_job_array}, "
                 f"sub-task num:{len(sub_job_array)}")
        exec_overwrite_file(file_path, sub_job_array)
        log.info("step2-4 end to gen_sub_job")
        return True

    def gen_sub_xbsa_job(self, sub_job_array):
        """Append the first-priority sub-job that switches backup media to xbsa."""
        sub_job_array.append(
            self.build_sub_job(SubJobPriorityEnum.JOB_PRIORITY_1,
                               SubJobPolicyEnum.ANY_NODE.value,
                               GaussSubJobName.SUB_XBSA))

    def get_last_copy_info(self, last_copy_type):
        """Query the previous copy of the given type over RPC; {} on failure."""
        request = {
            RpcParamKey.APPLICATION: self._json_param_object.get("job", {}).get("protectObject"),
            RpcParamKey.TYPES: last_copy_type,
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id,
        }
        try:
            return invoke_rpc_tool_interface(self._job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, request)
        except Exception as err_info:
            log.error(f"Get last copy info fail.{err_info}")
            return {}

    def check_backup_job_type(self):
        """Validate the requested backup type; force increment/log to full when needed.

        Writes an ActionResponse result file for the caller: INTERNAL_ERROR with
        ERROR_INCREMENT_TO_FULL when the previous required copy is missing (the
        task must be converted to a full backup), otherwise SUCCESS.

        :return: True when the requested type can proceed, False otherwise.
        """
        log.info("step2-2 start to check backup job type")

        # When this task is an incremental backup and no full backup was done
        # before, the increment must be converted to a full backup.

        def check_last_copy_is_null():
            # Returns True when the previous copy this backup depends on is
            # missing or was deleted, i.e. a conversion to full is required.
            last_copy_type = LastCopyType.FULL_COPIES
            last_copy_id_path = 'last_increment_copy_id'
            if self.backup_type == BackupTypeEnum.LOG_BACKUP:
                log.info(f"backup_type is log")
                last_copy_type = LastCopyType.ALL_COPIES
                last_copy_id_path = 'last_copy_id'
            last_copy_info = self.get_last_copy_info(last_copy_type)
            if len(last_copy_info) != 0:
                # Read the previous copy ID recorded in the cache repository:
                # for an incremental backup, check the last full-backup copy;
                # for a log backup, check the last copy of any type.
                log.info(f"last_copy_info exits")
                cache_path_parent = Path(self._cache_area).parent
                last_copy_id_file = os.path.join(cache_path_parent, last_copy_id_path)
                # NOTE(review): no existence check before open() — presumably an
                # earlier backup always writes this file; confirm.
                with open(last_copy_id_file, "r", encoding='utf-8') as copy_info:
                    last_copy_id = copy_info.read().strip()

                # Query the previous copy ID through the RPC interface.
                last_copy_info_id = last_copy_info.get("id", "")
                log.info(f"get last_copy_info_id {last_copy_info_id}, last_copy_id {last_copy_id}")

                # If the IDs differ, the previous copy was deleted: go full.
                if last_copy_info_id == last_copy_id:
                    log.info("Previous backup copy exists")
                    return False
                else:
                    log.info("last_copy_info_id is different from last_copy_id")
                    return True
            else:
                log.info("last_copy_info is empty")
                return True

        log.info(f'step 2-2: start execute check_backup_job_type, pid: {self._pid}, job_id:{self._job_id}')
        backup_type = self.backup_type
        log.debug(f"check backup_type is {backup_type}")
        if not backup_type:
            log.debug(f"backup_type is empty")
            return False
        if backup_type == BackupTypeEnum.FULL_BACKUP:
            log.debug(f"backup_type is full_backup")
            return True
        if check_last_copy_is_null() or self.check_next_backup_to_full_flag():
            response = ActionResponse(code=ExecuteResultEnum.INTERNAL_ERROR,
                                      bodyErr=ErrorCode.ERROR_INCREMENT_TO_FULL,
                                      message="Can not apply this type backup job")
            output_result_file(self._pid, response.dict(by_alias=True))
            log.info("change backup_type increment to full")
            return False
        response = ActionResponse(code=ExecuteResultEnum.SUCCESS)
        output_result_file(self._pid, response.dict(by_alias=True))
        log.info(f'step 2-2: finish execute check_backup_job_type, pid: {self._pid}, job_id:{self._job_id}')
        return True

    def check_next_backup_to_full_flag(self):
        """Return True when the next-backup-to-full marker file exists."""
        flag_path = os.path.join(Path(self._cache_area).parent, "nextBackupToFullFlag")
        if not os.path.exists(flag_path):
            log.info(f"Not get next backup to full flag")
            return False
        log.info(f"Get next backup to full flag, convert to full backup")
        return True

    def sub_job_xbsa(self):
        """Switch the instance's backup media to xbsa and wait for that job to end.

        TPOPS database versions need no switch and return success immediately.

        :return: True when the switch job succeeded, False otherwise (including
            when polling gave up after MAX_QUERY_JOB_TIMES empty responses).
        :raises GaussDBException: when the media-change request itself fails or
            returns no job_id.
        :raises Exception: with the raw job payload when the switch job ends in
            any status other than RUNNING/SUCCEED.
        """
        log.info(f"start to set xbsa backup media. {self.get_log_comm()}")
        # TPOPS versions return success directly without switching.
        job_info = self.get_job_info()
        if self._fun_inst.db_version == VERSION.TPOPS:
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, False)
            return True
        request_body = {
            "type": "xbsa"
        }
        try:
            ret_body = self._fun_inst.change_backup_media(self._instance_id, request_body)
        except Exception as e:
            log.error(f"change backup media to xbsa error exception: {e}")
            raise GaussDBException(f"change backup media to xbsa error exception: {e}")
        # Media already switched to xbsa: nothing more to do.
        if ret_body.get("errCode") == GaussDBRdsErrorCode.REPEAT_CHANGE_BACKUP_MEDIA_TO_XBSA:
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, False)
            return True
        if "job_id" not in ret_body:
            log.error(f"set xbsa backup media failed. {ret_body}")
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.FAILED, 100, False)
            raise GaussDBException(f"set xbsa backup media failed. {ret_body}")
        job_id = ret_body.get("job_id")
        job_status = GaussBackupStatus.RUNNING

        # Retry counter for polls whose response carries no "job" payload.
        retry_count = 0
        while job_status == GaussBackupStatus.RUNNING:
            if retry_count >= MAX_QUERY_JOB_TIMES:
                log.error("Max retries reached for job_json is None")
                break  # job_status stays RUNNING, so the method returns False below
            progress_info = self._fun_inst.get_job_info(job_id)
            log.info(f"Get set xbsa progress {progress_info}")
            job_json = progress_info.get("job")
            if job_json is None:
                log.warning("Set xbsa job_json is None, retrying...")
                retry_count += 1
                time.sleep(QUERY_JOB_INTERVAL)
                continue
            job_status = job_json.get("status", "")
            log.info(f"progress_info.job(status and progress): {job_json}")
            time.sleep(self._query_progress_interval)
            if job_status == GaussBackupStatus.RUNNING:
                status = SubJobStatusEnum.RUNNING
            elif job_status == GaussBackupStatus.SUCCEED:
                status = SubJobStatusEnum.COMPLETED
            else:
                # Any other terminal status aborts with the raw job payload.
                raise Exception(json.dumps(job_json))
            write_progress_file_with_status_and_speed(job_info, status, 0, False)
        log.info(f"set xbsa backup media finished. {self.get_log_comm()}")
        if job_status == GaussBackupStatus.SUCCEED:
            return True
        else:
            return False

    def sub_job_roach(self):
        """Record this node's roach agent host:port in the meta repository.

        Creates cache paths, saves cache/task info, then writes a
        "host_port_<ip>" file under hostInfo/<job_id> containing "ip:port".

        :return: True on success.
        :raises Exception: ERR_BACKUP_AGENT_NOT_START when the local roach
            agent's host/port cannot be determined.
        """
        log.info("step2-5 start to sub_job_roach")
        self.create_path_for_cur_agent()
        self.save_cacheinfo_to_cache()
        self.save_task_info_to_cache()

        # Query the ip and port configured in this node's roach process; a
        # failure means the backup agent is not running.
        try:
            host, port = get_agent_roach_host_and_port()
        except Exception:
            # Original used an error flag and raised outside the handler; the
            # observable exception type and argument are unchanged.
            raise Exception(RpcParamKey.ERR_BACKUP_AGENT_NOT_START)
        host_port = f"{host}:{port}"
        log.info(f"step2-5-1 sub_job_roach {host_port}")
        # hostInfo/<job_id> is a directory (was misnamed "..._file"); create it
        # when missing.
        host_info_dir = os.path.join(self._meta_area, "hostInfo", self._job_id)
        if not os.path.exists(host_info_dir):
            log.info(f"start to make dir {host_info_dir}")
            exec_mkdir_cmd(host_info_dir)

        host_port_file = os.path.join(host_info_dir, f"host_port_{host}")
        # Remove any stale file before rewriting it.
        if os.path.exists(host_port_file):
            log.info("start to remove File")
            os.remove(host_port_file)
        exec_overwrite_file(host_port_file, host_port)
        log.info(f"step2-5-1 end to output_execution_result_ex File host_port_file")
        write_progress_file_with_status_and_speed(self.get_job_info(), SubJobStatusEnum.COMPLETED, 100, False)
        return True

    def sub_job_exec(self):
        """Issue the backup request to the database and track it to completion.

        Sends a full/diff/log backup request, polls progress into the progress
        file, saves the backup key for later log backups, and for full backups
        records the copy-id/timestamp mapping used by log backups.

        :return: True when the backup finished successfully, False otherwise.
        """
        log.info(f"sub_job_exec start")
        # Mark which node ran the latest backup sub-job (used by post-task delete).
        delete_flag = os.path.join(self._cache_area, f"agent_ip")
        try:
            log.info(f"Currently, {self._host_ip} will perform operation delete in the post-task")
            exec_overwrite_file(delete_flag, self._host_ip)
        except Exception as err:
            log.warning(f"Fail to mark the latest backup subtask, error is {err}")

        # tpops log backup: a previous failed run flagged "no data sent" —
        # count the task as success and keep the copy.
        backup_no_send_data_flag_path = os.path.join(self._cache_area, "meta", self._copy_id, "no_send_data_flag")
        if os.path.exists(backup_no_send_data_flag_path):
            job_info = self.get_job_info()
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, True)
            return True
        log.info("step2-6 start to exec_back_up")

        # Apply the database configuration policy.
        if not self.set_db_conf(self._json_param_object):
            log.error("GaussDB set configuration failed.")
            log_detail = LogDetail(logInfo="plugin_execute_gaussdb_conf_task_fail_label",
                                   log_info_param=[self._sub_job_id],
                                   logLevel=LogLevel.ERROR,
                                   logDetail=ErrorCode.ERR_GAUSSDB_CONF)
            sub_dict = SubJobDetails(taskId=self._job_id,
                                     subTaskId=self._sub_job_id,
                                     progress=100,
                                     logDetail=[log_detail],
                                     taskStatus=SubJobStatusEnum.FAILED.value)
            report_job_details(self._pid, sub_dict.dict(by_alias=True))
            return False
        log.info("GaussDB set configuration success.")

        # NOTE(review): backup_type is compared to enum members here but to
        # LOG_BACKUP.value further below — confirm which form backup_type holds.
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            backup_type = GaussBackupType.FULL
            backup_body = {
                "instance_id": self._instance_id,
                "name": set_backup_name(),
                "description": self._sub_job_id,
                "backup_type": backup_type
            }
        elif self.backup_type == BackupTypeEnum.LOG_BACKUP:
            backup_type = GaussBackupType.LOG
            backup_body = self.build_backup_body()
        else:
            backup_type = GaussBackupType.DIFF
            backup_body = {
                "instance_id": self._instance_id,
                "name": set_backup_name(),
                # BUG FIX: the diff branch sent the literal text
                # "self._sub_job_id" instead of the sub-job id itself.
                "description": self._sub_job_id,
                "backup_type": backup_type
            }
        log.info(f"sub_job_exec backup_body: {backup_body}")
        self.sub_job_set()
        log.info("step2-6-2 end to sub_job_set")

        # Resume support: a request already issued is not issued again.
        backup_request_result_file = os.path.join(self._cache_area, "meta", self._copy_id, "backup_request_result")
        if os.path.exists(backup_request_result_file):
            log.info("Skip cur request")
            ret_body = read_file(backup_request_result_file)
        else:
            ret_body = self._fun_inst.request_backup_job(self._instance_id, backup_body)
        log.info(f"Succeed start backup ret_body {ret_body}")

        # Persist the request result for resumed runs.
        exec_overwrite_file(backup_request_result_file, ret_body)

        # Save this backup's backup_id and job_id.
        backup_id = self.save_backup_job_id_and_backup_id(ret_body)
        # Poll progress into the progress file until the job leaves RUNNING.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
            log.info("start to write_log_backup_progress")
            self._backup_status = GaussLogBackupStatus.RUNNING
            self.write_log_backup_progress()
        else:
            log.info("start to write_backup_progress")
            self._backup_status = GaussBackupStatus.RUNNING
            self.write_backup_progress(backup_id)
        # Non-log backups must save the backup key for later log backups.
        if backup_type != GaussBackupType.LOG:
            self.save_backup_key(backup_type, backup_id)
        log.info("step2-6 end to exec_back_up")
        if self._backup_status not in (GaussBackupStatus.SUCCEED, GaussLogBackupStatus.SUCCEED):
            return False
        if backup_type != GaussBackupType.FULL:
            return True

        # Full backups: record the copy-id/timestamp mapping list.
        log.info("Start to save data backup ids")
        cache_path_parent = Path(self._cache_area).parent
        full_copy_id_and_timestamp_map_file = os.path.join(cache_path_parent, "full_copy_id_and_timestamp_maps")
        if os.path.exists(full_copy_id_and_timestamp_map_file):
            copy_id_list = read_file(full_copy_id_and_timestamp_map_file)
            # Resume support: skip when this copy's mapping is already recorded.
            last_copy_id = copy_id_list[-1].get("copy_id", "")
            if last_copy_id == self._job_id:
                log.info(f"Full backup timestamp map file {full_copy_id_and_timestamp_map_file} is already write")
                return True
        else:
            copy_id_list = []
        copy_id_list.append({"copy_id": f"{self._job_id}", "time": int(time.time())})
        log.info(f"Save copy_id_list: {copy_id_list} in {full_copy_id_and_timestamp_map_file}")
        exec_overwrite_file(full_copy_id_and_timestamp_map_file, copy_id_list)
        return True

    def build_backup_body(self):
        """Build the request body for a log backup, including its backup-id deps."""
        # A log backup must carry the backup ids of the last full and last
        # incremental backups it depends on.
        cache_path_parent = Path(self._cache_area).parent
        last_full_copy_id = self.get_last_full_copy_id()
        key_path = os.path.join(cache_path_parent, last_full_copy_id, "meta", last_full_copy_id, "backup_key")
        backup_ids = read_file(key_path)
        log.info(f"get backup_ids {backup_ids}")

        # Filter the dependency list by the archived-to time.
        depend_backup_ids = self.get_backup_ids(backup_ids, cache_path_parent)

        body = {
            "instance_id": self._instance_id,
            "name": set_backup_name(),
            "description": "mannual backup",
            "backup_type": GaussBackupType.LOG,
            "depend_backupIds": depend_backup_ids,
        }
        log.info(f"sub_job_exec backup_body: {body}. {self.get_log_comm()}")
        return body

    def get_backup_ids(self, backup_ids, cache_path_parent):
        """Resolve the backup ids this log backup depends on.

        Falls back to the last full backup id when the copy-id/timestamp map is
        missing or holds a single entry (patch/upgrade scenarios); otherwise
        filters dependency keys by the archived-to time, skipping deleted copies.
        """
        depend_backup_ids = []
        backup_key_file_archived_time = os.path.join(cache_path_parent, "archived_time")
        log.info(f"get backup_file_archived_time {backup_key_file_archived_time}")

        # Patch/upgrade adaptation: when the map file is absent, only the most
        # recent backup key is passed on.
        full_copy_map_file = os.path.join(cache_path_parent, "full_copy_id_and_timestamp_maps")
        if not os.path.exists(full_copy_map_file):
            log.info(f"backup_key_file_archived_time not exists. {self.get_log_comm()}")
            return self.get_last_full_backup_id(backup_ids, depend_backup_ids)
        copy_id_timestamp_map_list = read_file(full_copy_map_file)

        if len(copy_id_timestamp_map_list) <= 1:
            # Only one full backup so far (e.g. upgrade scenario): push the max
            # restorable time up to the post-upgrade full backup first.
            log.info(f"backup_key_file_archived_time not exists. {self.get_log_comm()}")
            return self.get_last_full_backup_id(backup_ids, depend_backup_ids)

        # Copy-deletion adaptation: determine which backup key to skip.
        skip_backup_id = self.get_skip_backup_key(cache_path_parent)

        log.info(f"Get skip backup copy: {skip_backup_id}")

        # Filter backup keys by the maximum restorable time.
        if os.path.exists(backup_key_file_archived_time):
            # Dependency keys for this log backup, minus deleted copies.
            depend_backup_ids = self.get_depend_ids(backup_key_file_archived_time, depend_backup_ids, skip_backup_id)
        else:
            log.info(f"backup_key_file_archived_time not exists. {self.get_log_comm()}")
            depend_backup_ids = self.get_last_full_backup_id(backup_ids, depend_backup_ids)
        log.info(f"depend_backup_ids: {depend_backup_ids}. {self.get_log_comm()}")
        return depend_backup_ids

    def get_depend_ids(self, backup_key_file_archived_time, depend_backup_ids, skip_backup_key):
        """Select the backup keys this log backup depends on.

        Keys whose backups finished after the archived-to time are all
        included (minus deleted copies listed in *skip_backup_key*); of the
        keys finishing at or before it, only the latest one is prepended.

        :param backup_key_file_archived_time: file holding the archived-to
            time (assumed comparable with the float from to_timestamp —
            TODO confirm the stored type)
        :param depend_backup_ids: list the selected keys are appended to
        :param skip_backup_key: backup keys whose copies were deleted
        :return: the filled depend_backup_ids list
        """
        # Read the max recoverable (archived-to) time.
        archived_time = read_file(backup_key_file_archived_time)
        log.info(f"Get archived_time {archived_time}")
        last_backup_key = ''
        # Collect full and differential backup records.
        full_progress = self.get_backup_progress()
        diff_progress = self.get_backup_diff_progress()
        backup_infos = full_progress.get("backups", [])
        backup_diff_infos = diff_progress.get("differential_backups", [])
        log.info(f"Get backup_infos: {backup_infos}, backup_diff_infos: {backup_diff_infos}")
        combined_infos = backup_infos + backup_diff_infos
        # Sort by end_time; None sorts first via the empty-string fallback.
        sorted_list = sorted(combined_infos, key=lambda x: x["end_time"] if x["end_time"] is not None else "")
        for backup_info in sorted_list:
            log.info(f"Get backup_info {backup_info}")
            backup_key = backup_info.get("id", "")

            # Keep only backups whose status marks them successful.
            log.info(f"Get backup_key: {backup_key}")
            status = backup_info.get("status", "")
            if status != 'COMPLETED' and status != 'Active':
                continue

            # Keep only backups with a complete end time.
            end_time = backup_info.get("end_time", "")
            if end_time is None:
                continue
            log.info(f"Get end_time: {end_time}")
            format_end_time = self.to_timestamp(end_time)
            log.info(f"Get format_time: {format_end_time}")
            if archived_time < format_end_time:

                # Record every backup_key after the archived timestamp.
                if backup_key not in skip_backup_key:
                    log.info(f"depend backup id: {backup_key} repo exist")
                    depend_backup_ids.append(f"{backup_key}")
                else:
                    log.info(f"depend backup id: {backup_key} repo not exist")
            else:
                # Track the latest key at or before the archived timestamp.
                last_backup_key = backup_key
        log.info(f"Get skip {skip_backup_key}, last {last_backup_key}")
        if last_backup_key != '' and last_backup_key not in skip_backup_key:
            # Prepend the single backup_key just before the archived timestamp.
            depend_backup_ids = [f"{last_backup_key}"] + depend_backup_ids
        return depend_backup_ids

    def get_skip_backup_key(self, cache_path_parent):
        """Collect backup keys of full copies whose data has been deleted.

        Also records, under the cache area, the full copy ids that still
        exist so later merge steps can use them.
        """
        skip_backup_id = []
        log_associate_exist_full_copy_id = []
        associate_path = os.path.join(self._cache_area, "log_associate_full_copy_id")
        if not os.path.exists(associate_path):
            log.info("Log backup merge sqlite, not find associate full copy id")
            log_associate_exist_full_copy_id.append(self.get_last_full_copy_id())
        else:
            newest_full_copy = self.get_last_full_copy_id()
            for copy_id in read_file(associate_path):
                if copy_id == newest_full_copy:
                    log_associate_exist_full_copy_id.append(copy_id)
                    log.info("Skip check last data backup path")
                    continue
                # Check whether this full copy still has its data directory.
                copy_data_path = os.path.join(self.get_data_repo_path(), "data", copy_id)
                if os.path.exists(copy_data_path):
                    # Copy still present: keep it, nothing to skip.
                    log_associate_exist_full_copy_id.append(copy_id)
                    log.info(f"Copy data path {copy_data_path} exist")
                    continue
                log.info(f"Copy data path {copy_data_path} not exist")
                past_backup_key_path = os.path.join(cache_path_parent, copy_id, "meta", copy_id, "backup_id")
                skip_backup_id.append(read_file(past_backup_key_path))
        log.info(f"Get log associate full copy id list: {log_associate_exist_full_copy_id}")
        exist_list_path = os.path.join(self._cache_area, "log_associate_exist_full_copy_id")
        exec_overwrite_file(exist_list_path, log_associate_exist_full_copy_id)
        return skip_backup_id

    def get_data_repo_path(self):
        """Return the mount path of the data repository (repositoryType == 1).

        Scans the "repositories" list of the job parameters and returns the
        first mount path of the first data repository found.

        :return: mount path string, or None when no data repository exists.
            (Fixed docstring: the old one was copied from get_repo_list and
            described a nonexistent @job_info parameter and list return.)
        """
        repositories = self._json_param_object.get("job", {}).get("repositories", [])
        log.info(f"get get_repo_list {repositories}")
        # The repository path handed to xbsa defaults to the data repository.
        for rep in repositories:
            repository_type = rep.get("repositoryType")
            log.info(f"get_repo_list repository_type {repository_type}")
            if repository_type != 1:
                continue
            mount_path = rep.get("path")
            log.info(f"Get type {repository_type} repo , mount_path is : {mount_path}")
            return mount_path[0]
        # Make the implicit None fall-through explicit and observable.
        log.warn("No data repository found in job parameters")
        return None

    def get_last_full_backup_id(self, backup_ids, depend_backup_ids):
        """Append every backup key parsed from *backup_ids* to the list.

        Each comma-separated entry is "backup_key finish_time"; only the key
        part (before the first space) is kept. The list is mutated in place
        and also returned.
        """
        depend_backup_ids.extend(entry.split(' ')[0] for entry in backup_ids.split(','))
        return depend_backup_ids

    def to_timestamp(self, date_string):
        """Convert a zoned time string (e.g. 2024-01-01T00:00:00+0800) to a POSIX timestamp."""
        parsed = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S%z')
        return parsed.timestamp()

    def sub_job_exec_backup_timelog(self):
        """Record the backup start time once; resumed jobs keep the original."""
        time_file = os.path.join(self._cache_area, f'T{self._job_id}')
        if os.path.exists(time_file):
            # Resume scenario: do not refresh the recorded start time.
            return
        start_time = str(int(time.time()))
        exec_overwrite_file(time_file, start_time, json_flag=False)
        log.info(f"Success to write start time {start_time} to {time_file}.")

    def save_backup_job_id_and_backup_id(self, ret_body):
        """Persist the job id and backup id returned by the backup request.

        :param ret_body: response body of the backup request.
        :return: the backup id, or "" when the response holds no job id.
        """
        self.sub_job_exec_backup_timelog()
        # Persist the job id.
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        job_id = ret_body.get("job_id", "")
        if not job_id:  # was a redundant second dict lookup
            log.error('Failed get job id!')
            return ""
        exec_overwrite_file(job_file, job_id)
        # Persist the backup id.
        backup_id = ret_body.get("backup", {}).get("id", "")
        # Kept in the cache repository for archive restore.
        backup_id_file_cache = os.path.join(self._cache_area, "meta", self._copy_id, "backup_id")
        # Kept in the meta repository for replication restore.
        backup_id_file_meta = os.path.join(self._meta_area, "meta", self._copy_id, "backup_id")
        exec_overwrite_file(backup_id_file_cache, backup_id)
        exec_overwrite_file(backup_id_file_meta, backup_id)
        log.info(f"step2-6-4 success to send backup request, jobId: {job_id}, backupId: {backup_id}")
        return backup_id

    def save_backup_key(self, backup_type, backup_key):
        """Record the backup key of a full/incremental backup with its finish time.

        Linkage between log-backup backup_keys and timestamps:
        1. After each full/incremental backup, record its backup_key and
           finish timestamp.
        2. After each log backup, record the archived-to timestamp.
        3. When a log backup assembles its backup_key list it reads the
           recorded keys and the archived-to timestamp:
           a. no archived-to timestamp -> use every recorded key;
           b. timestamp exists but no key precedes it -> use every key;
           c. otherwise use the key closest before the timestamp plus all
              later keys.
        """
        log.info(f"save_backup_key backup_type: {backup_type}, backup_key: {backup_key}")
        finish_time = str(int(time.time()))
        backup_key_info = backup_key + ' ' + finish_time
        if backup_type == GaussBackupType.FULL:
            backup_key_file_meta = os.path.join(self._cache_area, "meta", self._copy_id, "backup_key")
            log.info(f"save backup_key_file_meta full {backup_key_file_meta}")
            # Resume scenario: the full backup already saved its key, skip.
            if os.path.exists(backup_key_file_meta):
                log.info(f"Full backup file {backup_key_file_meta} is already write")
                return
            exec_overwrite_file(backup_key_file_meta, backup_key_info)
        # Incremental backups append their key to the full backup's record.
        elif backup_type == GaussBackupType.DIFF:
            last_full_copy_id = self.get_last_full_copy_id()
            cache_path_parent = Path(self._cache_area).parent
            backup_key_file_meta = os.path.join(cache_path_parent, last_full_copy_id, "meta",
                                                last_full_copy_id, "backup_key")
            log.info(f"save backup_key_file_meta diff {backup_key_file_meta}")
            previous_backup_keys = read_file(backup_key_file_meta)
            backup_keys = previous_backup_keys.split(',')

            # Resume scenario: skip when this key was already appended.
            # Bug fix: each entry is "key time"; compare the key part of the
            # last entry, not its first character ([0] took one char, so the
            # dedup check could never match a real backup key).
            last_backup_key = backup_keys[-1].split(' ')[0]
            if last_backup_key == backup_key:
                log.info(f"Full backup file {backup_key_file_meta} is already write")
                return
            backup_key_info = previous_backup_keys + "," + backup_key_info
            log.info(f"save_backup_key backup_key_info: {backup_key_info}. {self.get_log_comm()}")
            exec_overwrite_file(backup_key_file_meta, backup_key_info)

    def sub_merge_db(self):
        """Sub job: merge the sqlite object tables and report completion."""
        log.info("start to sub_merge_db")
        self.merge_sqlite()
        log.info("end to sub_merge_db")
        write_progress_file_with_status_and_speed(self.get_job_info(), SubJobStatusEnum.COMPLETED, 100, False)
        return True

    def merge_sqlite(self):
        """Merge the xbsa object sqlite tables produced by this backup task."""
        # If this log backup produced no backup data, skip the merge.
        backup_no_send_data_flag_path = os.path.join(self._cache_area, "meta", self._copy_id, "no_send_data_flag")
        if os.path.exists(backup_no_send_data_flag_path):
            log.info("Skip merge sqlite, no send data in backup")
            return

        # Merge this task's sqlite with everything since the last full backup.
        log_target_path = self.merge_cur_and_past_sqlite()

        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Full backup: create restoreMergeSqlite, which accumulates every
            # xbsa object record between this full backup and the next one.
            restore_part_path = os.path.join(self._cache_area, "meta", self._copy_id, "restoreMergeSqlite")
            self.copy_mergedb(log_target_path, restore_part_path)
            log.info("Full backup, merge cur sqlite")
            return

        # Incremental/log backup: store the newest aggregated xbsa sqlite in
        # the last full backup's meta dir so the next backup can find it.
        last_full_copy_id = self.get_last_full_copy_id()
        cache_path_parent = Path(self._cache_area).parent
        restore_part_path = os.path.join(cache_path_parent, last_full_copy_id, "meta", last_full_copy_id,
                                         "restoreMergeSqlite")
        self.copy_mergedb(log_target_path, restore_part_path)

        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            log.info("Data backup, merge past fill sqlite")
            return

        # Log backup: store the restore-dependency sqlite in this copy's meta
        # directory for point-in-time restore.
        restore_path = os.path.join(self._cache_area, "meta", self._copy_id, "restoreSqlite")
        self.copy_mergedb(log_target_path, restore_path)

        # Build /meta/copyId/restoreSqlite by merging the sqlite files of the
        # copies this log copy depends on.
        # 1. Read the full copy ids this log copy depends on.
        log_associate_full_copy_id_path = os.path.join(self._cache_area, "log_associate_exist_full_copy_id")
        if os.path.exists(log_associate_full_copy_id_path):
            copy_id_list = read_file(log_associate_full_copy_id_path)
        else:
            log.info("Log backup merge sqlite, not find associate full copy id")
            return

        log.info(f"Log back up report copy id list {copy_id_list}")

        # 2. Merge those copies; on same-named files the newer one wins,
        # except for barrier files, which are appended.
        restore_merge_sqlite_path = os.path.join(self._cache_area, "meta", self._copy_id, "restoreSqlite",
                                                 "backup_merge")
        for copy_id in copy_id_list:
            # The current full copy was merged above, skip it.
            if copy_id == last_full_copy_id:
                log.info("Skip merge cur backup sqlite")
                continue
            restore_part_path = os.path.join(cache_path_parent, copy_id, "meta", copy_id,
                                             "restoreMergeSqlite")
            if not os.path.exists(restore_part_path):
                # Bug fix: the old message claimed the path exists, but this
                # branch runs precisely when it does not.
                log.info(f"Past backup sqlite not exist, skip: {restore_part_path}")
                continue
            aggregate_single_copy_object_data(restore_merge_sqlite_path, restore_part_path, self.get_repo_list(), False)

    def merge_cur_and_past_sqlite(self):
        """Aggregate this task's xbsa objects (plus past ones for non-full backups).

        :return: the merge target path, or False when the target directory
            cannot be created.
        """
        # Target directory for the merged log sqlite.
        log_target_path = os.path.join(self._cache_area, "meta", self._copy_id, "logdb", "backup_merge")
        if not os.path.exists(log_target_path) and not exec_mkdir_cmd(log_target_path):
            return False
        # Merge the xbsa objects produced by this task.
        source_path = os.path.join(self._meta_area, "meta", self._copy_id, "objectmeta")
        aggregate_single_copy_object_data(log_target_path, source_path, self.get_repo_list(), False)
        log.info("merge cur backup task db success")
        # Non-full backups also merge the previous backup's xbsa table.
        if self.backup_type != BackupTypeEnum.FULL_BACKUP:
            self.merge_past_xbsa_to_cur_xbsa(log_target_path)
        # Copy the merged table into the meta repository.
        logdb_meta_path = os.path.join(self._meta_area, "meta", self._copy_id, "logdb")
        self.copy_mergedb(log_target_path, logdb_meta_path)
        log.info("step2-5 end to sub_merge_db")
        return log_target_path

    def copy_mergedb(self, mergedb_cache_path, mergedb_meta_path):
        """Copy the merged sqlite dir to the meta path, creating the target if needed."""
        if not os.path.exists(mergedb_meta_path):
            exec_mkdir_cmd(mergedb_meta_path)
        # Copy as root so the meta repository keeps restricted ownership.
        exec_cp_cmd(mergedb_cache_path, mergedb_meta_path, 'root')

    def merge_past_xbsa_to_cur_xbsa(self, target_path):
        """Merge the previous backups' xbsa tables into this task's table."""
        parent_dir = Path(self._cache_area).parent
        full_copy_id = self.get_last_full_copy_id()
        restore_part_path = os.path.join(parent_dir, full_copy_id, "meta", full_copy_id,
                                         "restoreMergeSqlite")
        if not os.path.exists(restore_part_path):
            # Upgrade compatibility: take the previous sqlite from the logdb dir.
            past_copy_id = self.get_last_copy_id()
            last_source_path = os.path.join(parent_dir, past_copy_id, "meta", past_copy_id, "logdb")
            aggregate_single_copy_object_data(target_path, last_source_path, self.get_repo_list(), False)
            log.info("merge_last_full_xbsa_db success")
        else:
            aggregate_single_copy_object_data(target_path, restore_part_path, self.get_repo_list(), False)
        log.info("merge past xbsa sqlite success")

    def get_last_full_copy_id(self):
        """Return the copy id of the most recent full backup."""
        return self.get_past_copy_id(LastCopyType.FULL_COPIES)

    def get_last_copy_id(self):
        """Read the previously recorded copy id from the cache parent directory."""
        last_copy_id_file = os.path.join(Path(self._cache_area).parent, 'last_copy_id')
        with open(last_copy_id_file, "r", encoding='utf-8') as reader:
            last_copy_id = reader.read().strip()
        log.info(f"get last_copy_id: {last_copy_id}")
        return last_copy_id

    def save_last_id(self):
        """Record this task's copy id for the next backup to reference."""
        cache_path_parent = Path(self._cache_area).parent

        # Record the copy id of any backup type (full/diff/log).
        last_copy_id_file = os.path.join(cache_path_parent, 'last_copy_id')
        exec_overwrite_file(last_copy_id_file, self._copy_id, json_flag=False)
        log.info(f"Save copy id")

        # Only full backups also refresh the full/incremental copy id marker.
        if self.backup_type != BackupTypeEnum.FULL_BACKUP:
            return
        last_increment_copy_id_file = os.path.join(cache_path_parent, 'last_increment_copy_id')
        exec_overwrite_file(last_increment_copy_id_file, self._copy_id, json_flag=False)
        log.info(f"Save increment copy id")

    def get_past_copy_id(self, past_copy_id_type):
        """Return the copy id of the most recent copy of the given type."""
        info = self.get_last_copy_info(past_copy_id_type)
        copy_id = info.get("id", "")
        log.info(f"last_copy_info{info}get last_copy_id: {copy_id}. {self.get_log_comm()}")
        return copy_id

    def get_repo_list(self):
        """Build the device/filesystem relation list handed to xbsa.

        Parses the "repositories" job parameter and groups filesystems by
        the PM device (esn) they belong to.

        :return: list of {"role", "deviceSN", "filesystems"} dicts.
        """
        res_list = []
        repositories = self._json_param_object.get("job", {}).get("repositories", [])
        log.info(f"get get_repo_list {repositories}")
        # Only the data repository (type 1) is handed to xbsa by default;
        # log backups additionally hand over the log repository (type 3).
        useful_repo = [1, 3] if self.backup_type == BackupTypeEnum.LOG_BACKUP else [1]
        for rep in repositories:
            repository_type = rep.get("repositoryType")
            log.info(f"get_repo_list repository_type {repository_type}")
            if repository_type not in useful_repo:
                continue
            log.info(f"get_repo_list useful_repo {useful_repo}")
            role = rep.get("role")
            device_sn = rep.get("extendInfo").get("esn")
            mount_path = rep.get("path")
            log.info(f"Get type {repository_type} repo , mount_path is : {mount_path}")
            # For log backups xbsa gets the parent mount path so restores can
            # look up multiple log backup copies via the xbsa filesystem table.
            mount_path = self.build_mount_paht_preffix(mount_path, repository_type)
            log.info(f"Cur backup type is log, mount path: {mount_path[0]}")
            remote = rep.get("remotePath", "").strip("/")
            fs_dict = {
                "id": rep.get("extendInfo", {}).get("fsId", ""),
                "name": remote.split("/")[0],
                "sharePath": remote,
                "mountPath": mount_path
            }
            # Attach to an existing device entry when one matches this esn.
            not_found = self.scan_file_system(device_sn, fs_dict, True, res_list)
            if not_found:
                res_list.append({"role": role, "deviceSN": device_sn, "filesystems": [fs_dict]})
        return res_list

    def build_mount_paht_preffix(self, mount_path, repository_type):
        """For log repositories (type 3) strip the last path segment of each mount path."""
        if repository_type != 3:
            return mount_path
        mount_path = [path[:path.rfind('/')] for path in mount_path]
        log.info(f"Cur backup type is log, mount path: {mount_path}")
        return mount_path

    def scan_file_system(self, device_sn, fs_dict, not_found, res_list):
        """Attach *fs_dict* to the res_list entry matching *device_sn*.

        Returns False when a matching device entry was found (fs_dict was
        added unless a same-named filesystem already exists there);
        otherwise returns the incoming not_found flag unchanged.
        """
        for res in res_list:
            if res.get("deviceSN", "") != device_sn:
                continue
            filesystems = res.get("filesystems", [])
            no_same_file_system = True
            # A same-named filesystem already carries this entry's mount path.
            for filesystem in filesystems:
                no_same_file_system = self.scan_same_system(filesystem, fs_dict, no_same_file_system)
            # No same-named filesystem: add fs_dict as an independent entry.
            if no_same_file_system:
                fs_dict_name = fs_dict.get("name", "")
                log.info(f"no same file system with {fs_dict_name}")
                res.get("filesystems", []).append(fs_dict)
            not_found = False
        return not_found

    def scan_same_system(self, filesystem, fs_dict, no_same_file_system):
        """Return False when both dicts carry the same filesystem name; else pass the flag through."""
        same_name = filesystem.get("name", "") == fs_dict.get("name", "")
        return False if same_name else no_same_file_system

    def get_repo_file_system_relation(self):
        """Map filesystem names to their first mount path.

        Parses the "repositories" job parameter; only data repositories
        (type 1) are considered, plus log repositories (type 3) for log
        backups.

        :return: dict mapping filesystem name -> mount path.
        """
        repositories = self._json_param_object.get("job", {}).get("repositories", [])
        log.info(f"get get_repo_list")
        relation = {}
        # Hoisted out of the loop: the useful repository set is loop-invariant.
        useful_repo = [1]
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            useful_repo = [1, 3]
        for rep in repositories:
            repository_type = rep.get("repositoryType")
            log.info(f"get_repo_file_system_relation repository_type {repository_type}")
            if repository_type not in useful_repo:
                continue
            log.info(f"get_repo_file_system_relation useful_repo {useful_repo}")
            file_system_name = rep.get("remotePath", "").strip("/").split("/")[0]
            mount_path = rep.get("path")[0]
            relation[file_system_name] = mount_path
        log.info(f"get_repo_file_system_relation relation {relation}")
        return relation

    def create_host_db(self):
        """Create the dwsHosts.db sqlite file with the DwsHostFilesystemTable.

        Idempotent: returns True immediately when the db file already exists.
        Fixes a resource leak: the sqlite connection is now always closed,
        and the DDL is explicitly committed before closing.

        :return: True on success, False on any failure.
        """
        if not self._meta_area:
            log.error(f"No usable meta path.")
            return False
        self._db_name = os.path.join(self._meta_area, "meta", "dwsHosts.db")
        # A symlink here could redirect writes elsewhere; remove it.
        if os.path.islink(self._db_name):
            log.warn(f"This is a link file, remove it.")
            os.remove(self._db_name)
        if os.path.isfile(self._db_name):
            log.debug(f"Db {self._db_name} file exists.")
            return True
        try:
            object_conn = sqlite3.connect(self._db_name)
        except Exception as ex:
            log.error(f"Connect sqlite {self._db_name} failed for {ex}")
            return False
        if not object_conn:
            log.error(f"Connect sqlite {self._db_name} failed.")
            return False
        try:
            object_cur = object_conn.cursor()
            if not object_cur:
                log.error(f"Connect sqlite {self._db_name} failed.")
                return False
            object_cur.execute("CREATE TABLE IF NOT EXISTS [DwsHostFilesystemTable] ("
                               "[hostname] VARCHAR(256) NOT NULL PRIMARY KEY,"
                               "[filesystemName] VARCHAR(256) NOT NULL,"
                               "[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,"
                               "[rsv1] VARCHAR(256));")
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
            if not object_tables:
                log.error(f"Create dws table failed.")
                return False
            object_conn.commit()
        finally:
            # Always release the sqlite connection (previously leaked).
            object_conn.close()
        # Restrict permissions on the db file and the parent meta directory.
        os.chmod(self._db_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
        os.chmod(os.path.join(self._meta_area, "meta"), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IXOTH)
        if not exec_lchown(self._db_name, "root", "rdadmin"):
            log.error(f"Change owner for {self._db_name} failed.")
            return False
        log.info(f"Create db({self._db_name}) successfully.")
        return True

    def sub_query_copy(self):
        """
        Query the backup copy after a successful backup.

        Runs before the post job; it is not called when the backup failed.
        Reads nodes, copy id and repositories from the job parameters
        (each lookup is individually best-effort: a failure is logged and a
        default is kept), rewrites the meta/data repository remote paths to
        include the copy id, writes the copy info to the result file and
        reports sub-job completion.

        :return: True (always; lookup failures only degrade the copy info)
        """
        log.info(f'execute to query_backup_copy, pid: {self._pid}, job_id: {self._job_id}')
        copy_info = {}
        repositories = []
        copy_id = ""
        nodes = []
        try:
            file_content = self._json_param_object
        except Exception as ex:
            log.error(ex, exc_info=True)
        try:
            nodes = file_content['job']['protectEnv']['nodes']
        except Exception as ex:
            log.error(ex, exc_info=True)
        try:
            copy_id = file_content['job']['copy'][0]['id']
        except Exception as ex:
            log.error(ex, exc_info=True)
        try:
            repositories = file_content['job']['repositories']
        except Exception as ex:
            log.error(ex, exc_info=True)

        # Only meta/data repositories are reported; their remote paths gain a
        # copy-id suffix so the copy can be located later.
        report_repositories = []
        for repository in repositories:
            repository_type = int(repository['repositoryType'])
            if repository_type == RepositoryDataTypeEnum.META_REPOSITORY.value or \
                    repository_type == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                repository['remotePath'] = f"{repository['remotePath']}/{copy_id}"
                report_repositories.append(repository)
        copy_info['repositories'] = report_repositories
        copy_info['extendInfo'] = {'nodes': nodes, 'copyId': copy_id, 'backupTime': int(time.time())}
        output_result_file(self._pid, copy_info)
        job_info = self.get_job_info()
        write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, False)
        return True

    def sub_job_set(self):
        """Collect roach host:port files, shuffle them and set the instance extend info."""
        log.info("start to sub_job_set_extend_info")
        # Read the host:port files produced by the Roach sub jobs.
        host_port_file_path = os.path.join(self._meta_area, "hostInfo", self._job_id)
        log.info(f"start to sub_job_set_extend_info host_port_file_path:{host_port_file_path}")
        ip_parts = []
        port_parts = []
        for entry in os.listdir(host_port_file_path):
            path_new = os.path.join(host_port_file_path, entry)
            if not os.path.isfile(path_new):
                continue
            host_port = self.read_param_file(path_new)
            log.info(f"step2-3-1 start to sub_job_set_extend_info host_port")
            pieces = host_port.split(":")
            ip_parts.append(pieces[0])
            port_parts.append(pieces[1])
        ips = ",".join(ip_parts)
        ports = ",".join(port_parts)
        # Shuffle the ip/port pairs (best effort) before handing them over.
        try:
            ips, ports = self.shuffle_ip_ports(ips, ports)
        except Exception as err:
            log.error(f"shuffle ips and ports failed, err: {err}")
        log.info(f"start to sub_job_set_extend_info _instance_id:{self._instance_id}")
        # Call the extend-info setting interface.
        try:
            self._fun_inst.set_instance_extend_info_twice(self._instance_id, ips, ports, self.client_crt,
                                                          RoachConstant.XBSA_FILE_BACKUP_PATH)
        except Exception as err:
            log.error(f"set_instance_extend_info_twice fail: {err}")
            raise Exception("set instance extend info fail") from err
        return True

    def shuffle_ip_ports(self, ips, ports):
        """Randomly shuffle comma-separated ip and port lists in lockstep."""
        log.info(f"Start shuffle ips: {ips} and ports: {ports}, {self.log_format()}.")
        pairs = list(zip(ips.split(','), ports.split(',')))
        random.shuffle(pairs)
        shuffled_ips, shuffled_ports = zip(*pairs)
        shuffled_ip_str = ','.join(shuffled_ips)
        shuffled_port_str = ','.join(shuffled_ports)
        log.info(f"End shuffle ips: {ips} and ports: {ports}, {self.log_format()}.")
        return shuffled_ip_str, shuffled_port_str

    def get_total_data_size(self):
        """
        Sum up the amount of data already backed up, in MB.
        """
        total_data_size = 0
        speed_dir_path = os.path.join(self._cache_area, "tmp", self._copy_id, "speed")
        if not os.path.isdir(speed_dir_path):
            log.error(f"Speed dir path [{speed_dir_path}] not exists, {self.log_format()}.")
            return total_data_size
        for host_dir in os.listdir(speed_dir_path):
            host_key_path = os.path.join(speed_dir_path, host_dir)
            if not os.path.isdir(host_key_path):
                continue
            # Only files named xbsa_speed* carry per-host speed records.
            for speed_name in os.listdir(host_key_path):
                speed_txt_file = os.path.join(host_key_path, speed_name)
                if not os.path.basename(speed_txt_file).startswith("xbsa_speed"):
                    continue
                try:
                    speed_obj = read_file(speed_txt_file)
                except Exception as err:
                    log.exception(f"Get speed obj err: {err}, {self.log_format()}.")
                    continue
                if not speed_obj:
                    log.warn(f"Get json obj from [{speed_txt_file}] failed, {self.log_format()}.")
                    continue
                total_data_size += speed_obj.get("totalSizeInMB", 0)
        log.info(f"Get total data size {total_data_size} MB, {self.log_format()}.")
        return total_data_size

    def log_format(self):
        """Return the common pid/job/sub-job suffix used in log lines."""
        return (f"pid: {self._pid}, job_id: {self._job_id}, "
                f"sub_job_id: {self._sub_job_id}")

    def verify_end_early_exec(self):
        """Return True when this node should skip the deletion work.

        The agent_ip flag file names the node responsible for deletion; a
        missing flag file or a different ip means this node skips.
        """
        delete_flag = os.path.join(self._cache_area, f"agent_ip")
        if not os.path.exists(delete_flag):
            return True
        agent_ip = self._host_ip
        try:
            agent_ip = read_file(delete_flag)
        except Exception as err:
            log.error(f"failed to read delete_agent_ip, error is {err}")
        if agent_ip == self._host_ip:
            return False
        log.info(f"There are no files that need to be deleted. delete_ip is {agent_ip}, self_ip is {self._host_ip}")
        return True

    def delete_xbsa_cache_info(self):
        """Best-effort removal of the xbsa cache info file (stops xbsa receiving data)."""
        cache_info_path = os.path.join(RoachConstant.XBSA_FILE,
                                       f"xbsa_cacheInfo_info_tpops_{self._instance_id}_xbsa_backup.txt")
        try:
            os.remove(cache_info_path)
        except Exception as err:
            log.error(f"Remove cache info path fail;: {cache_info_path}, error is {err}")
        else:
            log.info("Remove xbsa cache info path successfully.")

    def do_post_job(self):
        """Post job: stop xbsa, clean progress/cache files, record copy ids.

        On success only the next-backup-to-full flag is cleared and the copy
        id is recorded; on failure the backup key entry and this task's
        cache/data directories are cleaned up.
        """
        log.info(f"step 3-1 start to do post job")
        # Stop xbsa from receiving data.
        self.delete_xbsa_cache_info()
        if self.verify_end_early_exec():
            return

        # Remove the in-progress progress file.
        try:
            progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
            if os.path.exists(progress_file):
                os.remove(progress_file)
            else:
                log.info(f'Progress file {progress_file} not exist! failed to delete while doing post job!')
        except Exception:
            log.error("failed to delete while doing post job")

        # Read the overall job result.
        backup_result = self._json_param_object.get("backupJobResult")
        log.info(f"job result is {backup_result}")
        if backup_result == BackupJobResult.SUCCESS:
            # Clear the next-backup-to-full flag.
            self.delete_next_backup_to_full_flag()
            # Record this copy id for later backups.
            self.save_last_id()
            return

        # Failed incremental backup: drop this copy id from the backup_key list.
        if self.backup_type == BackupTypeEnum.DIFF_BACKUP:
            log.info(f"diff back job failed, delete cur backup key. {self.get_log_comm()}")
            last_full_copy_id = self.get_last_full_copy_id()
            cache_path_parent = Path(self._cache_area).parent
            backup_key_file_meta = os.path.join(cache_path_parent, last_full_copy_id, "meta",
                                                last_full_copy_id, "backup_key")
            previous_backup_keys = read_file(backup_key_file_meta)
            log.info(f"get_old_backup_key backup_key_info: {previous_backup_keys}. {self.get_log_comm()}")
            suffix_pos = previous_backup_keys.rfind(',')
            # NOTE(review): the last entry appears to be "key time" (see
            # save_backup_key), so comparing the whole entry to the copy id
            # looks suspect — confirm the stored format before changing.
            suffix_copy_id = previous_backup_keys[suffix_pos + 1:]
            cur_backup_keys = previous_backup_keys
            if suffix_copy_id == self._copy_id:
                cur_backup_keys = previous_backup_keys[0: suffix_pos]
            log.info(f"save_new_backup_key backup_key_info: {cur_backup_keys}. {self.get_log_comm()}")
            exec_overwrite_file(backup_key_file_meta, cur_backup_keys)

        # Failed backup: remove this task's cache repository directories.
        for temp_dir in os.listdir(self._cache_area):
            try:
                conf_dir = os.path.join(self._cache_area, temp_dir)
                if os.path.isdir(conf_dir):
                    shutil.rmtree(conf_dir)
            except Exception as e:
                log.error(f"Fail to delete dir {os.path.join(self._cache_area, temp_dir)} for {e}."
                          f"main task {self._job_id}, subtask {self._sub_job_id}")
                continue

        # Failed full/incremental backup: remove data repo data/{job_id}.
        for data_area in self._data_area_list:
            path = os.path.join(data_area, "data", self._job_id)
            log.debug(f"Delete path :{path}")
            # Bug fix: rmtree raised when the path was missing (e.g. an early
            # failure before any data was written), aborting the post job;
            # guard it like the cache cleanup above.
            try:
                shutil.rmtree(path)
            except FileNotFoundError:
                log.info(f"Path {path} not exist, skip delete")
            except Exception as e:
                log.error(f"Fail to delete path {path} for {e}. {self.get_log_comm()}")
        return

    def delete_next_backup_to_full_flag(self):
        """Remove the "next backup must be full" flag after a successful full backup."""
        if self.backup_type != BackupTypeEnum.FULL_BACKUP:
            return
        flag_file = os.path.join(Path(self._cache_area).parent, "nextBackupToFullFlag")
        if os.path.exists(flag_file):
            os.remove(flag_file)

    def upload_log_backup_info(self):
        """After a log backup completes, report the restorable time ranges
        and persist the archived-to timestamps for the next log backup.

        :raises Exception: when no basic_recovery_time file is found or the
            time body infos cannot be read.
        """
        # Locate every basic_recovery_time file produced by this backup
        objectmeta_path = os.path.join(self._meta_area, "meta", self._copy_id, "objectmeta")
        time_files = self.query_restore_time_file_relative_path(objectmeta_path)
        if not time_files:
            log.error(f"not found basic_recovery_time file")
            raise Exception(f"not found basic_recovery_time file")

        body_infos = self.get_time_body_infos(time_files)
        if not body_infos:
            log.error(f"get time body infos failed")
            raise Exception(f"get time body infos failed")

        # Merge the per-file infos into a single time-range list
        merged_body_info = merge_time_body_info(body_infos)
        log.info(f"Get restore time body info: {merged_body_info}")

        # Report the restorable time ranges to the framework
        reported = self.update_restore_time(merged_body_info, True)

        # Persist the timestamps archived up to
        self.save_archived_time(merged_body_info)
        self.save_show_archived_time(merged_body_info)

        if not reported:
            log.error(f"update restore time error")

    def save_archived_time(self, time_body_info):
        """Persist the timestamp archived up to; the next log archive resumes
        from this time.

        :param time_body_info: list of [begin, end] time pairs; the end value
            of the last pair is taken as the archived-to time.
        """
        # End timestamp of the newest time range (was len(x)-1 indexing;
        # an unused get_last_full_copy_id() call was also removed)
        archived_time = int(time_body_info[-1][1])
        cache_path_parent = Path(self._cache_area).parent
        backup_key_file_archived_time = os.path.join(cache_path_parent, "archived_time")
        log.info(f"get backup_key_file_archived_time {backup_key_file_archived_time}, archived_time: {archived_time}")
        exec_overwrite_file(backup_key_file_archived_time, archived_time)

    def read_xbsa_objects(self, restore_time_file_absolute_path):
        """Read an xbsa object file and return its content split into lines.

        :param restore_time_file_absolute_path: absolute path of the file.
        :return: list of lines (split on "\\n").
        :raises Exception: when the file is missing or cannot be read.
        """
        if not os.path.isfile(restore_time_file_absolute_path):
            raise Exception(f"File:{restore_time_file_absolute_path} not exist")
        try:
            with open(restore_time_file_absolute_path, "r", encoding='UTF-8') as reader:
                content = reader.read()
        except Exception as ex:
            raise Exception("parse param file failed") from ex
        log.info(f"read_xbsa_objects success {self.get_log_comm()}")
        return content.split("\n")

    def query_restore_time_file_relative_path(self, object_data_path):
        """Query the relative paths of all restorable-time files generated by
        this backup.

        :param object_data_path: directory holding the aggregated object db
            files.
        :return: list of tuples, one per `basic_recovery_time` file:
            [(restore_time_file_path1, file_system1), ...]; empty list when
            nothing is found.
        """
        restore_time_files = []
        db_file_list = get_all_db_files(object_data_path)
        log.info(f"Get db file list: {db_file_list}")
        if not db_file_list:
            return restore_time_files
        # Invariant query; hoisted out of the loop
        cmd = "select * from BsaObjTable where objectName like '%basic_recovery_time%'"
        for db_file in db_file_list:
            log.info(f"start to read line from {db_file}")
            connection = sqlite3.connect(db_file)
            try:
                # Connection.execute opens and manages its own cursor
                object_tables = connection.execute(cmd).fetchall()
            finally:
                # Always release the connection; the original code leaked the
                # connection on every iteration (and the cursor on the
                # empty-result path).
                connection.close()
            if not object_tables:
                log.warning(f"do not find query basic_recovery_time {db_file}.")
                continue

            for row in object_tables:
                # Columns 14/15 hold the file path and its file system
                restore_time_file_path = row[14]
                file_system = row[15]
                restore_time_files.append((restore_time_file_path, file_system))
                log.info(f"Found restore time file: {restore_time_file_path}, File System: {file_system}")
        log.info(f"Get db file list: {restore_time_files}")
        return restore_time_files

    def update_restore_time(self, time_body_info, can_restore):
        """Report the copy info (restorable time ranges) to the framework.

        :param time_body_info: merged list of [begin, end] restorable ranges.
        :param can_restore: whether the copy is restorable.
        :return: True when the RPC report succeeds, False otherwise.
        """
        copy_info = {
            "copy": self.build_log_backup_copy_info(time_body_info, can_restore),
            "jobId": self._job_id,
        }
        try:
            exec_rc_tool_cmd(self._job_id, RpcParamKey.REPORT_COPY_INFO, copy_info)
        except Exception as err_info:
            log.error(f"Report copy info fail.err: {err_info},{self.get_log_comm()}")
            return False
        log.info(f"Report copy info succ {copy_info}.{self.get_log_comm()}")
        return True

    def build_log_backup_copy_info(self, time_body_info, can_restore):
        """Assemble the copy-info payload reported for a log backup.

        :param time_body_info: merged list of [begin, end] restorable ranges.
        :param can_restore: whether the copy is restorable.
        :return: dict with an "extendInfo" section describing the copy.
        """
        # If earlier partially-successful copies exist, start from the first
        # time range that has not been reported yet
        unreported_ranges = self.get_unreported_time_ranges(time_body_info)
        begin_time = unreported_ranges[0][0]
        end_time = time_body_info[-1][-1]

        # Resolve the full copy id this log copy is associated with
        log_associate_full_copy_id_path = os.path.join(self._cache_area, "log_associate_exist_full_copy_id")
        if os.path.exists(log_associate_full_copy_id_path):
            associate_copy_list = read_file(log_associate_full_copy_id_path)
            log.info(f"Get associate copy list: {associate_copy_list}")
            # NOTE(review): this takes the last element (or last character, if
            # read_file returns a string) — confirm read_file's return type here.
            copy_id_list = [associate_copy_list[-1]]
        else:
            copy_id_list = [self.get_last_full_copy_id()]
        log.info(f"Log back up report copy id list {copy_id_list}")

        out_put_info = {
            "extendInfo": {
                "backupTime": begin_time,
                "beginTime": begin_time,
                "endTime": end_time,
                "beginSCN": None,
                "copyId": self._copy_id,
                "endSCN": None,
                "backupset_dir": '',
                "backupSetName": "",
                "backupType": "",
                "baseBackupSetName": "",
                "dbName": "",
                "groupId": '',
                "tabal_space_info": [],
                "associatedCopies": copy_id_list,
                "logDirName": self._data_area,
                "canRestore": can_restore
            }
        }
        extend_info = out_put_info.get("extendInfo", {})
        time_range_info = {"timeRange": time_body_info}
        log.info(f"time_range_info: {time_range_info}")
        extend_info.update(time_range_info)
        log.info(f"build_log_backup_copy_info: {out_put_info}")

        return out_put_info

    def get_unreported_time_ranges(self, time_ranges):
        """Drop time ranges already reported by earlier log backups.

        Reads the persisted show_archived_time marker next to the cache area
        and keeps only ranges ending after it, clamping the first kept
        range's begin (and, if needed, its end) to the archived time.
        Returns the input unchanged when no marker has been recorded yet.

        :param time_ranges: list of [begin, end] pairs from the database.
        :return: filtered (possibly clamped in place) list of ranges, or a
            one-entry default window when every range is stale.
        """
        marker_path = os.path.join(Path(self._cache_area).parent, "show_archived_time")
        if not os.path.exists(marker_path):
            # No archive marker yet — nothing has been reported before
            return time_ranges

        show_archived_time = int(read_file(marker_path))
        log.info(f"Get database time ranges: {time_ranges}, archived time: {show_archived_time}")

        # Keep only ranges that end after the last reported point
        time_ranges = [rng for rng in time_ranges if int(rng[1]) > show_archived_time]
        if not time_ranges:
            # Every database range is stale; report a minimal default window
            log.info(f"Get time range length is empty, return default time range")
            return [[show_archived_time, show_archived_time + 1]]
        log.info(f"Get filter time ranges: {time_ranges}")

        first_range = time_ranges[0]
        begin_time = first_range[0]
        log.info(f"Get show archived time: {show_archived_time}, begin time: {begin_time}")
        if int(begin_time) < show_archived_time:
            # Clamp the first range's begin to the archived time (in place)
            log.info("Show archive time is later than begin time, change begin time")
            begin_time = show_archived_time
            first_range[0] = begin_time
            if int(begin_time) > int(first_range[1]):
                log.info(
                    f"Show begin time {begin_time} is later than end time {first_range[1]}, change end time")
                first_range[1] = int(begin_time) + 1
        return time_ranges


    def set_db_conf(self, param):
        """Apply the SLA backup policy to the gauss db instance config.

        :param param: job parameter dict; the SLA JSON is read from
            job.extendInfo.backupTask_sla and the instance id from
            job.protectObject.id.
        :return: result of self._fun_inst.set_gaussdb_conf.
        :raises GaussDBException: when the SLA cannot be parsed or the config
            update fails.
        """
        log.info("Begin set gauss db config.")
        try:
            backup_sla = param.get("job", {}).get("extendInfo", {}).get("backupTask_sla")
            policy_list = json.loads(backup_sla).get("policy_list")
            # Prefer the "backup" policy; fall back to the first policy
            backup_policy = next((policy for policy in policy_list if policy.get("type") == "backup"), policy_list[0])
        except Exception as err:
            log.error(f"Get backup task sla failed, err: {err}.")
            # Chain the original cause so the root failure stays in the traceback
            raise GaussDBException(f"Get backup task sla failed, err: {err}.") from err
        try:
            instance_id = param.get("job", {}).get("protectObject", {}).get("id", "")
            return self._fun_inst.set_gaussdb_conf(instance_id, backup_policy)
        except Exception as err:
            log.error(f"Failed set gauss db config, err {err}.")
            raise GaussDBException(f"Failed set gauss db config, err {err}.") from err

    def get_job_info(self):
        """Bundle the current task identifiers into a JobInfo object."""
        return JobInfo(
            pid=self._pid,
            job_id=self._job_id,
            sub_job_id=self._sub_job_id,
            copy_id=self._copy_id,
            sub_job_type=self._sub_job_name,
            cache_path=self._cache_area,
            instance_id=self._instance_id,
        )

    def _get_all_db_files(self, object_data_path):
        """
        获取所有要被聚合的db文件
        """
        db_file_list = []
        for host_key_path in os.listdir(object_data_path):
            db_path = os.path.join(object_data_path, host_key_path)
            if not os.path.isdir(db_path) or len(glob.glob(os.path.join(db_path, "*.db"))) == 0:
                log.warn(f"There is no object data in metadata path {db_path}. main task:{self._job_id}")
                continue

            for database in os.listdir(db_path):
                if not database.endswith(".db"):
                    continue
                db_file_list.append(os.path.join(db_path, database))
        return db_file_list
