#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import glob
import json
import os
import shutil
import sqlite3
import stat
import threading
import time
from datetime import datetime
from pathlib import Path

from common.common import output_result_file
from common.common_models import SubJobDetails, LogDetail, SubJobModel
from common.const import ParamConstant, SubJobStatusEnum, RepositoryDataTypeEnum, ExecuteResultEnum
from common.file_common import exec_lchown
from common.util.exec_utils import exec_mkdir_cmd, exec_cp_cmd, exec_overwrite_file
from dws.commons.common import get_uid_gid
from goldendb.schemas.glodendb_schemas import ActionResponse
from hcs.gaussdb.common.const import SubJobType, SubJobPolicy, GaussSubJobName, EnvName, GaussBackupStatus, \
    GaussDBCode, LogLevel, ErrorCode, RoachConstant, RpcParamKey, LastCopyType, GaussBackupType, \
    BackupTypeEnum, TaskTypeEnum, SubJobPriorityEnum, BackupJobResult, JobInfo, RepositoryType, UserInfo, \
    PermissionNode, BusinessConfigType
from hcs.gaussdb.common.gaussdb_common import report_job_details, \
    get_std_in_variable, set_backup_name, extract_ip, read_file, exec_rc_tool_cmd, \
    get_all_db_files, check_path_valid, check_command_injection, aggregate_single_copy_object_data, \
    write_progress_file_with_speed, write_progress_file, set_user_and_group, set_permisson
from hcs.gaussdb.common.safe_get_information import ResourceParam
from hcs.gaussdb.common.gaussdb_common import get_agent_roach_host_and_port
from hcs.gaussdb.handle.resource.resource_info import ResourceInfo
from hcs.gaussdb.logger import log
from hcs.sdk.hcs_manager import HcsManager


class BackUp:

    def __init__(self, pid, job_id, sub_job_id, data, json_param):
        """
        Build the backup task context from the agent-supplied parameter object.

        :param pid: request id used when reporting results back to the agent
        :param job_id: main backup job id
        :param sub_job_id: current sub-job id (may be empty for pre/gen tasks)
        :param data: raw stdin payload, forwarded to helper lookups
        :param json_param: parsed task parameter dict; must be non-empty
        :raises Exception: when json_param is falsy
        """
        if not json_param:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._std_in = data
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self._json_param_object = json_param
        self._concrete_object_db = "backupkey.db"
        # NOTE(review): assumes job.copy always has at least one entry; an empty
        # list raises IndexError here — confirm the agent guarantees this.
        self._copy_id = self._json_param_object.get("job", {}).get("copy", [])[0].get("id", "")
        self._end_point = self._json_param_object.get("job", {}).get("protectEnv", {}).get("endpoint", "")
        self._host_ip = self.get_cur_task_ip()
        self._logdetail = None
        self._err_info = {}
        # polling period (seconds) for the progress-report loops
        self._query_progress_interval = 15
        self._instance_id = self._json_param_object.get("job", {}).get("protectObject", {}).get("id", "")
        self._sub_job_name = ""
        self.backup_type = self._json_param_object.get("job", {}).get("jobParam", {}).get("backupType", "1")
        self._cache_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.CACHE_REPOSITORY)
        # log backups write into the log repository, everything else into data
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self._data_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.LOG_REPOSITORY)
        else:
            self._data_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.DATA_REPOSITORY)
        self._meta_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.META_REPOSITORY)

        self._job_status = SubJobStatusEnum.RUNNING
        self._backup_status = GaussBackupStatus.RUNNING
        self._err_code = 0
        self.user_name = get_std_in_variable(f"{EnvName.IAM_USERNAME}_{pid}")
        self._db_name = ""
        self._backupkey_name = ""
        # Assemble the resource-access request context
        self._extend_info = self._json_param_object.get("job", {}).get("protectEnv", {}).get("extendInfo", {})
        self._project_name = self._extend_info.get("projectName", "")
        self._project_id = self._extend_info.get("projectId", "")
        self._project_addr = self._extend_info.get("pmAddress", "")
        self._project_domain = self._extend_info.get("iamAccountName", "")
        self._business_addr = self._extend_info.get("businessAddr", "")
        self.client_crt = self._extend_info.get("client_crt", "")
        self._fun_inst = ResourceInfo(pid, data, self._project_addr, self._project_name, self._project_domain,
                                      self._business_addr, self._project_id)

    @staticmethod
    def read_param_file(file_path):
        """
        解析参数文件
        :return:
        """
        if not os.path.isfile(file_path):
            raise Exception(f"File:{file_path} not exist")
        try:
            with open(file_path, "r", encoding='UTF-8') as f_content:
                json_dict = json.loads(f_content.read())
        except Exception as ex:
            raise Exception("parse param file failed") from ex
        return json_dict

    @staticmethod
    def gen_sub_job_postlog(sub_job_array, file_path):
        """
        Persist the generated sub-job list to the result file for the agent.

        :param sub_job_array: list of serialized SubJobModel dicts
        :param file_path: result file path consumed by the backup agent
        """
        log.info(f"gen_sub_job get sub_job_array: {sub_job_array}")
        log.info(f"step2-4 Sub-task splitting succeeded.sub-task num:{len(sub_job_array)}")
        exec_overwrite_file(file_path, sub_job_array)
        log.info("step2-4 end to gen_sub_job")

    @staticmethod
    def check_ret_body(ret, ret_body):
        if not ret:
            log.error("Failed start backup!")
            return False
        if ret_body.get("error_code"):
            log.error(f'Exec POST error with return: {ret_body.get("error_msg")}')
            return False
        return True

    @staticmethod
    def build_mount_paht_preffix(mount_path, repository_type):
        """
        For log repositories, trim each mount path back to its parent prefix.

        :param mount_path: list of mount path strings
        :param repository_type: RepositoryType value of the repository
        :return: the trimmed list for RepositoryType.LOG, otherwise unchanged
        """
        if repository_type != RepositoryType.LOG:
            return mount_path
        # rfind: a path with no '/' merely loses its last character, as before
        trimmed = [path[:path.rfind('/')] for path in mount_path]
        log.info(f"Cur backup type is log, mount path: {trimmed}")
        return trimmed

    @staticmethod
    def get_repository_path(file_content, repository_type):
        """
        Pick the first path of the repository matching *repository_type*.

        :param file_content: parsed task params (reads job.repositories)
        :param repository_type: RepositoryDataTypeEnum value to look for
        :return: the repository path, or "" when absent or invalid
        """
        repositories_path = ""
        for repository in file_content.get("job", {}).get("repositories", []):
            if repository['repositoryType'] == repository_type:
                repositories_path = repository["path"][0]
                log.info(f"repositories_path {repositories_path}")
                break
        if not check_path_valid(repositories_path):
            log.info(f"check_path_valid fail. repositories_path: {repositories_path}")
            return ""
        # Special characters are only logged, never rejected (original behavior)
        if not check_command_injection(repositories_path):
            log.info(f"String contains special characters!")
        return repositories_path

    @staticmethod
    def get_guass_job_id():
        return ""

    @staticmethod
    def get_params_by_key(param, json_const):
        param = param.get("job", {}).get("protectObject", {}).get("extendInfo", {}).get(json_const, "")
        if not param:
            log.error(f"Get param protectObject_extendInfo_json_const failed.")
        return param

    @staticmethod
    def set_error_response(response):
        """Mark *response* as failed (code and body error both set to FAILED)."""
        response.code = response.body_err = GaussDBCode.FAILED.value

    @staticmethod
    def query_restore_time_file_relative_path(object_data_path):
        """
        Find the restore-time record produced by this backup.

        Scans the xbsa sqlite db files under *object_data_path* for a
        BsaObjTable row whose objectName contains "basic_recovery_time".

        :param object_data_path: directory scanned for copy db files
        :return: (relative_file_path, file_system) from the first db with a
                 matching row; ("", "") when no db or no match exists
        """
        restore_time_file_path = ""
        file_system = ''
        db_file_list = get_all_db_files(object_data_path)
        if not db_file_list:
            log.error("No db file")
            return "", ""
        cmd = "select * from BsaObjTable where objectName like '%basic_recovery_time%'"
        for db_file in db_file_list:
            # Fix: always close the connection; the original leaked the
            # connection on every path and the cursor on the "no rows" path.
            connection = sqlite3.connect(db_file)
            try:
                object_tables = connection.cursor().execute(cmd).fetchall()
            finally:
                connection.close()
            if not object_tables:
                log.error(f"Create dws table failed.")
                continue
            log.info("end to read line from source")
            # columns 14/15 hold the relative path and file system of the record
            restore_time_file_path = object_tables[0][14]
            file_system = object_tables[0][15]
            return restore_time_file_path, file_system
        return restore_time_file_path, file_system

    @staticmethod
    def read_xbsa_objects(restore_time_file_absolute_path):
        read_str = ""
        if not os.path.isfile(restore_time_file_absolute_path):
            raise Exception(f"File:{restore_time_file_absolute_path} not exist")
        try:
            with open(restore_time_file_absolute_path, "r", encoding='UTF-8') as f_content:
                read_str = f_content.read()
        except Exception as ex:
            raise Exception("parse param file failed") from ex
        lines = read_str.split("\n")
        return lines

    def set_logdetail(self):
        """Reset self._logdetail to one error-level LogDetail carrying self._err_code."""
        detail = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                           logDetailInfo=[], logLevel=3)
        detail.log_detail = self._err_code
        self._logdetail = [detail]
        return True

    def set_log_detail_with_params(self, log_label, sub_job_id, err_code=None, log_detail_param=None,
                                   log_level=LogLevel.INFO.value):
        """
        Replace self._logdetail with one LogDetail built from the given fields.

        :param log_label: label key for the log entry
        :param sub_job_id: sub-job id reported as the log info parameter
        :param err_code: optional error code
        :param log_detail_param: optional detail parameter list
        :param log_level: severity, defaults to INFO
        :return: True always
        """
        self._logdetail = [LogDetail(logInfo=log_label,
                                     logInfoParam=[sub_job_id],
                                     logTimestamp=int(time.time()),
                                     logDetail=err_code,
                                     logDetailParam=log_detail_param,
                                     logLevel=log_level)]
        return True

    def get_progress(self):
        """
        Query data-backup progress from HCS.

        The HCS backup job id was cached in "jobid_<job_id>" under the cache
        repository when the backup started.
        :return: the "job" section of the HCS response, or {} when the id file
                 does not exist yet
        """
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if not os.path.exists(job_file):
            log.error(f"copy info path not exist.")
            return {}
        backup_job_id = read_file(job_file)
        ret_body = HcsManager.get_job_progress(self._json_param_object, self._pid, backup_job_id)
        return ret_body.get("job", "")

    def get_log_back_progress(self):
        """
        Query progress of the running log-backup job from HCS.

        :return: the first entry of the response "backups" list, or {} when the
                 response carries none (the old code raised IndexError there,
                 which callers such as write_backup_progress never caught)
        """
        backup_id_file = os.path.join(self._cache_area, "meta", self._copy_id, "backup_id")
        backup_id = read_file(backup_id_file)
        ret_body = HcsManager.get_log_backup_job_progress(self._json_param_object, self._pid,
                                                          self._instance_id, backup_id)
        backups = ret_body.get("backups", [])
        if not backups:
            log.error(f"No backups in log backup progress response.{self.get_log_comm()}")
            return {}
        return backups[0]

    def get_back_history_info(self):
        """
        Fetch the backup history of this instance from HCS.

        :return: the "backups" list from the response, or [] on failure
        """
        ret_body = HcsManager.get_backup_jobs_info(self._json_param_object, self._pid, self._instance_id)
        if ret_body:
            return ret_body.get("backups", [])
        log.error(f"Failed get job info!")
        return []

    def get_log_comm(self):
        return f"pid:{self._pid} jobId:{self._job_id} subjobId:{self._sub_job_id}"

    def write_backup_progress(self):
        """
        Poll HCS until the backup leaves the RUNNING/BUILDING states, writing
        progress (with speed) to the cache-repository progress file each cycle.

        Side effects: updates self._backup_status; for a finished log backup it
        also records restore_time / cn-rebuild info via upload_log_backup_info.
        """
        # Poll and report backup progress periodically
        while self._backup_status == GaussBackupStatus.RUNNING or self._backup_status == GaussBackupStatus.BUILDING:
            # Log backups and data backups are tracked through different HCS APIs
            if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
                progress_info = self.get_log_back_progress()
            else:
                progress_info = self.get_progress()
            self._backup_status = progress_info.get("status", "")
            if self._backup_status == GaussBackupStatus.RUNNING or self._backup_status == GaussBackupStatus.BUILDING:
                log.info("is running")
                status = SubJobStatusEnum.RUNNING
            elif self._backup_status == GaussBackupStatus.SUCCEED or self._backup_status == GaussBackupStatus.COMPLETED:
                status = SubJobStatusEnum.COMPLETED
                # For log backup, record the cn-rebuild condition and report restore_time
                if self.backup_type == BackupTypeEnum.LOG_BACKUP.value:
                    self.upload_log_backup_info()
            else:
                status = SubJobStatusEnum.FAILED
            try:
                # NOTE(review): the default "{}" is a str, whose .get raises
                # AttributeError — swallowed below, yielding progress 0. The
                # progress string ends in '%', hence the last-char strip.
                progress_msg = progress_info.get("job", "{}").get("progress", "0")
                progress = int(progress_msg[0:len(progress_msg) - 1])
            except Exception:
                log.error("Failed calculate progress")
                progress = 0
            log.info(f"status：{status}   progress: {progress}")
            job_info = self.get_job_info()
            write_progress_file_with_speed(job_info, status, progress)
            time.sleep(self._query_progress_interval)

    def get_job_info(self):
        """Assemble a JobInfo snapshot of the current task context."""
        return JobInfo(pid=self._pid,
                       job_id=self._job_id,
                       sub_job_id=self._sub_job_id,
                       copy_id=self._copy_id,
                       sub_job_type=self._sub_job_name,
                       cache_path=self._cache_area,
                       instance_id=self._instance_id)

    def upload_backup_progress(self):
        """
        Periodically forward the sub-job progress file to the agent until the
        task leaves the RUNNING state (runs as a daemon thread in backup_task).

        Side effects: updates self._job_status from the progress file; reports
        FAILED when the file has no taskStatus.
        """
        # Report backup progress to the agent periodically
        while self._job_status == SubJobStatusEnum.RUNNING:
            progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
            # A missing progress file may simply not be generated yet; report
            # RUNNING instead of failing
            comm_progress_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                               taskStatus=SubJobStatusEnum.RUNNING,
                                               progress=0, logDetail=self._logdetail)
            if not os.path.exists(progress_file):
                # NOTE(review): reported with job_id here but with pid elsewhere
                # (e.g. report_error) — confirm which id report_job_details expects
                report_job_details(self._job_id, comm_progress_dict.dict(by_alias=True))
                time.sleep(self._query_progress_interval)
                continue
            with open(progress_file, "r") as f_object:
                progress_dict = json.loads(f_object.read())
            self._job_status = progress_dict.get("taskStatus")
            if not self._job_status:
                log.error(f"Failed to obtain the task status.{self.get_log_comm()}")
                self._job_status = SubJobStatusEnum.FAILED
                fail_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                          taskStatus=SubJobStatusEnum.FAILED, progress=100,
                                          logDetail=self._logdetail)
                progress_dict = fail_dict.dict(by_alias=True)

            report_job_details(self._job_id, progress_dict)
            time.sleep(self._query_progress_interval)

    def backup_task_subjob_dict(self):
        """Map each known sub-job name to its bound handler method."""
        return {
            GaussSubJobName.SUB_MOUNT: self.sub_job_mount,
            GaussSubJobName.SUB_EXEC: self.sub_job_exec,
            GaussSubJobName.SUB_UMOUNT: self.sub_job_umount,
            GaussSubJobName.SUB_MERGE_DB: self.sub_merge_db,
        }

    def report_error(self, err, sub_job_name):
        """
        Mark the task FAILED and report the failure detail to the agent.

        :param err: exception raised by the sub-job handler; args[0] may carry a
                    service error code, args[1] a human-readable message
        :param sub_job_name: name of the sub-job that failed
        """
        self._job_status = SubJobStatusEnum.FAILED
        log.error(f"do sub job fail: {err}")
        log.error(f"Exec sub job {sub_job_name} failed.{self.get_log_comm()}.")
        error_message = ""
        if len(err.args) > 1:
            error_message = err.args[1]
        log_detail_param = []
        if sub_job_name == GaussSubJobName.SUB_EXEC:
            log_detail_param.append(self._instance_id)
            self._err_code = ErrorCode.ERR_BACKUP_RESTORE
            # "DBS.201202" is mapped to the other-task-is-running error code;
            # any other code keeps ERR_BACKUP_RESTORE and attaches the message
            if len(err.args) > 0 and err.args[0] == "DBS.201202":
                self._err_code = ErrorCode.ERR_OTHER_TASK_IS_RUNNING
            else:
                log_detail_param.append(error_message)
        log_detail = LogDetail(logInfo="plugin_task_subjob_fail_label", logInfoParam=[self._sub_job_id],
                               logLevel=LogLevel.ERROR.value, logDetail=self._err_code,
                               logDetailParam=log_detail_param)

        report_job_details(self._pid,
                           SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                         logDetail=[log_detail], taskStatus=SubJobStatusEnum.FAILED.value).dict(
                               by_alias=True))

    def backup_task(self):
        """
        Dispatch and run the current sub-job while a daemon thread reports
        progress, then report final success/failure to the agent.

        :return: True when the sub-job handler succeeded, False otherwise
        """
        job_info = self.get_job_info()
        write_progress_file(job_info, SubJobStatusEnum.RUNNING, 0)
        # Start a daemon thread that keeps reporting backup progress
        sub_job_dict = self.backup_task_subjob_dict()
        progress_thread = threading.Thread(name='pre_progress', target=self.upload_backup_progress)
        progress_thread.daemon = True
        progress_thread.start()
        # Execute the sub-job selected by the task parameters
        sub_job_name = ResourceParam.get_sub_job_name(self._json_param_object)
        if not sub_job_name:
            return False
        self._sub_job_name = sub_job_name
        try:
            ret = sub_job_dict.get(sub_job_name)()
        except Exception as err:
            self.report_error(err, sub_job_name)
            return False
        if not ret:
            log.error(f"Exec sub job {sub_job_name} failed.{self.get_log_comm()}.")
            log_detail_param = []
            if sub_job_name == GaussSubJobName.SUB_EXEC:
                self._err_code = ErrorCode.ERR_BACKUP_RESTORE
                log_detail_param.append(self._instance_id)
            log_detail = LogDetail(logInfo="plugin_task_subjob_fail_label", logInfoParam=[self._sub_job_id],
                                   logLevel=LogLevel.ERROR.value, logDetail=self._err_code,
                                   logDetailParam=log_detail_param)

            report_job_details(self._pid,
                               SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                             logDetail=[log_detail], taskStatus=SubJobStatusEnum.FAILED.value).dict(
                                   by_alias=True))
            return False
        log_detail = LogDetail(logInfo="plugin_task_subjob_success_label", logInfoParam=[self._sub_job_id], logLevel=1)
        report_job_details(self._pid, SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                                    logDetail=[log_detail],
                                                    taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))
        # Wait for the reporter thread to observe the terminal status and exit
        progress_thread.join()
        return True

    def build_sub_job(self, job_priority, policy, job_name, node_id=None):
        """
        Build one serialized sub-job descriptor.

        :param job_priority: execution priority of the sub-job
        :param policy: node-scheduling policy value
        :param job_name: sub-job name constant
        :param node_id: optional node to pin the sub-job to
        :return: SubJobModel serialized as a dict (by alias)
        """
        model = SubJobModel(jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, execNodeId=node_id,
                            jobPriority=job_priority, jobName=job_name, policy=policy,
                            ignoreFailed=False)
        return model.dict(by_alias=True)

    def gen_sub_job_prelog(self):
        """
        Record the backup start time (only on first run) and collect the inputs
        for sub-job generation.

        :return: (nodes list from job.protectEnv, result file path)
        """
        start_time_path = os.path.join(self._cache_area, f'T{self._job_id}')
        # On resume the start-time marker already exists; keep the first value
        if not os.path.exists(start_time_path):
            start_time = str(int((time.time())))
            exec_overwrite_file(start_time_path, start_time, json_flag=False)
            log.info(f"Success to write backup start time {start_time} to {start_time_path}.")

        log.info("step2-4 start to gen_sub_job")
        file_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._pid}")
        nodes = self._json_param_object.get("job", {}).get("protectEnv", {}).get("nodes", [])
        log.info(f"gen_sub_job get nodes")
        return nodes, file_path

    def gen_sub_job(self):
        """
        Split the backup job into ordered sub-jobs and write them to the result
        file.

        Order: 1 mount (every node) -> 2 exec backup (any node) ->
        3 umount (every node) -> 4 merge xbsa db (any node) ->
        5 query copy (any node; skipped for log backup).
        :return: True always
        """
        # (policy, sub-job name, priority) for each sub-job, in execution order
        job_specs = [
            (SubJobPolicy.EVERY_NODE_ONE_TIME.value, GaussSubJobName.SUB_MOUNT,
             SubJobPriorityEnum.JOB_PRIORITY_1.value),
            (SubJobPolicy.ANY_NODE.value, GaussSubJobName.SUB_EXEC,
             SubJobPriorityEnum.JOB_PRIORITY_2.value),
            (SubJobPolicy.EVERY_NODE_ONE_TIME.value, GaussSubJobName.SUB_UMOUNT,
             SubJobPriorityEnum.JOB_PRIORITY_3.value),
            (SubJobPolicy.ANY_NODE.value, GaussSubJobName.SUB_MERGE_DB,
             SubJobPriorityEnum.JOB_PRIORITY_4.value),
        ]
        # NOTE(review): compared without .value here, while other call sites use
        # BackupTypeEnum.LOG_BACKUP.value — confirm enum vs raw string semantics
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            job_specs.append((SubJobPolicy.ANY_NODE.value, GaussSubJobName.QUERY_COPY,
                              SubJobPriorityEnum.JOB_PRIORITY_5.value))
        sub_job_array = [self.build_sub_job(priority, policy, name)
                         for policy, name, priority in job_specs]
        file_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._pid}")
        self.gen_sub_job_postlog(sub_job_array, file_path)
        return True

    def get_last_full_copy_id(self):
        """Return the copy id of the last full copy ("" when none is recorded)."""
        last_copy_id = self.get_last_full_copy_info().get("extendInfo", {}).get("copyId", "")
        log.info(f"get_last_full_copy_id: {last_copy_id}. {self.get_log_comm()}")
        return last_copy_id

    def get_last_copy_id(self):
        """Read the previous copy id from the 'last_copy_id' file beside the cache repo."""
        last_copy_id_file = Path(self._cache_area).parent / 'last_copy_id'
        with open(last_copy_id_file, "r", encoding='utf-8') as copy_info:
            last_copy_id = copy_info.read().strip()
        log.info(f"get last_copy_id: {last_copy_id}")
        return last_copy_id

    def check_cn_rebuild(self):
        """
        Check whether the last full copy recorded a cn-rebuild flag, which
        forces the next backup to be converted to a full backup.

        :return: True when the flag file exists and equals 1, else False
        """
        last_copy_id = self.get_last_full_copy_id()
        cache_path_parent = Path(self._cache_area).parent
        last_copy_id_file = os.path.join(cache_path_parent, last_copy_id, "meta", last_copy_id, "logBackup",
                                         "cnRebuildFlag")
        if not os.path.exists(last_copy_id_file):
            log.info(f"get cn_rebuild_flag is null")
            return False
        cn_rebuild_flag = read_file(last_copy_id_file)
        log.info(f"get cn_rebuild_flag:{cn_rebuild_flag}")
        # NOTE(review): this is an int comparison; if read_file returns the flag
        # as a string ("1"), this branch can never be True — confirm read_file's
        # return type against its writer.
        if cn_rebuild_flag and cn_rebuild_flag == 1:
            log.info(f"get cn_rebuild_flag is True, convert to full backup")
            return True
        log.info(f"get cn_rebuild_flag is False, continue to diff backup")
        return False

    def check_backup_job_type(self):
        """
        Validate the requested backup type and write the decision to the result
        file.

        When an incremental backup is requested but no matching full copy
        exists, ERROR_INCREMENT_TO_FULL is reported so the framework converts
        the job to a full backup.
        :return: True when the requested type can proceed, False otherwise
        """
        log.info("step2-2 start to sub_job_mount")

        # If this task is a differential backup and no full backup was ever
        # done before, it must be converted to a full backup

        def check_last_copy_is_null():
            """Return True when no usable last full copy is recorded."""
            # Read last_copy_info from the framework
            last_copy_info = self.get_last_full_copy_info()
            if len(last_copy_info) != 0:
                cache_path_parent = Path(self._cache_area).parent
                last_copy_id_file = os.path.join(cache_path_parent, 'last_full_copy_id')
                with open(last_copy_id_file, "r", encoding='utf-8') as copy_info:
                    last_copy_id = copy_info.read().strip()
                last_copy_info_id = last_copy_info.get("id", "")
                log.info(f"get last_copy_info_id {last_copy_info_id}, last_copy_id {last_copy_id}")
                # Usable only when the framework copy matches the locally
                # recorded full-copy id
                if last_copy_info_id == last_copy_id:
                    return False
                else:
                    return True
            else:
                return True

        log.info(f'step 2-2: start execute check_backup_job_type, pid: {self._pid}, job_id:{self._job_id}')
        backup_type = self.backup_type
        log.info(f"check backup_type is {backup_type}")
        if not backup_type:
            return False
        if backup_type == BackupTypeEnum.FULL_BACKUP:
            return True
        if check_last_copy_is_null():
            response = ActionResponse(code=ExecuteResultEnum.INTERNAL_ERROR,
                                      bodyErr=ErrorCode.ERROR_INCREMENT_TO_FULL,
                                      message="Can not apply this type backup job")
            output_result_file(self._pid, response.dict(by_alias=True))
            log.info(f"change backup_type increment to full")
            return False
        response = ActionResponse(code=ExecuteResultEnum.SUCCESS)
        output_result_file(self._pid, response.dict(by_alias=True))
        log.info(f'step 2-2: finish execute check_backup_job_type, pid: {self._pid}, job_id:{self._job_id}')
        return True

    def create_path_for_cur_agent(self):
        """
        Create the per-host directories needed during backup and apply the
        repository permission fix-ups.

        Creates meta/<copy_id>/objectmeta/<host_ip> in the meta repository and
        tmp/<copy_id>/speed/<host_ip> in the cache repository.
        :return: True on success, False when a directory cannot be created
        """
        path_specs = (
            (os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta', self._host_ip),
             self.change_meta_permission),
            (os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed', self._host_ip),
             self.change_cache_tmp_permisson),
        )
        for dir_path, fix_permission in path_specs:
            if not os.path.exists(dir_path) and not exec_mkdir_cmd(dir_path):
                return False
            fix_permission(dir_path)
        return True

    def sub_job_mount(self):
        """
        Mount sub-job (runs on every node): prepare per-host directories, cache
        task info, and publish this node's roach host:port under the meta repo.

        :return: True always (directory-creation errors are only logged)
        """
        log.info("step2-5 start to sub_job_mount")
        try:
            self.create_path_for_cur_agent()
        except Exception as err:
            log.error(f"step2-5 start to sub_job_moun err: {err}")
        self.save_task_info_to_cache()
        self.save_cacheinfo_to_cache()
        log.info("step2-5 end to sub_job_mount")

        # Query the ip and port configured in this node's roach process and
        # write them into the meta repository
        # (also records this node's roachAgent identity)
        host, port = get_agent_roach_host_and_port()
        host_ip = host
        host_port = f"{host}:{port}"
        # Log the value for now
        log.info(f"step2-5-1 sub_job_mount {host_port}")
        # NOTE(review): despite the name, this path is used as a DIRECTORY that
        # holds per-host files
        host_port_info_file = os.path.join(self._meta_area, "hostInfo", self._job_id)
        # Create the directory when it does not exist
        if not os.path.exists(host_port_info_file):
            log.info(f"start to make dir {host_port_info_file}")
            exec_mkdir_cmd(host_port_info_file)

        host_port_file = os.path.join(self._meta_area, "hostInfo", self._job_id, f"host_port_{host_ip}")
        # Remove a stale file from a previous run before rewriting it
        if os.path.exists(host_port_file):
            log.info("start to remove File")
            os.remove(host_port_file)
        exec_overwrite_file(host_port_file, host_port)
        log.info(f"step2-5-1 end to exec_overwrite_file File {host_port_file}")
        job_info = self.get_job_info()
        write_progress_file(job_info, SubJobStatusEnum.COMPLETED, 100)
        return True

    def sub_job_set(self):
        """
        Aggregate every node's roach host:port file (written by the mount
        sub-jobs) and push the combined ip/port lists into the instance extend
        info.

        :return: True always (extend-info failures are logged, not propagated)
        """
        log.info("start to sub_job_set_extend_info")
        # Read the per-host files produced by the mount sub-jobs
        host_port_dir = os.path.join(self._meta_area, "hostInfo", self._job_id)
        ip_list = []
        port_list = []
        log.info(f"start to sub_job_set_extend_info host_port_file_path:{host_port_dir}")
        for entry in os.listdir(host_port_dir):
            entry_path = os.path.join(host_port_dir, entry)
            if not os.path.isfile(entry_path):
                continue
            host_port = read_file(entry_path)
            log.info(f"step2-3-1 start to sub_job_set_extend_info host_port: {host_port}")
            host_parts = host_port.split(":")
            ip_list.append(host_parts[0])
            port_list.append(host_parts[1])
        ips = ",".join(ip_list)
        ports = ",".join(port_list)
        log.info(f"start to set extend info _instance_id:{self._instance_id}. ips {ips}. ports {ports}."
                 f" job id: {self._job_id}")
        # Push the aggregated values (plus client cert and xbsa conf path)
        try:
            self._fun_inst.set_instance_extend_info_twice(self._instance_id, ips, ports, self.client_crt,
                                                          self._extend_info.get("xbsaConfPath", ""))
        except Exception as err:
            log.error(f"set_instance_extend_info_twice fail: {err}")
        return True

    def sub_job_exec_backup_timelog(self):
        """Record the backup start timestamp once; resumed tasks keep the first value."""
        timepath = os.path.join(self._cache_area, f'T{self._job_id}')
        if os.path.exists(timepath):
            return
        message = str(int((time.time())))
        exec_overwrite_file(timepath, message, json_flag=False)
        log.info(f"Success to write start time {message} to {timepath}.")

    def sub_job_exec(self):
        """
        Execute the backup on HCS: apply db config, build the request body,
        publish node extend info, start the backup, then poll to completion.

        :return: True when the backup ends SUCCEED/COMPLETED, False otherwise
        """
        log.info("step2-6 start to exec_back_up")
        backup_type = self.backup_type

        # Apply the database configuration policy first
        if not self.set_db_conf(self._json_param_object):
            return self.upload_fail_status()
        log.info("GaussDB set configuration success.")

        # Assemble the backup request body
        backup_body = self.get_backup_body(backup_type)

        # Publish the roach ip/port extend info collected by the mount sub-jobs
        self.sub_job_set()

        # Start the backup on HCS
        ret_body = HcsManager.start_backup(self._json_param_object, self._pid, backup_body)
        log.info(f'Succeed start backup, return body {ret_body}')

        # Persist this backup's backup_id and job_id for progress queries
        backup_id = self.save_backup_job_id_and_backup_id(ret_body)

        # Poll progress in a loop and write it to the progress file
        self.write_backup_progress()

        # Non-log backups must query and store the backup key for later log backups
        if backup_type != BackupTypeEnum.LOG_BACKUP:
            self.save_backup_key(backup_type, backup_id)
        log.info(f"step2-6 end to exec_back_up, job id: {self._job_id}")

        # Record the full-copy-id/timestamp mapping (full backups only)
        if self._backup_status == GaussBackupStatus.SUCCEED or self._backup_status == GaussBackupStatus.COMPLETED:
            if backup_type != BackupTypeEnum.FULL_BACKUP:
                # Fix: this branch runs for NON-full backups; the old message
                # claimed "Full backup no need record full backup list".
                log.info(f"Not full backup, no need to record full backup list")
                return True

            # Record the full/incremental copy id and timestamp mapping list
            self.save_full_copy_id_and_timestamp_relations()
            return True
        else:
            return False

    def save_full_copy_id_and_timestamp_relations(self):
        """
        Append {copy_id, time} for this backup to the shared
        "full_copy_id_and_timestamp_maps" file beside the cache repository.
        """
        log.info("Start to save data backup ids")
        cache_path_parent = Path(self._cache_area).parent
        full_copy_id_and_timestamp_map_file = os.path.join(cache_path_parent, "full_copy_id_and_timestamp_maps")
        if os.path.exists(full_copy_id_and_timestamp_map_file):
            # NOTE(review): .append below requires read_file to return a list
            # (parsed JSON), not a raw string — confirm read_file's contract
            copy_id_list = read_file(full_copy_id_and_timestamp_map_file)
        else:
            copy_id_list = []
        # NOTE(review): records the JOB id under the "copy_id" key — confirm
        # this is intentional and matches what get_backup_ids consumes
        copy_timestamp_map = {"copy_id": f"{self._job_id}", "time": int(time.time())}
        copy_id_list.append(copy_timestamp_map)
        log.info(f"Save copy_id_list: {copy_id_list} in {full_copy_id_and_timestamp_map_file}")
        exec_overwrite_file(full_copy_id_and_timestamp_map_file, copy_id_list)

    def get_backup_body(self, backup_type):
        """
        Build the HCS backup request body for the given backup type.

        :param backup_type: BackupTypeEnum value of the current job
        :return: request body dict (log backups delegate to build_backup_body)
        """
        if backup_type == BackupTypeEnum.LOG_BACKUP:
            return self.build_backup_body()
        if backup_type == BackupTypeEnum.FULL_BACKUP:
            backup_file_type = GaussBackupType.FULL
        else:
            backup_file_type = GaussBackupType.DIFF
        return {
            "instance_id": self._instance_id, "name": set_backup_name(),
            "description": "mannual backup", "backup_type": backup_file_type
        }

    def upload_fail_status(self):
        """
        Report a configuration failure for this sub-job to the agent.

        :return: False always, so callers can `return self.upload_fail_status()`
        """
        log.error("GaussDB set configuration failed.")
        # NOTE(review): uses the field name log_info_param while other call
        # sites use the alias logInfoParam — confirm the model accepts both
        log_detail = LogDetail(logInfo="plugin_execute_gaussdb_conf_task_fail_label",
                               log_info_param=[self._sub_job_id],
                               logLevel=LogLevel.ERROR,
                               logDetail=ErrorCode.ERR_GAUSSDB_CONF)
        sub_dict = SubJobDetails(taskId=self._job_id,
                                 subTaskId=self._sub_job_id,
                                 progress=100,
                                 logDetail=[log_detail],
                                 taskStatus=SubJobStatusEnum.FAILED.value)
        report_job_details(self._pid, sub_dict.dict(by_alias=True))
        return False

    def build_backup_body(self):
        """Build the request body for a log backup.

        A log backup must carry the backup ids (backup keys) of the previous
        full and differential backups as "depend_backupIds".
        :return: dict request body for the backup API
        """
        backup_file_type = GaussBackupType.LOG
        # Log backup needs the backup_ids of the last full and last diff backup
        cache_path_parent = Path(self._cache_area).parent
        last_full_copy_id = self.get_last_full_copy_id()
        source_path = os.path.join(cache_path_parent, last_full_copy_id, "meta", last_full_copy_id, "backup_key")

        # Read the recorded backup-key list ("key timestamp" records, comma-separated)
        backup_ids = read_file(source_path)
        log.info(f"get backup_ids {backup_ids}")

        # Select the backup keys this log backup actually depends on
        depend_backup_ids = self.get_backup_ids(backup_ids, cache_path_parent)
        log.info(f"depend_backup_ids: {depend_backup_ids}. {self.get_log_comm()}")
        backup_body = {
            "instance_id": self._instance_id, "name": set_backup_name(),
            "description": "mannual backup", "backup_type": backup_file_type,
            "depend_backupIds": depend_backup_ids
        }
        log.info(f"sub_job_exec backup_body: {backup_body}. {self.get_log_comm()}")
        return backup_body

    def get_backup_ids(self, backup_ids, cache_path_parent):
        """Assemble the backup-key list a log backup depends on.

        :param backup_ids: comma-separated "key timestamp" records read from
            the last full copy's backup_key file
        :param cache_path_parent: parent directory of the cache repository
        :return: list of backup keys for the log-backup API
        """
        depend_backup_ids = []
        backup_key_file_archived_time = os.path.join(cache_path_parent, "archived_time")
        log.info(f"get backup_file_archived_time {backup_key_file_archived_time}")

        # Patch/upgrade compatibility: when the full-copy/timestamp map file is
        # missing, or holds a single entry, only the latest backup keys are sent
        full_copy_id_and_timestamp_map_file = os.path.join(cache_path_parent, "full_copy_id_and_timestamp_maps")
        if not os.path.exists(full_copy_id_and_timestamp_map_file):
            # Fixed log text: the missing file is the full-copy map, not archived_time
            log.info(f"full_copy_id_and_timestamp_map_file not exists. {self.get_log_comm()}")
            return self.get_last_full_backup_id(backup_ids, depend_backup_ids)
        copy_id_timestamp_map_list = read_file(full_copy_id_and_timestamp_map_file)
        if len(copy_id_timestamp_map_list) <= 1:
            # Only one full backup so far (e.g. right after an upgrade): first
            # advance the max-restorable time to the post-upgrade full backup
            log.info(f"full_copy_id_and_timestamp_map_list is single. {self.get_log_comm()}")
            return self.get_last_full_backup_id(backup_ids, depend_backup_ids)

        # Filter the backup keys by the max-restorable (archived) time
        if os.path.exists(backup_key_file_archived_time):
            # Collect the dependent keys, dropping keys of deleted copies
            depend_backup_ids = self.get_depend_ids(backup_key_file_archived_time, depend_backup_ids)
            if len(depend_backup_ids) == 0:
                depend_backup_ids = self.get_last_full_backup_id(backup_ids, depend_backup_ids)
        else:
            log.info(f"backup_key_file_archived_time not exists. {self.get_log_comm()}")
            depend_backup_ids = self.get_last_full_backup_id(backup_ids, depend_backup_ids)
        log.info(f"depend_backup_ids: {depend_backup_ids}. {self.get_log_comm()}")
        return depend_backup_ids

    def get_last_full_backup_id(self, backup_ids, depend_backup_ids):
        """Append the key field of every comma-separated "key timestamp" record.

        :param backup_ids: comma-separated records, each "backup_key timestamp"
        :param depend_backup_ids: list to extend in place (also returned)
        :return: depend_backup_ids with every backup key appended
        """
        for record in backup_ids.split(','):
            # The backup key is the first space-separated field of each record
            key = record.split(' ')[0]
            depend_backup_ids.append(f"{key}")
        log.info(f"Get depend backup ids: {depend_backup_ids}. {self.get_log_comm()}")
        return depend_backup_ids

    def get_depend_ids(self, backup_key_file_archived_time, depend_backup_ids):
        """Fill depend_backup_ids with the backup keys this log backup needs.

        Every successful, time-complete backup finished after the archived time
        is included; additionally, the newest key finished before the archived
        time is prepended as the restore anchor.

        :param backup_key_file_archived_time: path of the archived_time file
        :param depend_backup_ids: list to extend (also returned)
        :return: the dependent backup-key list
        """
        # Max restorable time; read_file may return it as a string, so coerce
        # to float so the comparison with the float end-time cannot raise.
        archived_time = float(read_file(backup_key_file_archived_time))
        log.info(f"Get archived_time {archived_time}")
        last_backup_key = ''
        # Full backup history, sorted by end time (None sorts first as "")
        backup_history_info = self.get_back_history_info()
        log.info(f"Get backup history infos: {backup_history_info}")
        sorted_list = sorted(backup_history_info, key=lambda x: x["end_time"] if x["end_time"] is not None else "")
        for backup_info in sorted_list:
            log.info(f"Get backup_info {backup_info}")
            backup_key = backup_info.get("id", "")

            # Keep only backups that finished successfully
            log.info(f"Get backup_key: {backup_key}")
            status = backup_info.get("status", "")
            if status != 'COMPLETED' and status != 'Active':
                continue

            # Keep only backups with a complete end time
            end_time = backup_info.get("end_time", "")
            if end_time is None:
                continue
            log.info(f"Get end_time: {end_time}")
            format_end_time = self.to_timestamp(end_time)
            log.info(f"Get format_time: {format_end_time}")
            if archived_time < format_end_time:
                # Record every backup_key after the archived timestamp
                depend_backup_ids.append(f"{backup_key}")
            else:
                last_backup_key = backup_key
        if last_backup_key != '':
            # Prepend the single backup_key closest before the archived timestamp
            depend_backup_ids = [f"{last_backup_key}"] + depend_backup_ids
        return depend_backup_ids

    def to_timestamp(self, date_string):
        """Convert an ISO-8601 string with UTC offset to a POSIX timestamp.

        :param date_string: e.g. "2024-01-02T03:04:05+0000"
        :return: float POSIX timestamp
        """
        parsed = datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S%z')
        timestamp = parsed.timestamp()
        log.info(f"Get time stamp: {timestamp}. {self.get_log_comm()}")
        return timestamp

    def update_restore_time(self, time_body_info):
        """Report the log copy info (restorable time ranges) to the framework.

        :param time_body_info: list of [begin, end] time ranges
        :return: True on successful report, False on any RPC failure
        """
        copy_info = {
            "copy": self.build_log_backup_copy_info(time_body_info),
            "jobId": self._job_id,
        }
        try:
            exec_rc_tool_cmd(self._job_id, RpcParamKey.REPORT_COPY_INFO, copy_info)
        except Exception as err_info:
            log.error(f"Report copy info fail.err: {err_info},{self.get_log_comm()}")
            return False
        log.info(f"Report copy info succ {copy_info}.{self.get_log_comm()}")
        return True

    def build_log_backup_copy_info(self, time_body_info):
        """
        Build the copy info reported for a log backup.
        :param time_body_info: list of [begin, end] restorable time ranges;
            the overall begin/end are taken from the first and last ranges
        :return: dict copy info whose extendInfo carries the time ranges
        """
        log.info(f"get time_body_info {time_body_info}")
        begin_time_range = time_body_info[0]
        begin_time = begin_time_range[0]
        end_time_range = time_body_info[len(time_body_info) - 1]
        end_time = end_time_range[len(end_time_range) - 1]
        log.info(f"get begin_time {begin_time}")
        log.info(f"get end_time {end_time}")

        # Read the archived time; if it is later than the computed begin time,
        # clamp the copy's begin time to the archived time
        cache_path_parent = Path(self._cache_area).parent
        archived_time_path = os.path.join(cache_path_parent, "archived_time")
        if os.path.exists(archived_time_path):
            archived_time = read_file(archived_time_path)
            log.info(f"Get show archived time: {archived_time}, begin time: {begin_time}")
            if int(begin_time) < int(archived_time):
                log.info("Show archive time is later than begin time, change begin time")
                begin_time = int(archived_time)

        out_put_info = {
            "extendInfo": {
                "backupTime": begin_time,
                "beginTime": begin_time,
                "endTime": end_time,
                "beginSCN": None,
                "endSCN": None,
                "backupset_dir": '',
                "backupSetName": "",
                "backupType": "",
                "copyId": self._copy_id,
                "baseBackupSetName": "",
                "dbName": "",
                "groupId": '',
                "tabal_space_info": [],
                "associatedCopies": [self._copy_id],
                "logDirName": self._data_area
            }
        }
        # Attach the full time-range list alongside the summary fields
        extend_info = out_put_info.get("extendInfo", {})
        time_range_info = {"timeRange": time_body_info}
        log.info(f"time_range_info: {time_range_info}")
        extend_info.update(time_range_info)
        log.info(f"build_log_backup_copy_info: {out_put_info}")

        return out_put_info

    def save_backup_job_id_and_backup_id(self, ret_body):
        """Persist the job id (cache repo) and backup id (cache + meta repos).

        :param ret_body: response body of the backup request
        :return: the backup id, or "" when the response carries no job id
        """
        self.sub_job_exec_backup_timelog()
        # Persist jobId under the cache repo
        job_id = ret_body.get("job_id", "")
        if not job_id:
            log.error('Failed get job id!')
            return ""
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        exec_overwrite_file(job_file, job_id)
        # Persist backupId twice: in the cache repo (for archive restore) and
        # in the meta repo (for replication restore)
        backup_id = ret_body.get("backup", {}).get("id", "")
        cache_backup_id_file = os.path.join(self._cache_area, "meta", self._copy_id, "backup_id")
        meta_backup_id_file = os.path.join(self._meta_area, "meta", self._copy_id, "backup_id")
        exec_overwrite_file(cache_backup_id_file, backup_id)
        exec_overwrite_file(meta_backup_id_file, backup_id)
        log.info(f"step2-6-4 success to send backup request, jobId: {job_id}, backupId: {backup_id}")
        return backup_id

    def save_backup_key(self, backup_type, backup_key):
        """Persist this task's backup_key together with its finish timestamp.

        Relationship between log-backup backup_keys and timestamps:
        1. After each full/diff backup, record the task's backup_key and the
           completion timestamp.
        2. After each log backup, record the timestamp archived up to so far.
        3. When a log backup assembles its backup_key API parameter it reads
           the full/diff backup_keys plus the archived-to timestamp:
           a. no archived timestamp -> send every full/diff backup_key;
           b. archived timestamp exists but no backup_key precedes it ->
              send every full/diff backup_key;
           c. backup_keys precede the archived timestamp -> also include the
              single key closest before that timestamp.
        """
        log.info(f"save_backup_key backup_type: {backup_type}, backup_key: {backup_key}")
        finish_time = str(int((time.time())))
        backup_key_info = backup_key + ' ' + finish_time
        if backup_type == BackupTypeEnum.FULL_BACKUP:
            # Full backup starts a fresh backup_key file under this copy's meta dir
            backup_key_file_meta = os.path.join(self._cache_area, "meta", self._copy_id, "backup_key")
            exec_overwrite_file(backup_key_file_meta, backup_key_info)
        # Diff backup appends its backupKey to the last full backup's record
        elif backup_type == BackupTypeEnum.DIFF_BACKUP:
            last_full_copy_id = self.get_last_full_copy_id()
            cache_path_parent = Path(self._cache_area).parent
            backup_key_file_meta = os.path.join(cache_path_parent, last_full_copy_id, "meta",
                                                last_full_copy_id, "backup_key")
            previous_backup_keys = read_file(backup_key_file_meta)
            backup_key_info = previous_backup_keys + "," + backup_key_info
            log.info(f"save_backup_key backup_key_info: {backup_key_info}. {self.get_log_comm()}")
            exec_overwrite_file(backup_key_file_meta, backup_key_info)

    def sub_job_umount(self):
        """Unmount sub-job: nothing to do, report COMPLETED at 100% and succeed."""
        log.info("step2-7 start to sub_job_umount")
        write_progress_file(self.get_job_info(), SubJobStatusEnum.COMPLETED, 100)
        log.info("step2-7 end to sub_job_umount")
        return True

    def sub_merge_db(self):
        """Sub-job: merge the sqlite object tables, then report COMPLETED."""
        log.info("start to sub_merge_db")
        self.merge_sqlite()
        log.info("end to sub_merge_db")
        write_progress_file(self.get_job_info(), SubJobStatusEnum.COMPLETED, 100)
        return True

    def merge_sqlite(self):
        """Merge the xbsa object sqlite tables produced by this backup.

        Full backup: also produce restoreMergeSqlite, recording every xbsa
        object between this full backup and the next one.
        Data (diff) backup: refresh the latest full copy's restoreMergeSqlite.
        Log backup: additionally build restoreSqlite for point-in-time restore
        by merging every associated full copy's restoreMergeSqlite.
        """
        # Skip merging when this log backup produced no backup database at all
        backup_no_send_data_flag_path = os.path.join(self._cache_area, "meta", self._copy_id, "no_send_data_flag")
        if os.path.exists(backup_no_send_data_flag_path):
            log.info("Skip merge sqlite, no send data in backup")
            return

        # Merge this task's sqlite plus everything since the last full backup
        log_target_path = self.merge_cur_and_past_sqlite()

        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Full backup: generate restoreMergeSqlite, which records all xbsa
            # objects between this full backup and the next full backup
            restore_part_path = os.path.join(self._cache_area, "meta", self._copy_id, "restoreMergeSqlite")
            self.copy_mergedb(log_target_path, restore_part_path)
            log.info("Full backup, merge cur sqlite")
            return

        # Incremental/log backup: keep the newest aggregated xbsa sqlite in the
        # latest full backup's meta dir so the next backup can find it
        last_full_copy_id = self.get_last_full_copy_id()
        cache_path_parent = Path(self._cache_area).parent
        restore_part_path = os.path.join(cache_path_parent, last_full_copy_id, "meta", last_full_copy_id,
                                         "restoreMergeSqlite")
        self.copy_mergedb(log_target_path, restore_part_path)

        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            log.info("Data backup, merge past fill sqlite")
            return

        # Log backup: store the aggregated restore sqlite in this copy's meta
        # dir, used for point-in-time restore
        restore_path = os.path.join(self._cache_area, "meta", self._copy_id, "restoreSqlite")
        self.copy_mergedb(log_target_path, restore_path)

        # Build /meta/copyId/restoreSqlite by merging the sqlite of every copy
        # this log copy depends on.
        # 1. Read the associated full copy id list (the current full copy is
        #    skipped in the loop below because it is already merged)
        log_associate_full_copy_id_path = os.path.join(self._cache_area, "log_associate_exist_full_copy_id")
        if os.path.exists(log_associate_full_copy_id_path):
            copy_id_list = read_file(log_associate_full_copy_id_path)
        else:
            log.info("Log backup merge sqlite, not find associate full copy id")
            return

        log.info(f"Log back up report copy id list {copy_id_list}")

        # 2. Merge those copies; on name clashes the newer file wins, except
        #    barrier files which are appended
        restore_merge_sqlite_path = os.path.join(self._cache_area, "meta", self._copy_id, "restoreSqlite",
                                                 "backup_merge")
        for copy_id in copy_id_list:
            # The current full copy was already merged; skip it
            if copy_id == last_full_copy_id:
                log.info("Skip merge cur backup sqlite")
                continue
            restore_part_path = os.path.join(cache_path_parent, copy_id, "meta", copy_id,
                                             "restoreMergeSqlite")
            if not os.path.exists(restore_part_path):
                # Fixed log text: this branch means the past sqlite does NOT exist
                log.info(f"Past backup sqlite not exist, skip: {restore_part_path}")
                continue
            aggregate_single_copy_object_data(restore_merge_sqlite_path, restore_part_path, self.get_repo_list(), False)

    def merge_cur_and_past_sqlite(self):
        """Merge this task's xbsa objects with all sqlite since the last full backup.

        :return: the merged "backup_merge" directory path under this copy's
            logdb dir. NOTE(review): returns False when the directory cannot be
            created, yet the caller (merge_sqlite) passes the result straight
            into path joins — confirm the False case is handled upstream.
        """
        # Target path of the log-backup merge (aggregated path)
        log_target_path = os.path.join(self._cache_area, "meta", self._copy_id, "logdb", "backup_merge")  # aggregated path
        if not os.path.exists(log_target_path):
            if not exec_mkdir_cmd(log_target_path):
                return False
        # Merge the xbsa objects produced by this task
        source_path = os.path.join(self._meta_area, "meta", self._copy_id, "objectmeta")
        aggregate_single_copy_object_data(log_target_path, source_path, self.get_repo_list(), False)
        log.info(f"merge cur backup task db success")
        # Non-full backups also merge the previous backup's xbsa table
        if self.backup_type != BackupTypeEnum.FULL_BACKUP:
            self.merge_past_xbsa_to_cur_xbsa(log_target_path)
        # xbsa table path under the meta repo
        logdb_meta_path = os.path.join(self._meta_area, "meta", self._copy_id, "logdb")
        self.copy_mergedb(log_target_path, logdb_meta_path)
        log.info("step2-5 end to sub_merge_db")
        return log_target_path

    def copy_mergedb(self, mergedb_cache_path, mergedb_meta_path):
        """Copy the merged sqlite directory into the target directory.

        The target directory is created first when absent; the copy is run
        with the 'root' owner via exec_cp_cmd.
        """
        target_missing = not os.path.exists(mergedb_meta_path)
        if target_missing:
            exec_mkdir_cmd(mergedb_meta_path)
        exec_cp_cmd(mergedb_cache_path, mergedb_meta_path, 'root')
        log.info(f"Copy success, source: {mergedb_cache_path}. target: {mergedb_meta_path} {self.get_log_comm()}")

    def merge_past_xbsa_to_cur_xbsa(self, target_path):
        """Fold the previous backup's xbsa table into target_path.

        Prefers the last full copy's restoreMergeSqlite; falls back to the
        previous copy's logdb directory for upgraded environments.
        """
        parent_dir = Path(self._cache_area).parent
        last_full_copy_id = self.get_last_full_copy_id()
        merged_sqlite_dir = os.path.join(parent_dir, last_full_copy_id, "meta", last_full_copy_id,
                                         "restoreMergeSqlite")
        if os.path.exists(merged_sqlite_dir):
            aggregate_single_copy_object_data(target_path, merged_sqlite_dir, self.get_repo_list(), False)
        else:
            # Upgrade compatibility: take the previous sqlite from the logdb dir
            previous_copy_id = self.get_last_copy_id()
            fallback_dir = os.path.join(Path(self._cache_area).parent, previous_copy_id, "meta",
                                        previous_copy_id, "logdb")
            aggregate_single_copy_object_data(target_path, fallback_dir, self.get_repo_list(), False)
            log.info(f"merge_last_full_xbsa_db success")
        log.info(f"merge past xbsa sqlite success")

    def save_last_id(self):
        """Record this copy id as the latest copy; for a full backup, also as
        the latest full copy (both markers live next to the cache repo)."""
        parent_dir = Path(self._cache_area).parent
        exec_overwrite_file(os.path.join(parent_dir, 'last_copy_id'), self._copy_id, json_flag=False)
        log.info(f"Save all copy id {self._copy_id}")

        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Overwrite the last-full-copy marker as well
            exec_overwrite_file(os.path.join(parent_dir, 'last_full_copy_id'), self._copy_id, json_flag=False)
            log.info(f"Save full copy id {self._copy_id}")

    def merge_last_full_xbsa_to_backdb(self, target_path):
        """Merge the last full backup's xbsa table into target_path.

        Diff-copy restore needs the full + diff xbsa tables, so each diff
        backup folds the previous full backup's mergedb into its own result.
        """
        full_copy_id = self.get_last_full_copy_id()
        source_dir = os.path.join(Path(self._cache_area).parent, full_copy_id, "meta",
                                  full_copy_id, "mergedb")
        aggregate_single_copy_object_data(target_path, source_dir, self.get_repo_list(), False)
        log.info(f"merge_last_full_xbsa_db success")

    def merge_last_diff_xbsa_to_logdb(self, target_path):
        """Merge the previous copy's logdb xbsa table into target_path.

        Log-backup restore needs the xbsa tables of every backup before the
        archived point, so the previous copy's table is folded into this one.
        """
        previous_copy_id = self.get_last_copy_id()
        source_dir = os.path.join(Path(self._cache_area).parent, previous_copy_id, "meta",
                                  previous_copy_id, "logdb")
        aggregate_single_copy_object_data(target_path, source_dir, self.get_repo_list(), False)
        log.info(f"merge_last_diff_xbsa_db success")

    def copy_mergedb_from_cache_to_meta(self, mergedb_cache_path):
        """Copy the merged db from the cache repo to this copy's meta/mergedb dir."""
        mergedb_meta_path = os.path.join(self._meta_area, "meta", self._copy_id, "mergedb")
        # Consistency: delegate to the shared copy helper instead of repeating
        # the mkdir/copy sequence inline.
        self.copy_mergedb(mergedb_cache_path, mergedb_meta_path)

    def copy_log_mergedb_from_cache_to_meta(self, mergedb_cache_path):
        """Copy the merged log db from the cache repo to this copy's meta/logdb dir."""
        mergedb_meta_path = os.path.join(self._meta_area, "meta", self._copy_id, "logdb")
        # Consistency: delegate to the shared copy helper instead of repeating
        # the mkdir/copy sequence inline.
        self.copy_mergedb(mergedb_cache_path, mergedb_meta_path)

    def sub_query_copy(self):
        """
        Query the backup copy: runs after a successful backup, before the
        post job; it is not invoked when the backup failed.
        @pid: request id
        @job_id: main task id
        :return: True (CommonBodyResponse content is written via output_result_file)
        """
        log.info(f'execute to query_backup_copy, pid: {self._pid}, job_id: {self._job_id}')
        copy_info = {}
        repositories = []
        copy_id = ""
        nodes = []
        # Initialize so a failure in the first read cannot leave file_content
        # unbound — the original could raise NameError in the later lookups.
        file_content = {}
        try:
            file_content = self._json_param_object
        except Exception as ex:
            log.error(ex, exc_info=True)
        try:
            nodes = file_content['job']['protectEnv']['nodes']
        except Exception as ex:
            log.error(ex, exc_info=True)
        try:
            copy_id = file_content['job']['copy'][0]['id']
        except Exception as ex:
            log.error(ex, exc_info=True)
        try:
            repositories = file_content['job']['repositories']
        except Exception as ex:
            log.error(ex, exc_info=True)

        # Only meta and data repositories are reported, each with the copy id
        # appended to its remote path
        report_repositories = []
        for repository in repositories:
            repository_type = int(repository['repositoryType'])
            if repository_type == RepositoryDataTypeEnum.META_REPOSITORY.value or \
                    repository_type == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                repository['remotePath'] = f"{repository['remotePath']}/{copy_id}"
                report_repositories.append(repository)
        copy_info['repositories'] = report_repositories
        copy_info['extendInfo'] = {'nodes': nodes, 'copyId': copy_id}

        output_result_file(self._pid, copy_info)
        return True

    def backup_prerequisite_progress(self):
        """
        Report the prerequisite-job progress based on marker files.
        Presence of BackupPrerequisiteProgress means COMPLETED; the fail
        markers force FAILED; otherwise the job is still RUNNING.
        @return: None (result is written via output_result_file)
        """
        log.info(f"step 1-4 start to upload backup prepare job progress ")
        status = SubJobStatusEnum.COMPLETED.value
        percent = 100
        done_marker = os.path.join(self._cache_area, "BackupPrerequisiteProgress")
        fail_marker = os.path.join(self._cache_area, "BackupPrerequisiteFailProgress")
        if not os.path.exists(done_marker):
            log.info("backup_prerequisite_progress is running")
            status = SubJobStatusEnum.RUNNING.value
            percent = 0
            # A pre-backup failure marker in the xbsa dir also means FAILED
            pre_fail_marker = os.path.join(RoachConstant.XBSA_FILE, f"hcs_pre_backup_failed_{self._job_id}.txt")
            if os.path.exists(pre_fail_marker):
                status = SubJobStatusEnum.FAILED.value
        if os.path.exists(fail_marker):
            log.info("backup_prerequisite_progress is fail")
            status = SubJobStatusEnum.FAILED.value
            percent = 0
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=status, progress=percent, logDetail=self._logdetail)
        output_result_file(self._pid, output.dict(by_alias=True))

    def backup_post_job_progress(self):
        """
        Report the post-job progress based on marker files in the cache repo.
        Presence of BackupPostProgress means COMPLETED; the fail marker forces
        FAILED; otherwise the job is still RUNNING.
        @return: None (result is written via output_result_file)
        """
        log.info(f"Start to upload backup post job progress...")
        job_status = SubJobStatusEnum.COMPLETED.value
        file_path = os.path.join(self._cache_area, "BackupPostProgress")
        fail_file_path = os.path.join(self._cache_area, "BackupPostProgressFailProgress")
        progress = 100
        if not os.path.exists(file_path):
            # Fixed copy-pasted log text: this is the post job, not the prerequisite job
            log.info("backup_post_job_progress is running")
            job_status = SubJobStatusEnum.RUNNING.value
            progress = 0
        if os.path.exists(fail_file_path):
            log.info("backup_post_job_progress is fail")
            job_status = SubJobStatusEnum.FAILED.value
            progress = 0
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress, logDetail=self._logdetail)
        output_result_file(self._pid, output.dict(by_alias=True))

    def get_cache_path(self):
        """Return the cache repository path used by this backup task."""
        return self._cache_area

    def backup_pre_job(self):
        """
        Prerequisite job:
        1. create the required directories
        2. write cache repo /tmp/copy_id/taskInfo_{hostKey}.txt
        3. write DataBackup /stmp/dws_cacheInfo.txt
        4. create the host DB database
        Log backups additionally merge the sqlite of associated past data
        copies and record the dependent full-copy id list.
        @return: True in all handled cases
        """
        log.info(f"step2-3 start to backup pre job, job_id: {self._job_id}")
        log.info(f"Get backup param: {self._json_param_object}")
        self.create_path_for_backup()
        self.save_business_config()
        self.create_host_db()
        self.check_archived_time()
        cache_path_parent = Path(self._cache_area).parent

        # Full backup: skip the merge in the pre job
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Save the backup filesystem relations into the meta repo
            self.save_repo_relations_to_meta()
            log.info("success to save_repo_relations_to_meta")
            return True

        last_backup_record = self.get_last_backup_record(cache_path_parent)
        self.merge_last_backup_xbsa_table(last_backup_record)
        log.info("step2-3 end to merge past sqlite")

        # Non-log backups are done at this point
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            log.info("Data backup no need to merge sqlite")
            return True

        # Read the archived (max restorable) time
        ret, archived_time = self.read_archive_time(cache_path_parent, last_backup_record)
        if not ret:
            log.info("Read archived time failed, end pre job")
            return True
        log.info(f"Read archived time success: {archived_time}")

        # Read the full-copy-id / timestamp relation records
        ret, copy_id_timestamp_map_list = self.read_full_copy_id_list(cache_path_parent)
        if not ret:
            log.info("Read full copy id list failed, end pre job")
            return True
        log.info(f"Read copy and backup time relation success: {copy_id_timestamp_map_list}")

        # Take the copy right before the max restorable time plus every later
        # copy id, newest first
        copy_id_list = self.get_copy_id_list(archived_time, copy_id_timestamp_map_list)
        log.info(f"get copy id list: {copy_id_list}")
        if len(copy_id_list) <= 1:
            # Only one associated copy: its sqlite was already merged above
            log.info(f"Copy id list length is single: {copy_id_list}")
            return True

        # Merge the sqlite files of the past data copies
        self.merge_past_data_copy_sqlite(cache_path_parent, copy_id_list)

        # Record the full-copy ids this log backup depends on; after patching,
        # one full backup + one log backup are needed for this to take effect.
        # NOTE(review): this writes "log_associate_full_copy_id" while
        # merge_sqlite reads "log_associate_exist_full_copy_id" — confirm the
        # differing file names are intentional (another writer may exist).
        log_associate_full_copy_id_path = os.path.join(self._cache_area, "log_associate_full_copy_id")
        exec_overwrite_file(log_associate_full_copy_id_path, copy_id_list)
        log.info(f"Write log associated copy id {copy_id_list} to {log_associate_full_copy_id_path}")
        return True

    def get_last_backup_record(self, cache_path_parent):
        """Locate the previous backup's merged sqlite (restoreMergeSqlite) dir,
        falling back to the previous copy's logdb dir for upgraded setups.

        :param cache_path_parent: parent directory of the cache repository
        :return: path of the directory to merge from
        """
        full_copy_id = self.get_last_full_copy_id()
        record_path = os.path.join(cache_path_parent, full_copy_id, "meta", full_copy_id,
                                   "restoreMergeSqlite")
        if not os.path.exists(record_path):
            # Upgrade compatibility: read from the logdb directory instead
            previous_copy_id = self.get_last_copy_id()
            record_path = os.path.join(Path(self._cache_area).parent, previous_copy_id, "meta",
                                       previous_copy_id, "logdb")
            log.info(f"Merge past backup source path not exist")
        return record_path

    def read_archive_time(self, cache_path_parent, last_backup_record):
        """Read the archived (max restorable) time from the cache parent dir.

        :return: (True, archived_time) when both the archived_time file and the
            previous backup record exist; (False, '') otherwise — the first log
            backup associates only the latest full backup (upgrade compatible).
        """
        archived_time_path = os.path.join(cache_path_parent, "archived_time")
        log.info(f"Archived time path is {archived_time_path}")
        if os.path.exists(archived_time_path) and os.path.exists(last_backup_record):
            archived_time = read_file(archived_time_path)
        else:
            return False, ''
        # Fixed: get_log_comm must be called — the original interpolated the
        # bound method object into the log line instead of its result
        log.info(f"get archived_time: {archived_time}. {self.get_log_comm()}")
        return True, archived_time

    def read_full_copy_id_list(self, cache_path_parent):
        """Read the full-copy-id/timestamp map list from the cache parent dir.

        :return: (True, list) when the map file exists and holds more than one
            record; (False, []) otherwise (upgrade compatibility, or only one
            full backup done so far).
        """
        full_copy_id_and_timestamp_map_file = os.path.join(cache_path_parent, "full_copy_id_and_timestamp_maps")
        copy_id_timestamp_map_list = []
        if os.path.exists(full_copy_id_and_timestamp_map_file):
            copy_id_timestamp_map_list = read_file(full_copy_id_and_timestamp_map_file)
        else:
            # Upgrade compatibility: no relation info, return directly
            log.info("Not find copy id and timestamp maps")
            return False, []
        if len(copy_id_timestamp_map_list) <= 1:
            # Only one full backup so far; first advance the max restorable
            # time to the post-upgrade full backup
            log.info("Copy id timestamp length is single")
            return False, []
        # Fixed: get_log_comm must be called — the original interpolated the
        # bound method object into the log line instead of its result
        log.info(f"Find copy id and timestamp maps: {copy_id_timestamp_map_list}. {self.get_log_comm()}")
        return True, copy_id_timestamp_map_list

    def merge_past_data_copy_sqlite(self, cache_path_parent, copy_id_list):
        """Merge each associated past copy's restoreMergeSqlite into this task.

        The current full copy is skipped (already merged); copies with no
        restoreMergeSqlite directory are skipped too.
        """
        current_full_id = self.get_last_full_copy_id()
        for copy_id in copy_id_list:
            if copy_id == current_full_id:
                log.info(f"Last full copy {current_full_id} is merged. no need to merge again")
                continue
            # Merge the backupkey.db of every copy the log copy depends on
            source_path = os.path.join(cache_path_parent, copy_id, "meta", copy_id, "restoreMergeSqlite")
            if not os.path.exists(source_path):
                log.info(f"Source db path not exist: {source_path}, skip merge {copy_id}")
                continue
            log.info(f"Source db path exist: {source_path}, start merge to cur sqlite {copy_id}")
            self.merge_last_backup_xbsa_table(source_path)

    def get_copy_id_list(self, archived_time, copy_id_timestamp_map_list):
        """Collect copy ids newer than the archived time plus one older anchor.

        The map list is walked newest-first (reversed in place); iteration
        stops after the first copy whose timestamp precedes archived_time, so
        that copy is still included as the restore anchor.
        """
        copy_id_list = []
        copy_id_timestamp_map_list.reverse()
        # archived_time comes from a file and may be a string; compare numerically
        archived_threshold = float(archived_time)
        for copy_id_timestamp_map in copy_id_timestamp_map_list:
            timestamp = copy_id_timestamp_map.get("time", "")
            copy_id = copy_id_timestamp_map.get("copy_id", "")
            log.info(f"Get timestamp: {timestamp}, copy id: {copy_id}")
            copy_id_list.append(copy_id)
            # Stop once a full copy older than the archived-to time is reached
            if timestamp < archived_threshold:
                break
        # Fixed: get_log_comm must be called — the original interpolated the
        # bound method object into the log line instead of its result
        log.info(f"Get copy id list: {copy_id_list}. {self.get_log_comm()}")
        return copy_id_list

    def check_archived_time(self):
        """Reset the archived-time markers when the archive slot was restarted.

        On archive space overflow, restore, or a manual slot restart the
        archive slot restarts, and the log backup's backup_key parameters must
        be recomputed starting from the last full backup.
        """
        # Only log backups need to handle the archive-restart scenario.
        # Compare against the enum member itself (not .value) for consistency
        # with every other backup_type check in this class.
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            log.info("Only log backup need check archived time")
            return

        # Detect whether the archive slot was restarted via the SLA policy
        backup_sla = self._json_param_object.get("job", {}).get("extendInfo", {}).get("backupTask_sla")
        backup_policy = json.loads(backup_sla).get("policy_list")[0]
        log.info(f"Get backup policy {backup_policy}")
        restart_archive_flag = backup_policy.get("ext_parameters").get("restart_archive", False)
        log.info(f"Get restart archive flag {restart_archive_flag}")
        if not restart_archive_flag:
            return

        # Remove the max-restorable-time files so the backup_key parameters
        # are computed from the last full backup onwards
        cache_path_parent = Path(self._cache_area).parent
        show_archived_time_path = os.path.join(cache_path_parent, "show_archived_time")
        archived_time_path = os.path.join(cache_path_parent, "archived_time")
        if os.path.exists(show_archived_time_path):
            log.info(f"start to remove show archived time file: {show_archived_time_path}")
            os.remove(show_archived_time_path)
        if os.path.exists(archived_time_path):
            log.info(f"start to remove archived time file: {archived_time_path}")
            os.remove(archived_time_path)

    def save_repo_relations_to_meta(self):
        """Record the data-repository (repositoryType 1) esn/fsId/name/role
        relations into the meta repo for later filesystem mapping on restore."""
        repositories = self._json_param_object.get("job", {}).get("repositories", [])
        relations = []
        for rep in repositories:
            if rep.get("repositoryType") != 1:
                continue
            # Default to {} so a repository without extendInfo cannot raise
            # AttributeError (the original chained .get on a possible None)
            extend_info = rep.get("extendInfo", {}) or {}
            device_sn = extend_info.get("esn")
            fs_id = extend_info.get("fsId")
            role = rep.get("role")
            # First path segment of the remote path is the filesystem name
            remote_path = rep.get("remotePath", "").strip("/").split("/")[0]
            relations.append({
                'oldEsn': device_sn,
                'oldFsId': fs_id,
                'oldFsName': remote_path,
                'role': role
            })
        repo_relations_path = os.path.join(self._meta_area, 'meta', self._copy_id, 'repoRelations')
        exec_overwrite_file(repo_relations_path, relations)
        log.info(f"save_repo_relations_to_meta {relations} to {repo_relations_path}")

    def merge_last_backup_xbsa_table(self, source_path):
        """Aggregate source_path's xbsa table into this copy's objectmeta dir.

        Incremental backups call this in the pre job to merge the previous
        copy's backupkey.db; failures are logged and swallowed.
        """
        # Aggregation target under the cache repo
        target_path = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        if not os.path.exists(target_path):
            log.info(f"Merge target path not exist, start to make dir: {target_path}")
            if not exec_mkdir_cmd(target_path):
                log.info(f"Merge target dir failed")
                return
        try:
            ret = aggregate_single_copy_object_data(target_path, source_path, self.get_repo_list(), True)
        except Exception as err:
            log.error(f"Aggregate object data failed. main task:{self._job_id}, err: {err}")
            return
        if not ret:
            log.error(f"Aggregate object data failed. main task:{self._job_id}")

    def create_path_for_backup(self):
        """
        Create every directory needed during the backup and apply the matching
        ownership/permission scheme to each.

        :return: True when all directories exist or were created, False when
                 any mkdir fails.
        """
        meta_object_dir = os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta')
        cache_object_dir = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        # speed/<host_key> directory used for per-node speed reporting
        speed_host_dir = os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed', self._host_ip)

        # (directory, permission-setter) pairs, handled in the original order.
        targets = (
            (meta_object_dir, self.change_meta_permission),
            (cache_object_dir, self.change_cache_meta_permission),
            (speed_host_dir, self.change_cache_tmp_permisson),
        )
        for directory, apply_permission in targets:
            if not os.path.exists(directory) and not exec_mkdir_cmd(directory):
                return False
            apply_permission(directory)
        return True

    def get_cur_task_ip(self):
        """
        Pick the endpoint IP of this task that matches one of the host's own
        addresses; fall back to the backup agent's roach IP when none matches.

        :return: the selected task IP string.
        """
        host_ip = extract_ip()
        log.info(f"get host_ip： {host_ip}")
        log.info(f"get end_point list: {self._end_point}")
        # Keep the LAST matching endpoint, exactly as the original scan did.
        matched = [ip for ip in self._end_point.split(',') if ip in host_ip]
        cur_task_ip = matched[-1] if matched else ""
        log.info(f"get cur task ip: {cur_task_ip}")
        if not cur_task_ip:
            cur_task_ip, _ = get_agent_roach_host_and_port()
            log.info(f"cur task ip is empty, use backupAgent ip: {cur_task_ip}")
        return cur_task_ip

    def change_cache_tmp_permisson(self, cache_path_speed_host):
        """
        Tighten ownership and permissions on the cache-repo tmp tree:
        the host speed dir and cache/tmp/<copy_id>/speed get rdadmin:rdadmin
        700, while cache/tmp and cache/tmp/<copy_id> get root:rdadmin 750.

        :param cache_path_speed_host: cache/tmp/<copy_id>/speed/<host_key> dir.
        """
        tmp_dir = os.path.join(self._cache_area, 'tmp')
        copy_dir = os.path.join(self._cache_area, 'tmp', self._copy_id)
        speed_dir = os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed')
        # (path, owner, mode) rules applied in the original order;
        # the group is always rdadmin.
        rules = (
            (cache_path_speed_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (speed_dir, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (tmp_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (copy_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
        )
        for path, owner, mode in rules:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def change_cache_meta_permission(self, cache_path_object_host):
        """
        Tighten ownership and permissions along the cache-repo meta path:
        the host object dir and cache/meta/<copy_id>/objectmeta get
        rdadmin:rdadmin 700, while the cache root, cache/meta and
        cache/meta/<copy_id> get root:rdadmin 750.

        :param cache_path_object_host: cache/meta/<copy_id>/objectmeta dir.
        """
        copy_dir = os.path.join(self._cache_area, 'meta', self._copy_id)
        # (path, owner, mode) rules applied in the original order;
        # the group is always rdadmin.
        rules = (
            (cache_path_object_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (self._cache_area, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (os.path.join(self._cache_area, 'meta'), UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (copy_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (os.path.join(copy_dir, 'objectmeta'), UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        )
        for path, owner, mode in rules:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def change_meta_permission(self, meta_path_object_host):
        """
        Tighten ownership and permissions along the meta-repo path:
        the host object dir and meta/meta/<copy_id>/objectmeta get
        rdadmin:rdadmin 700, while the meta root, meta/meta and
        meta/meta/<copy_id> get root:rdadmin 750.

        :param meta_path_object_host: meta/meta/<copy_id>/objectmeta dir.
        """
        copy_dir = os.path.join(self._meta_area, 'meta', self._copy_id)
        # (path, owner, mode) rules applied in the original order;
        # the group is always rdadmin.
        rules = (
            (meta_path_object_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (self._meta_area, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (os.path.join(self._meta_area, 'meta'), UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (copy_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (os.path.join(copy_dir, 'objectmeta'), UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        )
        for path, owner, mode in rules:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def save_task_info_to_cache(self):
        """
        Write cache/tmp/<copy_id>/taskInfo_<hostKey>.txt holding the repository
        list, task type and copy type of this backup.

        :return: True when the file was written, False otherwise.
        """
        info_payload = {
            "repositories": self.get_repo_list(),
            "taskType": TaskTypeEnum.BACKUP.value,
            "copyType": self.backup_type
        }
        log.info(f"save_task_info_to_cache: {info_payload}. {self.get_log_comm()}")
        info_file = os.path.join(self._cache_area, 'tmp', self._copy_id, f'taskInfo_{self._host_ip}.txt')
        exec_overwrite_file(info_file, info_payload)
        if os.path.exists(info_file):
            # Restrict the file to read-only for rdadmin.
            set_user_and_group(info_file, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(info_file, PermissionNode.PERMISSION_400)
            return True
        # For now the failure is only logged.
        log.error(f"Create taskInfo_{self._host_ip}.txt file failed.")
        return False

    def save_cacheinfo_to_cache(self):
        """
        Write xbsa_cacheInfo_info_hcs_<instance_id>.txt under the xbsa file
        directory so the xbsa component can locate the cache/meta repositories
        for this task. One file per node, keyed by the instance id.

        :return: True when the file was written, False otherwise.
        """
        cache_info = {
            "cacheRepoPath": self._cache_area,
            "metaRepoPath": self._meta_area,
            "copyId": self._copy_id,
            "taskId": self._job_id,
            "hostKey": self._host_ip
        }
        cache_info_path = os.path.join(RoachConstant.XBSA_FILE, f"xbsa_cacheInfo_info_hcs_{self._instance_id}.txt")
        exec_overwrite_file(cache_info_path, cache_info)
        if not os.path.exists(cache_info_path):
            # Fix: the old message named the wrong file (dws_cacheInfo.txt).
            log.error(f"Create {cache_info_path} failed.")
            return False
        # Fix: restrict ownership/permissions only after the file is known to
        # exist instead of chowning a possibly missing path first.
        set_user_and_group(cache_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(cache_info_path, PermissionNode.PERMISSION_400)
        return True

    def save_business_config(self):
        """
        Write the business-config file under cache/tmp/<copy_id> carrying the
        job type (full or increment backup) for downstream components.

        :return: True when the file was written, False otherwise.
        """
        # Map the backup type onto the business-config job type constant.
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            backup_file_type = BusinessConfigType.FULL_BACKUP_TYPE
        else:
            backup_file_type = BusinessConfigType.INCREMENT_BACKUP_TYPE
        business_type_info_dir = os.path.join(self._cache_area, "tmp", self._copy_id)
        if not os.path.exists(business_type_info_dir):
            exec_mkdir_cmd(business_type_info_dir)
        business_type_info = {"jobType": backup_file_type}
        business_type_info_path = os.path.join(business_type_info_dir, RoachConstant.BUSINESS_CONFIG_FILE)
        exec_overwrite_file(business_type_info_path, business_type_info)
        if not os.path.exists(business_type_info_path):
            # Fix: the old message named an unrelated file (dws_cacheInfo.txt).
            log.error(f"Create {RoachConstant.BUSINESS_CONFIG_FILE} failed.")
            return False
        # Fix: set permissions and log success only after the existence check,
        # so a failed write is not reported as success.
        set_user_and_group(business_type_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(business_type_info_path, PermissionNode.PERMISSION_400)
        log.info(f"success to save_business_config, jobType: {backup_file_type}")
        return True

    def get_repo_list(self):
        """
        Parse the x8000 <-> file-system mapping from the task parameters.

        By default only the data repository (type 1) is handed to xbsa; for
        log backups the log repository (type 3) is included as well.

        :return: list of {"role", "deviceSN", "filesystems"} dicts, one per
                 storage device, with same-named file systems merged.
        """
        res_list = []
        repositories = self._json_param_object.get("job", {}).get("repositories", [])
        log.info(f"get get_repo_list {repositories}")
        useful_repo = [1]
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            useful_repo = [1, 3]
        for rep in repositories:
            repository_type = rep.get("repositoryType")
            if repository_type not in useful_repo:
                continue
            role = rep.get("role")
            # Robustness fix: default to {} so a missing extendInfo cannot raise.
            device_sn = rep.get("extendInfo", {}).get("storage_info", {}).get("storage_device")
            log.info(f"sub_job_id: {self._sub_job_id}, device_sn: {device_sn}")
            mount_path = rep.get("path")
            log.info(f"Get type {repository_type} repo , mount_path is : {mount_path}")
            # For log backups xbsa gets the parent mount path, so restore can
            # look up copies of several log backups via the xbsa fs table.
            mount_path = self.build_mount_paht_preffix(mount_path, repository_type)
            # Fix: the old message claimed "backup type is log" for every type.
            log.info(f"Repo type {repository_type}, mount path: {mount_path[0]}")
            fs_dict = {
                "id": rep.get("extendInfo", {}).get("fsId", ""),
                "name": rep.get("remotePath", "").strip("/").split("/")[0],
                "sharePath": rep.get("remotePath", "").strip("/"),
                "mountPath": mount_path
            }

            # Attach the file system to its device entry, or create a new
            # device entry when the device is not listed yet.
            not_found = self.scan_file_system(device_sn, fs_dict, True, res_list)
            if not_found:
                res_list.append({"role": role, "deviceSN": device_sn, "filesystems": [fs_dict]})
        return res_list

    def scan_file_system(self, device_sn, fs_dict, not_found, res_list):
        """
        Merge *fs_dict* into the entry of *res_list* whose deviceSN matches
        *device_sn*: append it unless a same-named file system already exists.

        :return: False when a matching device entry was found, otherwise the
                 incoming *not_found* value (callers pass True).
        """
        for res in res_list:
            if res.get("deviceSN", "") == device_sn:
                filesystems = res.get("filesystems", [])
                no_same_file_system = True
                # A same-named file system already present means no new entry.
                for filesystem in filesystems:
                    no_same_file_system = self.scan_same_system(filesystem, fs_dict, no_same_file_system)
                # No same-named file system: add fs_dict as its own entry.
                if no_same_file_system:
                    fs_dict_name = fs_dict.get("name", "")
                    log.info(f"no same file system with {fs_dict_name}")
                    filesystems.append(fs_dict)
                not_found = False
        # Fix: call get_log_comm() instead of logging the bound method object.
        log.info(f"Scan file system result {not_found}. {self.get_log_comm()}")
        return not_found

    def scan_same_system(self, filesystem, fs_dict, no_same_file_system):
        """
        Return False when *filesystem* and *fs_dict* share the same name,
        otherwise pass the incoming *no_same_file_system* flag through.
        """
        if filesystem.get("name", "") == fs_dict.get("name", ""):
            no_same_file_system = False
        # Fix: call get_log_comm() instead of logging the bound method object.
        log.info(f"Scan file system result {no_same_file_system}. {self.get_log_comm()}")
        return no_same_file_system

    def create_host_db(self):
        """
        Create the dwsHosts.db sqlite file (with DwsHostFilesystemTable) under
        <meta_area>/meta and restrict its ownership and permissions.

        :return: True on success or when the db already exists, False otherwise.
        """
        if not self._meta_area:
            log.error(f"No usable meta path.")
            return False
        self._db_name = os.path.join(self._meta_area, "meta", "dwsHosts.db")
        # A symlink here could redirect the write elsewhere -- remove it first.
        if os.path.islink(self._db_name):
            log.warn(f"This is a link file, remove it.")
            os.remove(self._db_name)
        if os.path.isfile(self._db_name):
            log.debug(f"Db {self._db_name} file exists.")
            return True
        try:
            object_conn = sqlite3.connect(self._db_name)
        except Exception as ex:
            log.error(f"Connect sqlite {self._db_name} failed for {ex}")
            return False
        # Fix: the original never committed nor closed the connection, leaking
        # it on every path. (Dead falsy checks on connect()/cursor() results
        # removed: both raise on failure rather than return a falsy value.)
        try:
            object_cur = object_conn.cursor()
            object_cur.execute("CREATE TABLE IF NOT EXISTS [DwsHostFilesystemTable] ("
                               "[hostname] VARCHAR(256) NOT NULL PRIMARY KEY,"
                               "[filesystemName] VARCHAR(256) NOT NULL,"
                               "[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,"
                               "[rsv1] VARCHAR(256));")
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
            if not object_tables:
                log.error(f"Create dws table failed.")
                return False
            object_conn.commit()
        finally:
            object_conn.close()
        # Restrict the db file to rw-r--r-- and the meta dir to rwxr-xr-x.
        os.chmod(self._db_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
        os.chmod(os.path.join(self._meta_area, "meta"), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IXOTH)
        if not exec_lchown(self._db_name, "root", "rdadmin"):
            log.error(f"Change owner for {self._db_name} failed.")
            return False
        log.info(f"Create db({self._db_name}) successfully.")
        return True

    def create_backupkey_db(self):
        """
        Create the backupkey.db sqlite file (with BsaObjTable) under
        <cache_area>/meta/<copy_id>/objectmeta.

        :return: True on success or when the db already exists, False otherwise.
        """
        self._backupkey_name = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta", "backupkey.db")
        # A symlink here could redirect the write elsewhere -- remove it first.
        if os.path.islink(self._backupkey_name):
            log.warn(f"This is a link file, remove it.")
            os.remove(self._backupkey_name)
        if os.path.isfile(self._backupkey_name):
            log.debug(f"Db {self._backupkey_name} file exists.")
            return True
        try:
            object_conn = sqlite3.connect(self._backupkey_name)
        except Exception as ex:
            log.error(f"Connect sqlite {self._backupkey_name} failed for {ex}")
            return False
        # Fix: the original never committed nor closed the connection, leaking
        # it on every path. (Dead falsy checks on connect()/cursor() results
        # removed: both raise on failure rather than return a falsy value.)
        try:
            object_cur = object_conn.cursor()
            object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                               "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL, "
                               "[bsaObjectOwner] VARCHAR(64),"
                               "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
                               "[estimatedSize] VARCHAR(100) NOT NULL,"
                               "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
                               "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
                               "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
                               "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
            if not object_tables:
                log.error(f"Create dws table failed.")
                return False
            object_conn.commit()
        finally:
            object_conn.close()
        log.info(f"Create db {self._backupkey_name} successfully.")
        return True

    def do_post_job(self):
        """
        Post-job step: remove the progress file, then either record the last
        copy id (success) or, for a failed differential backup, strip the
        current copy id from the previous full copy's backup_key list.
        """
        log.info(f"step 3-1 start to do post job")
        # Remove the per-sub-job progress file (best effort only).
        try:
            progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
            if os.path.exists(progress_file):
                os.remove(progress_file)
            else:
                log.info(f'Progress file {progress_file} not exist! failed to delete while doing post job!')
        except Exception:
            log.warning("failed to delete while doing post job")

        # Read the backup result; on failure the data/meta repo bookkeeping
        # must be cleaned up.
        backup_result = self._json_param_object.get("backupJobResult")
        log.info(f"_cache_area:{self._cache_area}")
        log.info(f"_data_area:{self._data_area}")
        log.info(f"_meta_area:{self._meta_area}")
        if backup_result != BackupJobResult.FAIL:
            # Remember this (successful) backup's copy id for later tasks.
            self.save_last_id()
            return

        # Failed differential backup: remove the current copy_id from the
        # backup_key list stored beside the last full copy.
        if self.backup_type == BackupTypeEnum.DIFF_BACKUP:
            log.info(f"diff back job failed, delete cur backup key. {self.get_log_comm()}")
            last_full_copy_id = self.get_last_full_copy_id()
            cache_path_parent = Path(self._cache_area).parent
            backup_key_file_meta = os.path.join(cache_path_parent, last_full_copy_id, "meta",
                                                last_full_copy_id, "backup_key")
            previous_backup_keys = read_file(backup_key_file_meta)
            log.info(f"get_old_backup_key backup_key_info: {previous_backup_keys}. {self.get_log_comm()}")
            # backup_key appears to be a comma-separated id list whose last
            # element is the newest copy id -- TODO confirm against the writer.
            suffix_pos = previous_backup_keys.rfind(',')
            suffix_copy_id = previous_backup_keys[suffix_pos + 1:]
            cur_backup_keys = previous_backup_keys
            # Drop the trailing element only when it is this task's copy id.
            if suffix_copy_id == self._copy_id:
                cur_backup_keys = previous_backup_keys[0: suffix_pos]
            log.info(f"save_new_backup_key backup_key_info: {cur_backup_keys}. {self.get_log_comm()}")
            exec_overwrite_file(backup_key_file_meta, cur_backup_keys)
        return

    def get_last_full_copy_info(self):
        """
        Query the most recent full copy of the protect object via the rc tool.

        :return: the rpc result dict, or {} when the query fails.
        """
        query_param = {
            RpcParamKey.APPLICATION: self._json_param_object.get("job", {}).get("protectObject"),
            RpcParamKey.TYPES: LastCopyType.last_copy_type_dict.get(2),
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id
        }
        try:
            return exec_rc_tool_cmd(self._job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, query_param)
        except Exception as err_info:
            log.warning(f"Get last copy info fail.{err_info}")
            return {}

    def get_last_copy_info(self):
        """
        Query the most recent copy of the protect object via the rc tool.

        :return: the rpc result dict, or {} when the query fails.
        """
        query_param = {
            RpcParamKey.APPLICATION: self._json_param_object.get("job", {}).get("protectObject"),
            RpcParamKey.TYPES: LastCopyType.last_copy_type_dict.get(1),
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id
        }
        try:
            return exec_rc_tool_cmd(self._job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, query_param)
        except Exception as err_info:
            log.error(f"Get last copy info fail.{err_info}")
            return {}

    def log_format(self):
        """
        Build the common log suffix containing pid, job id and sub-job id.

        :return: formatted string for appending to log messages.
        """
        # Fix: _sub_job_id is a plain attribute (see its other uses in this
        # class), not a callable -- calling it raised TypeError.
        return f"pid: {self._pid}, job_id: {self._job_id}, sub_job_id: {self._sub_job_id}"

    def upload_log_backup_info(self):
        """
        After a log backup: read the basic-recovery-time file to collect the
        restorable time ranges and the cn rebuild flag, persist the flag under
        the cache repo, report the restore times, and save the archived time.
        """
        # Locate the relative path of the restore-time file and the file
        # system it lives on from the copy's object metadata.
        source_path = os.path.join(self._meta_area, "meta", self._copy_id, "objectmeta")
        restore_time_file_relative_path, file_system_name = self.query_restore_time_file_relative_path(source_path)
        repo_list = self.get_repo_list()
        log.info(f"get repo list {repo_list}")
        mount_path = ""
        # Resolve the file-system name to its mount path by scanning the
        # repo list; the last matching file system wins.
        for repo in repo_list:
            for filesystem in repo['filesystems']:
                if filesystem['name'] == file_system_name:
                    # mountPath is a list; its first entry is the usable path.
                    mount_path = filesystem['mountPath'][0]
        log.info(f"Cur backup type is log, mount path: {mount_path}")
        log.info(f"Cur backup type is log, restore_time_file_relative_path: {restore_time_file_relative_path}")

        # Absolute path of the basic_recovery_time file.
        restore_time_file_absolute_path = mount_path + restore_time_file_relative_path
        log.info(f"restore_time_file_absolute_path{restore_time_file_absolute_path}")
        # Parse out the [start, end] time pairs and the cn rebuild flag.
        lines = self.read_xbsa_objects(restore_time_file_absolute_path)
        time_body_info = []
        cn_build_flag = 0
        for line in lines:
            if line == "":
                continue
            words = line.split(",")
            if len(words) > 1:
                # Comma-separated line: first two fields are a time range.
                time_body = [words[0], words[1]]
                time_body_info.append(time_body)
            else:
                # Single-field line: the second-to-last character carries the
                # cn rebuild flag -- presumably the line ends with a
                # terminator character; TODO confirm the file format.
                cn_build_flag = line[len(line) - 2]

        # Persist the cn rebuild flag under cache/meta/<copy_id>/logBackup.
        last_copy_id_file = os.path.join(self._cache_area, "meta", self._copy_id, "logBackup", "cnRebuildFlag")
        last_copy_id_path = os.path.join(self._cache_area, "meta", self._copy_id, "logBackup")
        if not os.path.exists(last_copy_id_path):
            exec_mkdir_cmd(last_copy_id_path)
        exec_overwrite_file(last_copy_id_file, cn_build_flag)
        log.info(f"get last_copy_id_file: {last_copy_id_file}, cn_build_flag: {cn_build_flag}. {self.get_log_comm()}")

        # Report the restorable time ranges.
        ret = self.update_restore_time(time_body_info)
        if not ret:
            log.error(f"update restore time error")

        # Record how far the logs have been archived.
        self.save_archived_time(time_body_info)

    def save_archived_time(self, time_body_info):
        """
        Persist the end time of the newest archived interval; the next log
        archive resumes from this timestamp.

        :param time_body_info: list of [start, end] time-string pairs.
        """
        archived_time = int(time_body_info[-1][1])
        archived_time_file = os.path.join(Path(self._cache_area).parent, "archived_time")
        log.info(f"get backup archive time path: {archived_time_file}, archived_time: {archived_time}")
        exec_overwrite_file(archived_time_file, archived_time)

    def set_db_conf(self, param):
        """
        Apply the first SLA backup policy from the task parameters to the
        GaussDB instance configuration.

        :param param: task parameter dict with job/extendInfo/backupTask_sla.
        :return: True on success; False when the sla cannot be parsed or
                 the configuration update fails.
        """
        log.info("Begin set gauss db config.")
        try:
            sla_text = param.get("job", {}).get("extendInfo", {}).get("backupTask_sla")
            first_policy = json.loads(sla_text).get("policy_list")[0]
        except Exception as err:
            log.error(f"Get backup task sla failed, err: {err}.")
            return False

        try:
            db_instance_id = param.get("job", {}).get("protectObject", {}).get("id", "")
            return self._fun_inst.set_gaussdb_conf(db_instance_id, first_policy)
        except Exception as err:
            log.error(f"Failed set gauss db config, err {err}.")
            return False
