#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import base64
import glob
import json
import os
import pathlib
import sqlite3
import stat
import threading
import time

from common.common import output_result_file, execute_cmd
from common.common_models import SubJobDetails, LogDetail, SubJobModel
from common.const import ParamConstant, SubJobStatusEnum, SubJobPriorityEnum, RepositoryDataTypeEnum, \
    RepoProtocalType, CMDResult
from common.env_common import get_install_head_path
from common.file_common import exec_lchown, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file, su_exec_cmd_list, ExecFuncParam
from common.util.validators import ValidatorEnum
from dws.commons.const import PERMISSION_755, DwsRetEnum, StorageRole
from dws.commons.function_tool import log_start
from tpops.gaussdb.common.const import SubJobType, SubJobPolicy, GaussSubJobName, EnvName, GaussBackupStatus, \
    GaussDBCode, LogLevel, RoachConstant, LastCopyType, RpcParamKey, TaskTypeEnum, GaussCopyType, JobInfo, \
    UserInfo, PermissionNode, BusinessConfigType, VERSION, ExtendInfoKeys, GaussDBRdsErrorCode, ErrorCode
from tpops.gaussdb.common.gaussdb_common import invoke_rpc_tool_interface, \
    report_job_details, get_std_in_variable, extract_ip, check_path_valid, get_agent_roach_host_and_port, \
    read_file, write_progress_file_with_status_and_speed, \
    set_user_and_group, set_permisson, check_path_in_white_list
from tpops.gaussdb.common.log import log
from tpops.gaussdb.common.safe_get_information import ResourceParam
from tpops.gaussdb.common.tpops_gaussdb_exception import GaussDBException
from tpops.gaussdb.handle.resource.resource_info import ResourceInfo
from tpops.gaussdb.handle.exec_base import ExecBase


class Restore(ExecBase):

    def __init__(self, pid, job_id, sub_job_id, data, json_param):
        """
        Initialize the restore context from the parsed task parameter object.

        :param pid: request id of the current plugin invocation
        :param job_id: main job id
        :param sub_job_id: sub job id (may be empty for pre/gen steps)
        :param data: raw task data forwarded to ExecBase
        :param json_param: parsed task parameter dict; must not be empty
        :raises Exception: when json_param is empty
        """
        super().__init__(pid, job_id, sub_job_id, data)
        if not json_param:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._agentcli_path = f"{get_install_head_path()}/DataBackup/ProtectClient/ProtectClient-E/bin/agentcli"
        self._json_param_object = json_param
        self._copy = {}
        self._last_backup_copy = {}
        self.get_param_from_file()
        self._copy_type = self._copy.get("type", "")
        self._meta_area, self._cache_area, self._data_area = self.get_available_path()
        self._copy_id = self._copy.get("id", "")
        self._host_ip = self._json_param_object.get("job", {}).get("targetEnv", {}).get("endpoint", "")
        self._logdetail = None
        self._err_info = {}
        # Poll interval (seconds) for progress loops.
        self._query_progress_interval = 60
        self._instance_id = self._json_param_object.get("job", {}).get("targetObject", {}).get("id", "")
        self._sub_job_name = ""
        self._job_status = SubJobStatusEnum.RUNNING
        self._restore_status = GaussBackupStatus.RUNNING
        self._err_code = 0
        EnvName.IAM_USERNAME = "job_targetEnv_auth_authKey"
        EnvName.IAM_PASSWORD = "job_targetEnv_auth_authPwd"
        self._concrete_object_db = "backupkey.db"
        self.user_name = get_std_in_variable(f"{EnvName.IAM_USERNAME}_{pid}")
        # Assemble the resource-access request body.
        self._extend_info = self._copy.get("protectEnv", {}).get("extendInfo", {})
        self.backup_copy_id = self.get_backup_copy_id()
        # Bug fix: the default used to be "" (a str); a missing extendInfo key
        # would then raise AttributeError on the .get calls below. Use {} like
        # every other nested lookup in this constructor.
        target_extend_info = self._json_param_object.get("job", {}).get("targetEnv", {}).get("extendInfo", {})
        self.business_addr = target_extend_info.get("pmAddress", "")
        self.business_port = target_extend_info.get("pmPort", "")
        self.address = f"https://{self.business_addr}:{self.business_port}"
        self.restore_extend_info = self._json_param_object.get("job", {}).get("extendInfo", {})
        self.client_crt = self.restore_extend_info.get("clientCrt", "")
        self.restore_target_location = self.restore_extend_info.get("targetLocation", "")
        self._fun_inst = ResourceInfo(pid, self.address)
        self.client_key = self._fun_inst.client_key
        self.ca_cert_pem = self._fun_inst.ca_cert_pem
        self.rand_pass = self._fun_inst.rand_pass
        self.project_id = self._fun_inst.get_project_id()
        self.token = self._fun_inst.get_token_info()
        self._target_objectdata_path = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        self._target_parent_id = self._json_param_object.get("job", {}).get("targetObject", {}).get("parentId", "")
        self._protect_parent_id = self._copy.get("protectObject", {}).get("parentId", "")

    @staticmethod
    def get_copy_params_by_key(param, json_const):
        """
        Return extendInfo[json_const] from the FIRST copy's protectObject.

        :param param: full task parameter dict
        :param json_const: key to read from the copy's extendInfo
        :return: the value, or "" when the copies list or the key is absent
        """
        copies = param.get("job", {}).get("copies", [])
        # Guard: an empty copies list used to raise IndexError on [0].
        if not copies:
            log.error(f"Get param {json_const} failed: no copies in job params.")
            return ""
        value = copies[0].get("protectObject", {}).get("extendInfo", {}).get(json_const, "")
        if not value:
            # Bug fix: the old message interpolated the already-overwritten
            # variable, printing the (empty) result instead of the source data.
            log.error(f"Get param {json_const} from copies[0] failed.")
        return value

    @staticmethod
    def get_params_by_key(param, json_const):
        """
        Return extendInfo[json_const] from the LAST copy's protectObject.

        :param param: full task parameter dict
        :param json_const: key to read from the copy's extendInfo
        :return: the value, or "" when the copies list or the key is absent
        """
        copies_info = param.get("job", {}).get("copies", [])
        # Guard: an empty copies list used to raise IndexError below.
        if not copies_info:
            log.error(f"Get param {json_const} failed: no copies in job params.")
            return ""
        # Idiom: [-1] instead of [len(copies_info) - 1].
        value = copies_info[-1].get("protectObject", {}).get("extendInfo", {}).get(json_const, "")
        if not value:
            log.error(f"Get param {json_const} from {copies_info} failed.")
        return value

    @staticmethod
    def read_param_file(file_path):
        """
        Parse a JSON parameter file and return its content.

        :param file_path: absolute path of the parameter file
        :return: the deserialized JSON content
        :raises Exception: when the file is missing or not valid JSON
        """
        if not os.path.isfile(file_path):
            raise Exception(f"File:{file_path} not exist")
        try:
            with open(file_path, "r", encoding='UTF-8') as file_name:
                return json.load(file_name)
        except Exception as ex:
            raise Exception("parse param file failed") from ex

    @staticmethod
    def set_error_response(response):
        """Mark the given response object as failed (code and body error)."""
        failed_code = GaussDBCode.FAILED.value
        response.code = failed_code
        response.body_err = failed_code

    def set_logdetail(self):
        """Reset self._logdetail to one error-level LogDetail carrying self._err_code."""
        detail = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                           logDetailInfo=[], logLevel=3)
        detail.log_detail = self._err_code
        self._logdetail = [detail]
        return True

    def get_progress(self):
        """
        Query the service for the restore job's progress.

        :return: the service response body, or {} when the persisted job id
            file is missing or the service returns an error code
        """
        log.info(f"start to get restore progress, instance_id: {self._instance_id}")
        # The restore job id was persisted to the cache repo when the job was issued.
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if not os.path.exists(job_file):
            log.error(f"copy info path not exist.")
            return {}
        restore_job_id = self.read_param_file(job_file)
        log.info(f"get restore_job_id:{restore_job_id}")
        ret_body = self._fun_inst.get_job_info(restore_job_id)
        log.info(f"get restore_job get_progress ret_body:{ret_body}")
        error_code = ret_body.get("error_code")
        if error_code:
            log.error(f'Exec GET error with return: {ret_body.get("error_msg")}')
            return {}
        log.info(f"end to get restore progress, instance_id: {self._instance_id}")
        return ret_body

    def get_log_comm(self):
        """Return the common log suffix identifying pid / job id / sub-job id."""
        task_ids = (self._pid, self._job_id, self._sub_job_id)
        return "pid:{} jobId:{} subjobId:{}".format(*task_ids)

    def write_restore_progress(self):
        """
        Poll the service for restore progress and persist it to the progress
        file until the job leaves the RUNNING state.

        :raises Exception: when the service reports a terminal non-success
            status; the raw job json is carried in the exception message.
        """
        # Report the restore progress periodically.
        while self._restore_status == GaussBackupStatus.RUNNING:
            log.info("Start to write progress.")
            # A missing progress file may simply not be generated yet; not a failure.
            progress_info = self.get_progress()
            # Bug fix: the old default "" made the None check unreachable, and a
            # missing "job" key then raised AttributeError on str.get() below.
            job_json = progress_info.get("job")
            if not job_json:
                log.warning("job_json is None")
                # Bug fix: sleep before retrying; the old code busy-looped here.
                time.sleep(self._query_progress_interval)
                continue
            self._restore_status = job_json.get("status", "")
            if self._restore_status == GaussBackupStatus.RUNNING:
                status = SubJobStatusEnum.RUNNING
            elif self._restore_status == GaussBackupStatus.SUCCEED:
                status = SubJobStatusEnum.COMPLETED
            else:
                # Terminal failure: surface the raw job info to the caller.
                raise Exception(json.dumps(job_json))
            log.info(f"restore job status: {status}")
            # Read the job progress; a non-numeric value degrades to 0.
            try:
                progress = int(progress_info.get("progress", "0"))
            except Exception:
                log.error("Failed calculate progress")
                progress = 0
            log.info(f"restore job progress: {progress}")
            job_info = self.get_job_info()
            write_progress_file_with_status_and_speed(job_info, status, progress, True)
            # Poll once per interval.
            time.sleep(self._query_progress_interval)

    def upload_restore_progress(self):
        """
        Periodically report this sub-job's progress file to the agent framework
        until the task leaves the RUNNING state. Designed to run in a daemon
        thread alongside the restore sub job.
        """
        # Invariant path: compute once outside the loop.
        progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
        while self._job_status == SubJobStatusEnum.RUNNING:
            # A missing progress file may simply not be generated yet; report
            # a generic RUNNING detail instead of failing.
            running_detail = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                           taskStatus=SubJobStatusEnum.RUNNING,
                                           progress=0, logDetail=self._logdetail)
            if not os.path.exists(progress_file):
                log.info("upload_restore_progress has no progress_file")
                report_job_details(self._job_id, running_detail.dict(by_alias=True))
                time.sleep(self._query_progress_interval)
                continue
            with open(progress_file, "r") as f_object:
                progress_dict = json.load(f_object)
            self._job_status = progress_dict.get("taskStatus")
            if not self._job_status:
                log.error(f"Failed to obtain the task status.{self.get_log_comm()}")
                self._job_status = SubJobStatusEnum.FAILED
                progress_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                              taskStatus=SubJobStatusEnum.FAILED.value, progress=100,
                                              logDetail=self._logdetail).dict(by_alias=True)
            log.info(f"upload_restore_progress has progress_file {progress_dict}")
            report_job_details(self._job_id, progress_dict)
            time.sleep(self._query_progress_interval)

    def restore_task(self):
        """
        Dispatch and run the current restore sub job (xbsa/roach/exec), while a
        daemon thread reports progress in the background.

        :return: True when the sub job succeeded, False otherwise
        """
        job_info = self.get_job_info()
        write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.RUNNING, 0, True)
        # Dispatch table: sub job name -> handler.
        sub_job_dict = {
            GaussSubJobName.SUB_XBSA: self.sub_job_xbsa,
            GaussSubJobName.SUB_ROACH: self.sub_job_roach,
            GaussSubJobName.SUB_EXEC: self.sub_job_exec
        }
        # Start a daemon thread that reports restore progress.
        progress_thread = threading.Thread(name='exec_restore', target=self.upload_restore_progress)
        progress_thread.daemon = True
        progress_thread.start()
        # Run the sub job.
        sub_job_name = ResourceParam.get_sub_job_name(self._json_param_object)
        log.info(f"sub_job_name :  {sub_job_name}")
        if not sub_job_name:
            return False
        self._sub_job_name = sub_job_name
        sub_job_func = sub_job_dict.get(sub_job_name)
        if sub_job_func is None:
            # Bug fix: an unknown sub job name used to raise TypeError
            # ("None is not callable") instead of reporting a failure.
            log.error(f"Unknown sub job {sub_job_name}.{self.get_log_comm()}.")
            self.report_job_fail(sub_job_name, "")
            return False
        try:
            ret = sub_job_func()
        except Exception as err:
            log.error(f"Exec sub job {sub_job_name} failed, error{err}, job info{self.get_log_comm()}.")
            # Bug fix: err.args may be empty; avoid IndexError on err.args[0].
            self.report_job_fail(sub_job_name, err.args[0] if err.args else str(err))
            return False
        if not ret:
            log.error(f"Exec sub job {sub_job_name} failed.{self.get_log_comm()}.")
            self.report_job_fail(sub_job_name, "")
            return False
        log_detail = LogDetail(logInfo="plugin_task_subjob_success_label", logInfoParam=[self._sub_job_id], logLevel=1)
        report_job_details(self._pid, SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                                    logDetail=[log_detail],
                                                    taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))
        log.info(f"Exec sub job success: {sub_job_name} .")
        progress_thread.join()
        return True

    def report_job_fail(self, sub_job_name, error_message):
        """
        Report a failed sub-job detail (with error code and parameters) to the
        agent framework.

        :param sub_job_name: name of the failed sub job (used by the caller; not
            interpolated here)
        :param error_message: message text appended to the log detail params
        """
        log_detail_param = []
        log.info(f"get error_message: {error_message}")
        # The error code is fixed to ERR_BACKUP_RESTORE for every failure path.
        self._err_code = ErrorCode.ERR_BACKUP_RESTORE
        log.info(f"get error_code: {self._err_code}")
        log_detail_param.append(self._instance_id)
        if self._err_code == ErrorCode.ERR_BACKUP_RESTORE:
            log_detail_param.append(error_message)
        # NOTE(review): _err_code was just set to ERR_BACKUP_RESTORE above, so
        # this branch only runs if the two constants compare equal — it looks
        # unreachable; confirm the intended source of _err_code.
        if self._err_code == RpcParamKey.INSTANCE_VERSION_IS_NOT_SAME:
            log.info("restore to new instance failed for version different")
            source_instance_id = \
                self._json_param_object.get("job", {}).get("copies", [])[0].get("protectObject", {}).get("id", "")
            log_detail_param.append(source_instance_id)
        log_detail = LogDetail(logInfo="plugin_task_subjob_fail_label", logInfoParam=[self._sub_job_id],
                               logLevel=LogLevel.ERROR.value, logDetail=self._err_code,
                               logDetailParam=log_detail_param)
        report_job_details(self._pid,
                           SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                         logDetail=[log_detail], taskStatus=SubJobStatusEnum.FAILED.value).dict(
                               by_alias=True))

    def build_sub_job(self, job_priority, job_type, job_name, node_id):
        """Build one business sub-job dict for the agent framework."""
        sub_job = SubJobModel(jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, execNodeId=node_id,
                              jobPriority=job_priority, jobName=job_name, policy=job_type,
                              ignoreFailed=False)
        return sub_job.dict(by_alias=True)

    def do_post(self):
        """Post job: best-effort removal of this sub-job's progress file; always returns True."""
        log.info(f"step 3-1 start to do post job")
        progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
        if not os.path.exists(progress_file):
            log.warn(f'Progress file not exist! failed to delete while doing post job!')
            return True
        os.remove(progress_file)
        return True

    def sub_job_xbsa(self):
        """
        Switch the instance backup media to xbsa and wait for the service job
        to finish.

        :return: True on success (or when no switch is needed), False otherwise
        :raises GaussDBException: when the switch request itself fails
        """
        log.info(f"start to set xbsa backup media. {self.get_log_comm()}")
        # The tpops flavour needs no media switch; report success directly.
        job_info = self.get_job_info()
        if self._fun_inst.db_version == VERSION.TPOPS:
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, False)
            return True
        request_body = {
            "type": "xbsa"
        }
        try:
            ret_body = self._fun_inst.change_backup_media(self._instance_id, request_body)
        except Exception as e:
            log.error(f"change backup media to xbsa error exception: {e}")
            # Keep the original cause chained for diagnosis.
            raise GaussDBException(f"set xbsa backup media failed. {e}") from e
        # Already switched to xbsa: the service rejects a repeat switch; treat as success.
        if ret_body.get("errCode") == GaussDBRdsErrorCode.REPEAT_CHANGE_BACKUP_MEDIA_TO_XBSA:
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, False)
            return True
        if "job_id" not in ret_body:
            log.error("set xbsa backup media failed.")
            write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.FAILED, 100, False)
            raise GaussDBException(f"set xbsa backup media failed. {ret_body}")
        job_id = ret_body.get("job_id")
        job_status = GaussBackupStatus.RUNNING
        while job_status == GaussBackupStatus.RUNNING:
            progress_info = self._fun_inst.get_job_info(job_id)
            job_json = progress_info.get("job")
            if job_json is None:
                log.warning("job_json is None")
                # Bug fix: sleep before retrying; the old code busy-looped here
                # (the sleep below was skipped by the continue).
                time.sleep(self._query_progress_interval)
                continue
            job_status = job_json.get("status", "")
            log.info(f"progress_info.job(status and progress): {job_json}")
            time.sleep(self._query_progress_interval)
            if job_status == GaussBackupStatus.RUNNING:
                status = SubJobStatusEnum.RUNNING
            elif job_status == GaussBackupStatus.SUCCEED:
                status = SubJobStatusEnum.COMPLETED
            else:
                status = SubJobStatusEnum.FAILED
            write_progress_file_with_status_and_speed(job_info, status, 0, False)
        log.info(f"set xbsa backup media finished. {self.get_log_comm()}")
        return job_status == GaussBackupStatus.SUCCEED

    def sub_job_roach(self):
        """
        Persist this node's roach agent "host:port" into the meta repository so
        that sub_job_set can later collect every node's address.

        :return: True on success
        :raises Exception: ERR_BACKUP_AGENT_NOT_START when the local roach
            agent host/port cannot be determined
        """
        log.info("step2-5 start to sub_job_roach")
        self.save_cacheinfo_to_cache()

        # Query the ip and port configured in this node's roach process and
        # write them to the meta repository.
        try:
            host, port = get_agent_roach_host_and_port()
        except Exception as err:
            # Bug fix: raise directly with the cause chained, instead of the
            # flag-variable dance that left host/port potentially unbound.
            raise Exception(RpcParamKey.ERR_BACKUP_AGENT_NOT_START) from err
        host_ip = host
        host_port = f"{host}:{port}"
        log.info(f"step2-5-1 sub_job_roach {host_port}")
        host_port_info_file = os.path.join(self._meta_area, "hostInfo", self._job_id)
        # Create the per-job directory on first use.
        if not os.path.exists(host_port_info_file):
            log.info(f"start to make dir {host_port_info_file}")
            if not exec_mkdir_cmd(host_port_info_file):
                # Bug fix: a failed mkdir is an error, not an info-level event.
                log.error(f"error happens when mkdir host_port_info_file {host_port_info_file}")

        host_port_file = os.path.join(self._meta_area, "hostInfo", self._job_id, f"host_port_{host_ip}")
        # Remove a stale file before rewriting it.
        if os.path.exists(host_port_file):
            log.info("start to remove File")
            os.remove(host_port_file)
        exec_overwrite_file(host_port_file, host_port)
        log.info(f"step2-5-1 end to exec_overwrite_file File host_port_file")
        job_info = self.get_job_info()
        write_progress_file_with_status_and_speed(job_info, SubJobStatusEnum.COMPLETED, 100, False)
        return True

    def sub_job_exec(self):
        """Run the exec sub job by restoring to the original instance."""
        return self.sub_job_exec_to_original()

    def gen_sub_job(self):
        """
        Split the restore task into sub jobs (xbsa media switch, roach mount,
        exec) and write them to the result file for the agent framework.

        :return: True when the sub jobs were generated, False when targetEnv
            carries no nodes
        """
        log.info(f"step 2-1 start to gen_sub_job {self._json_param_object}")

        # Record the restore start time in the cache repository.
        start_time = str(int((time.time())))
        start_time_path = os.path.join(self._cache_area, f'T{self._job_id}')
        exec_overwrite_file(start_time_path, start_time, json_flag=False)
        log.info(f"success to write restore start time to start_time_path")

        file_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._pid}")
        sub_job_array = []

        nodes = self._json_param_object.get("job", {}).get("targetEnv", {}).get("nodes", [])
        if not nodes:
            log.error("nodes is empty")
            return False
        # Sub job 1: switch the backup media to xbsa (single node).
        job_policy = SubJobPolicy.LOCAL_NODE.value
        job_name = GaussSubJobName.SUB_XBSA
        job_priority = SubJobPriorityEnum.JOB_PRIORITY_1
        node_id = nodes[0].get("id", "")
        sub_job = self.build_sub_job(job_priority, job_policy, job_name, node_id)
        sub_job_array.append(sub_job)

        # Sub job 2: mount (once on every node).
        job_type = SubJobPolicy.EVERY_NODE_ONE_TIME.value
        job_name = GaussSubJobName.SUB_ROACH
        job_priority = SubJobPriorityEnum.JOB_PRIORITY_2
        node_id = nodes[0].get("id", "")
        sub_job = self.build_sub_job(job_priority, job_type, job_name, node_id)
        sub_job_array.append(sub_job)

        # Sub job 3: exec, pinned to the node whose endpoint matches a local ip.
        job_type1 = SubJobPolicy.FIXED_NODE.value
        job_name = GaussSubJobName.SUB_EXEC
        job_priority = SubJobPriorityEnum.JOB_PRIORITY_3
        # Loop-invariant: the local ip list does not change between nodes.
        host_ip = extract_ip()
        for node in nodes:
            node_id = node.get("id")
            endpoint = node.get("endpoint")
            if endpoint in host_ip:
                sub_job = self.build_sub_job(job_priority, job_type1, job_name, node_id)
                sub_job_array.append(sub_job)
        log.info(f"Sub-task splitting succeeded.sub-task num:{len(sub_job_array)}")
        exec_overwrite_file(file_path, sub_job_array)
        log.info("step 2-1 end to gen_sub_job")
        # Bug fix: the old code compared self._restore_status (initialized to
        # RUNNING and never updated in this path) to SUCCEED, so splitting
        # always reported failure even after succeeding.
        return True

    def sub_job_set(self):
        """
        Collect every node's roach "host:port" file written by sub_job_roach
        and push the aggregated ip/port lists into the instance extend info.

        :return: True on success
        """
        log.info("step2-3 start to sub_job_set_extend_info")
        # Read the files produced by the mount sub job.
        host_port_file_path = os.path.join(self._meta_area, "hostInfo", self._job_id)
        log.info(f"step2-3 start to sub_job_set_extend_info host_port_file_path")
        ip_list = []
        port_list = []
        for entry in os.listdir(host_port_file_path):
            entry_path = os.path.join(host_port_file_path, entry)
            if not os.path.isfile(entry_path):
                continue
            # Each file holds one "host:port" string.
            host_ports = self.read_param_file(entry_path).split(":")
            ip_list.append(host_ports[0])
            port_list.append(host_ports[1])
        log.info(f"step2-3-1 start to sub_job_set_extend_info _instance_id:"
                 f"{self._instance_id} ips, ports.")
        ips = ",".join(ip_list)
        ports = ",".join(port_list)
        log.info(f"step2-3-2 start to sub_job_set_extend_info _instance_id:"
                 f"{self._instance_id} ips, ports")
        # Push the aggregated addresses through the extend-info interface.
        self._fun_inst.set_instance_extend_info_twice(self._instance_id, ips, ports, self.client_crt,
                                                      RoachConstant.XBSA_FILE_RESTORE_PATH)
        return True

    def sub_job_exec_to_original(self):
        """
        Exec sub job: configure the instance extend info, issue the restore
        request to the service (idempotently), then poll until it finishes.

        :return: True on success, False when no job id could be obtained
        """
        log.info("step 2-3 start to sub_job_exec_to_original")
        self.sub_job_set()

        # Fused-version cross-tpops restore: sync the source tpops data to the
        # new tpops first.
        if self._protect_parent_id != self._target_parent_id:
            log.info("This is a cross-tpops recovery.")
            self.copy_meta_info_to_new_cluster()

        # backup_id was captured when the backup task was issued; it is used in
        # the restore request body.
        backup_id = self.get_backup_id()
        log.info(f"restore backup id{backup_id}")

        request_body = self.get_restore_body(backup_id)
        log.info(f"get request_body {request_body}")
        # Start the restore.
        # Resume support: a request that was already issued is not issued again
        # (the presence of the job-id file marks an issued request).
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if os.path.exists(job_file):
            log.info("skip cur restore request")
        else:
            ret_body = self._fun_inst.request_restore_job(self._instance_id, backup_id, request_body)
            log.info(f"request_restore_job ret_body:{ret_body}")

            # Persist the job id so progress can be queried later.
            job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
            if not ret_body.get("job_id", ""):
                log.error('Failed get job id!')
                return False
            exec_overwrite_file(job_file, ret_body.get("job_id", ""))

        # Poll the progress in a loop and write it to the progress file.
        self.write_restore_progress()
        log.info("step 2-3 end to exec_restore")
        return True

    def copy_meta_info_to_new_cluster(self):
        """
        Fused-version cross-tpops restore: copy the old tpops backup metadata
        to the new tpops via the service, and wait for the service job.

        :return: True on success (or when running on tpops itself, where no
            copy is needed), False otherwise
        """
        if self._fun_inst.db_version == VERSION.TPOPS:
            log.info("Tpops no need to copy meta info to new cluster")
            return True
        # When restoring to a new location, the source id is the protect
        # object id recorded in the copy.
        source_instance_id = self._instance_id
        if self.restore_target_location == "new":
            source_instance_id = \
                self._json_param_object.get("job", {}).get("copies", [])[0].get("protectObject", {}).get("id", "")
        # NOTE: an unused get_instance_extend_info_twice() call (copied from
        # get_restore_body) was removed here; its result was never read.
        request_body = {"start_time": "", "end_time": "", "media_type": "XBSA",
                        "prepared_instance_id": self._instance_id, "source_instance_id": source_instance_id}
        cur_time = int(time.time())
        copy_time = self._copy.get("timestamp", cur_time)
        # Search window: +/- 9 hours around the copy timestamp, in milliseconds.
        nine_hours_in_seconds = 9 * 3600 * 1000
        request_body["start_time"] = str(copy_time * 1000 - nine_hours_in_seconds)
        request_body["end_time"] = str(copy_time * 1000 + nine_hours_in_seconds)
        log.info(f"copy meta info to new cluster request body {request_body}")
        try:
            ret_body = self._fun_inst.copy_backup_meta_info_to_new_cluster(self._instance_id, request_body)
        except Exception as e:
            log.error(f"Copy backup info to new cluster failed: {e}")
            return False
        log.info(f"copy meta info to new cluster return body {ret_body}")
        if "job_id" not in ret_body:
            log.error(f"Copy backup info to new cluster failed, return info: {ret_body}.")
            return False
        job_id = ret_body.get("job_id")
        job_status = GaussBackupStatus.RUNNING
        while job_status == GaussBackupStatus.RUNNING:
            progress_info = self._fun_inst.get_job_info(job_id)
            job_json = progress_info.get("job")
            if job_json is None:
                log.warning("job_json is None")
                # Bug fix: sleep before retrying; the old code busy-looped here.
                time.sleep(self._query_progress_interval)
                continue
            job_status = job_json.get("status", "")
            log.info(f"progress_info.job(status and progress): {job_json}")
            time.sleep(self._query_progress_interval)
        log.info(f"Copy backup meta info finished. {self.get_log_comm()}")
        return job_status == GaussBackupStatus.SUCCEED

    def get_backup_id(self):
        """
        Read the backup_id recorded when the backup task was issued; it is used
        as the restore request body. The cache repository is tried first, then
        the meta repository (reverse-replication scenario).

        :return: the backup id string, or '' when neither location has it
        """
        mount_path_parent = pathlib.Path(self._cache_area).parent
        backup_id_file = os.path.join(mount_path_parent, self.backup_copy_id, "meta", self.backup_copy_id, "backup_id")
        log.info(f"get backup_id_file {backup_id_file}")
        log.info(f"get backup_copy_id {self.backup_copy_id}")
        if not os.path.exists(backup_id_file):
            log.warn(f"cache rep not have backup_id exist. {backup_id_file}")
            # Reverse-replication scenario: backup_id lives in the meta repo.
            backup_id_file = os.path.join(self._meta_area, "meta", self.backup_copy_id, "backup_id")
            if not os.path.exists(backup_id_file):
                log.error(f"meta rep not have backup_id exist.")
                return ''
        backup_id = read_file(backup_id_file)
        log.info(f"get restore backup id{backup_id}.{self.get_log_comm()}")
        return backup_id

    def get_restore_body(self, backup_id):
        """
        Build the restore request body: timestamp-based for log copies,
        backup-id-based otherwise; for restore-to-new-location, append the
        xbsa extension infos from the prepared instance.

        :param backup_id: backup id recorded at backup time
        :return: the request body dict
        """
        # Restoring to a new location: the source instance id is the protect
        # object id of the copy.
        source_instance_id = self._instance_id
        extend_info = self._fun_inst.get_instance_extend_info_twice(self._instance_id)
        if self.restore_target_location == "new":
            source_instance_id = \
                self._json_param_object.get("job", {}).get("copies", [])[0].get("protectObject", {}).get("id", "")
        log.info(f"get_restore_body get source_instance_id: {source_instance_id}")
        if self.get_copy_type() == "log":
            restore_time = self.restore_extend_info.get("restoreTimestamp", "")
            restore_body = {
                "source": {
                    "instance_id": source_instance_id,
                    "type": "timestamp",
                    # Appends "000" — presumably seconds -> milliseconds; confirm
                    # against the service API.
                    "restore_time": restore_time + "000"
                },
                "target": {
                    "instance_id": self._instance_id
                }
            }
        else:
            restore_body = {
                "source": {"instance_id": source_instance_id, "type": "backup", "backup_id": backup_id},
                "target": {"instance_id": self._instance_id}
            }
        if self.restore_target_location == "new":
            restore_body["xbsa_extension_infos"] = [
                {"key": ExtendInfoKeys.BACKUP, "value": extend_info.get(ExtendInfoKeys.BACKUP)},
                {"key": ExtendInfoKeys.XBSA, "value": extend_info.get(ExtendInfoKeys.XBSA)}
            ]
        return restore_body

    def restore_pre_job(self):
        """
        Prerequisite job: validate the copy, prepare the cache/meta directories
        and aggregate the xbsa object data, then drop a completion marker file.

        :return: (success_flag, error_code) tuple
        """
        log.info("step 1-1 start to check region")
        # Restore is only allowed within the same region.
        error_code = GaussDBCode.SUCCESS.value

        # For a log backup whose copy is flagged as non-restorable, report a
        # failure label and stop.
        if self._copy_type == GaussCopyType.LOG:
            can_restore_flag = self._copy.get("extendInfo", {}).get("canRestore")
            if can_restore_flag is not None and not can_restore_flag:
                log_detail = LogDetail(logInfo="tpops_log_restore_with_incomplete_copy_label", logInfoParam=[],
                                       logLevel=3)
                report_job_details(self._job_id,
                                   SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                                 logDetail=[log_detail],
                                                 taskStatus=SubJobStatusEnum.FAILED.value).dict(by_alias=True))
                return False, GaussDBCode.FAILED.value

        log.info(f"step 1-3 start to make file for xbsa")
        log.info(f"json_param_object: {self._json_param_object}")
        log.info(f"meta_area: {self._meta_area}")
        log.info(f"cache_area: {self._cache_area}")
        log.info(f"data_area: {self._data_area}")
        self.create_path_for_backup()
        self.save_task_info_to_cache()
        self.save_bussiness_config()
        self.create_host_db()

        # Archive copies need no filesystem relationship mapping; only a temp
        # roach_client directory.
        if self._copy_type != "s3Archive":
            self.write_fs_relationship()
        else:
            temp_path = os.path.join(f'{self._cache_area}', 'tmp', self._copy_id, 'roach_client')
            if not os.path.exists(temp_path):
                exec_mkdir_cmd(temp_path)

        # Aggregate the backupkey.db files.
        self.merge_xbsa_info()

        # Done: touch the marker file read by restore_prerequisite_progress.
        result_file = os.path.join(self._cache_area, "RestorePrerequisiteProgress")
        pathlib.Path(result_file).touch()
        return True, error_code

    def merge_xbsa_info(self):
        """
        Aggregate the per-copy object data (backupkey.db) into the cache
        repository's objectmeta directory.

        :return: True on success, False when no source data exists or the
            aggregation fails
        """
        # Target directory inside the cache repository.
        target_path = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        if not os.path.exists(target_path) and not exec_mkdir_cmd(target_path):
            return False
        # Source directory: per-copy meta area next to the cache mount point.
        mount_path_parent = pathlib.Path(self._cache_area).parent
        copy_meta_dir = os.path.join(mount_path_parent, self.backup_copy_id, "meta", self.backup_copy_id)
        restore_sqlite_path = os.path.join(copy_meta_dir, "restoreSqlite")
        # Pre-patch log copies store the data under restoreSqlite, not logdb.
        if self.get_copy_type() == GaussCopyType.LOG and os.path.exists(restore_sqlite_path):
            object_data_path = restore_sqlite_path
        else:
            object_data_path = os.path.join(copy_meta_dir, "logdb")
        if not os.path.exists(object_data_path):
            log.error(f"No object data path({object_data_path}) in cache repo exists. main task:{self._job_id}")
            object_data_path = os.path.join(self._meta_area, "meta", self.backup_copy_id, "logdb")
            if not os.path.exists(object_data_path):
                log.error(f"No object data path({object_data_path}) in meta repo exists. main task:{self._job_id}")
                return False
        try:
            ret = self.aggregate_single_copy_object_data(target_path, object_data_path)
        except Exception as err:
            log.error(f"Aggregate object data failed. main task:{self._job_id}, err: {err}")
            return False
        if not ret:
            log.error(f"Aggregate object data failed. main task:{self._job_id}")
            return False
        return True

    def restore_prerequisite_progress(self):
        """
        Report the prerequisite-job progress, derived from the marker files
        that the prerequisite task drops in the cache repo.
        """
        log.info(f"step 1-4 start to upload restore prepare job progress ")
        done_marker = os.path.join(self._cache_area, "RestorePrerequisiteProgress")
        fail_marker = os.path.join(self._cache_area, "RestorePrerequisiteFailProgress")
        # Default: prerequisite finished successfully.
        job_status, progress = SubJobStatusEnum.COMPLETED.value, 100
        if not os.path.exists(done_marker):
            log.info("restore_prerequisite_progress is running")
            job_status, progress = SubJobStatusEnum.RUNNING.value, 0
        # A fail marker overrides everything else.
        if os.path.exists(fail_marker):
            log.info("restore_prerequisite_progress is fail")
            job_status, progress = SubJobStatusEnum.FAILED.value, 0
        detail = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress, logDetail=self._logdetail)
        output_result_file(self._pid, detail.dict(by_alias=True))

    def get_copy_type(self):
        """
        Return the copy type string (full/increment/replication/s3Archive etc),
        or "" when the copy carries no type field.
        """
        backup_type = self._copy.get("type")
        log.info(f"get backup type: {backup_type}")
        if backup_type:
            return backup_type
        log.error(f"No backup type info in extendInfo, main task: {self._job_id}.")
        return ""

    def get_backup_copy_id(self):
        """
        Return the id of the backup copy backing this restore.

        For archive copies (s3Archive/tapeArchive) the id is nested one level
        deeper, under extendInfo.extendInfo.copyId.
        """
        # Hoisted: the original called get_copy_type() twice per invocation,
        # doing the lookup (and its log lines) twice for the same value.
        copy_type = self.get_copy_type()
        if copy_type in ("s3Archive", "tapeArchive"):
            return self._copy.get("extendInfo", {}).get("extendInfo", {}).get("copyId", "")
        return self._copy.get("extendInfo", {}).get("copyId", "")

    def create_path_for_backup(self):
        """
        Create the directories needed during the restore and apply their
        ownership/permission settings.

        Returns:
            bool: False as soon as one directory cannot be created,
            True otherwise.
        """
        # <meta>/meta/<copy_id>/objectmeta/<host_ip>
        meta_object_dir = os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta', self._host_ip)
        if not os.path.exists(meta_object_dir) and not exec_mkdir_cmd(meta_object_dir):
            log.error("Create meta_path_object_host failed.")
            return False
        self.change_restore_meta_permission(meta_object_dir)

        # <cache>/meta/<copy_id>/objectmeta
        cache_object_dir = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        if not os.path.exists(cache_object_dir) and not exec_mkdir_cmd(cache_object_dir):
            log.error(f"Create meta_path_object_host {meta_object_dir} failed.")
            return False
        self.change_cache_meta_permmison(cache_object_dir)

        # <cache>/tmp/<copy_id>/speed/<host_ip>
        cache_speed_dir = os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed', self._host_ip)
        if not os.path.exists(cache_speed_dir) and not exec_mkdir_cmd(cache_speed_dir):
            log.error("Create cache_path_speed failed.")
            return False
        self.change_cache_temp_permmison(cache_speed_dir)
        return True

    def change_cache_temp_permmison(self, cache_path_speed_host):
        """Set owner and mode on each level of the cache-repo tmp/speed chain."""
        tmp_dir = os.path.join(self._cache_area, 'tmp')
        copy_dir = os.path.join(self._cache_area, 'tmp', self._copy_id)
        speed_dir = os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed')
        # (path, owner, mode); the group is always rdadmin.
        perm_plan = (
            (cache_path_speed_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (speed_dir, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (tmp_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (copy_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
        )
        for path, owner, mode in perm_plan:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def change_cache_meta_permmison(self, cache_path_object_host):
        """Set owner and mode on each level of the cache-repo meta chain."""
        meta_dir = os.path.join(self._cache_area, 'meta')
        copy_dir = os.path.join(self._cache_area, 'meta', self._copy_id)
        object_dir = os.path.join(self._cache_area, 'meta', self._copy_id, 'objectmeta')
        # (path, owner, mode); the group is always rdadmin.
        perm_plan = (
            (cache_path_object_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (os.path.join(self._cache_area), UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (meta_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (copy_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (object_dir, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        )
        for path, owner, mode in perm_plan:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def change_restore_meta_permission(self, meta_path_object_host):
        """Set owner and mode on each level of the meta-repo meta chain."""
        meta_dir = os.path.join(self._meta_area, 'meta')
        copy_dir = os.path.join(self._meta_area, 'meta', self._copy_id)
        object_dir = os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta')
        # (path, owner, mode); the group is always rdadmin.
        perm_plan = (
            (meta_path_object_host, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            (os.path.join(self._meta_area), UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (meta_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (copy_dir, UserInfo.USER_ROOT, PermissionNode.PERMISSION_750),
            (object_dir, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        )
        for path, owner, mode in perm_plan:
            set_user_and_group(path, owner, UserInfo.USER_RDADMIN)
            set_permisson(path, mode)

    def save_task_info_to_cache(self):
        """
        Write taskInfo_{hostKey}.txt under <cache>/tmp/<copy_id>/.

        Returns:
            bool: True when the file was written successfully, False otherwise.
        """
        copy_type_map = {
            "full": 1,
            "increment": 2,
            "diff": 3,
            "log": 3,
            "nativeSnapshot": 5,
            "foreverIncrement": 6,
            "replication": 7,
            "s3Archive": 8,
            "tapeArchive": 9,
            "clone": 10
        }
        # Fix: remember the archive flag BEFORE remapping. The original
        # re-checked `self._copy_type == "s3Archive"` after having overwritten
        # it with "full", so the archiveFileServers branch below was
        # unreachable dead code.
        is_s3_archive = self._copy_type == "s3Archive"
        if is_s3_archive:
            self._copy_type = "full"
        task_info = {
            "repositories": self.get_repo_list(),
            "taskType": TaskTypeEnum.RESTORE.value,
            "copyType": copy_type_map.get(self._copy_type)
        }
        log.info(f"get task_info {task_info}")
        if is_s3_archive:
            # Archive direct restore additionally needs the dme_archive
            # service endpoints.
            archive_ip, archive_port = self.get_archive_ip_port()
            ret_code, ssl_enable = self.get_archive_ssl_enable()
            if not archive_ip or not archive_port or not ret_code:
                return False
            task_info["archiveFileServers"] = [
                {"ip": single_ip, "port": archive_port, "sslEnabled": ssl_enable}
                for single_ip in archive_ip
            ]
        task_path = os.path.join(self._cache_area, 'tmp', self._copy_id, f'taskInfo_{self._host_ip}.txt')
        log.info(f"save_task_info_to_cache: {task_info}. {self.get_log_comm()}")
        # Persist the filesystem/logical-port info into the agent cache repo.
        exec_overwrite_file(task_path, task_info)
        set_user_and_group(task_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(task_path, PermissionNode.PERMISSION_400)
        if not os.path.exists(task_path):
            log.error(f"Create taskInfo_host_ip_txt file failed.")
            return False
        return True

    def get_archive_ip_port(self):
        """
        For direct restore from archive, collect the dme_archive service
        addresses from the copy's S3 repositories.

        Returns:
            tuple: (list of ips, port); ([], "") on any missing or
            inconsistent entry.
        """
        service_infos = []
        for repo in self._copy.get("repositories", []):
            if repo.get("protocol", "") != RepoProtocalType.S3 \
                    or repo.get("repositoryType", "") == RepositoryDataTypeEnum.CACHE_REPOSITORY:
                continue
            service_infos = repo.get("extendInfo", {}).get("service_info", [])
            if service_infos:
                log.info(f"archive_info_list: {service_infos}")
                break
        if not service_infos:
            log.error(f"Fail to get archive info. main task: {self._job_id}")
            return [], ""
        ip_list, port = [], ""
        for entry in service_infos:
            entry_ip = entry.get("ip", "")
            entry_port = entry.get("port", "")
            # Every service entry must expose the same port.
            if port and port != entry_port:
                log.error(f"All port should be the same. {port}, {entry_port}. main task: {self._job_id}")
                return [], ""
            if not entry_ip or not entry_port:
                log.error(f"The archive ip({entry_ip} or port({entry_port})). main task: {self._job_id}")
                return [], ""
            ip_list.append(entry_ip)
            port = entry_port
        return ip_list, port

    def get_archive_ssl_enable(self):
        """
        For direct S3 restore, read whether SSL is enabled on the archive repo.

        Returns:
            tuple: (True, enable_ssl value) for the first matching S3 repo;
            (False, "") when none matches.
        """
        candidates = [
            repo for repo in self._copy.get("repositories", [])
            if repo.get("protocol", "") == RepoProtocalType.S3
            and repo.get("repositoryType", "") != RepositoryDataTypeEnum.CACHE_REPOSITORY
        ]
        if candidates:
            return True, candidates[0].get("extendInfo", {}).get("enable_ssl", "")
        log.error(f"Fail to get archive ssl enable. main task: {self._job_id}")
        return False, ""

    def save_cacheinfo_to_cache(self):
        """
        Write the per-node xbsa restore cache-info file
        (xbsa_cacheInfo_info_tpops_<instance>_xbsa_restore.txt).

        Returns:
            bool: True when the file was written, False otherwise.
        """
        cache_info = {
            "cacheRepoPath": self._cache_area,
            "metaRepoPath": self._meta_area,
            "copyId": self._copy_id,
            "taskId": self._job_id,
            "hostKey": self._host_ip
        }
        instance_id = self._instance_id
        # Restoring to a new location: take the id from the copy's protect object.
        if self.restore_target_location == "new":
            protect_object = self._json_param_object.get("job", {}).get("copies", [])[0].get("protectObject", {})
            instance_id = protect_object.get("id", "")
        log.info(f"Instance id is {instance_id}")
        cache_info_path = os.path.join(RoachConstant.XBSA_FILE,
                                       f"xbsa_cacheInfo_info_tpops_{instance_id}_xbsa_restore.txt")
        exec_overwrite_file(cache_info_path, cache_info)
        set_user_and_group(cache_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(cache_info_path, PermissionNode.PERMISSION_400)
        if os.path.exists(cache_info_path):
            return True
        log.error("Create dws_cacheInfo.txt failed.")
        return False

    def create_host_db(self):
        """
        Create <meta>/meta/dwsHosts.db with the DwsHostFilesystemTable table
        and tighten its ownership and permissions.

        Returns:
            bool: True when the db exists with its table (or already existed),
            False on any failure.
        """
        if not self._meta_area:
            log.error(f"No usable meta path.")
            return False
        db_name = os.path.join(self._meta_area, "meta", "dwsHosts.db")
        # A symlink here could redirect the write elsewhere; drop it first.
        if os.path.islink(db_name):
            log.warn(f"This is a link file, remove it.")
            os.remove(db_name)
        if os.path.isfile(db_name):
            log.debug(f"Db {db_name} file exists.")
            return True
        try:
            object_conn = sqlite3.connect(db_name)
        except Exception as ex:
            log.error(f"Connect sqlite {db_name} failed for {ex}")
            return False
        # Fix: the original never committed nor closed the connection, leaking
        # the sqlite handle; release it on every path.
        try:
            object_cur = object_conn.cursor()
            object_cur.execute("CREATE TABLE IF NOT EXISTS [DwsHostFilesystemTable] ("
                               "[hostname] VARCHAR(256) NOT NULL PRIMARY KEY,"
                               "[filesystemName] VARCHAR(256) NOT NULL,"
                               "[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,"
                               "[rsv1] VARCHAR(256));")
            object_conn.commit()
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        finally:
            object_conn.close()
        if not object_tables:
            log.error(f"Create dws table failed.")
            return False
        # db file 0644, meta dir 0755, owner root:rdadmin.
        os.chmod(db_name, stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
        os.chmod(os.path.join(self._meta_area, "meta"), stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP |
                 stat.S_IROTH | stat.S_IXOTH)
        if not exec_lchown(db_name, "root", "rdadmin"):
            log.error(f"Change owner for {db_name} failed.")
            return False
        log.info(f"Create db({db_name}) successfully.")
        return True

    def save_bussiness_config(self):
        """
        Write the business config file under <cache>/tmp/<copy_id>/ telling
        roach whether this restore is full or incremental.

        Returns:
            bool: True on success, False otherwise.
        """
        restore_file_type = (BusinessConfigType.INCREMENT_RESTORE_TYPE
                             if self.is_increment_copy()
                             else BusinessConfigType.FULL_RESTORE_TYPE)
        config_dir = os.path.join(self._cache_area, "tmp", self._copy_id)
        if not os.path.exists(config_dir) and not exec_mkdir_cmd(config_dir):
            return False
        business_type_info = {"jobType": restore_file_type}
        config_path = os.path.join(self._cache_area, "tmp",
                                   self._copy_id, RoachConstant.BUSINESS_CONFIG_FILE)
        log.info(f"save bussiness config {business_type_info} to {config_path}")
        exec_overwrite_file(config_path, business_type_info)
        if not os.path.exists(config_path):
            log.error("Create dws_cacheInfo.txt failed.")
            return False
        set_user_and_group(config_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(config_path, PermissionNode.PERMISSION_400)
        return True

    def create_backupkey_db(self):
        """
        Create the backupkey.db sqlite file with the BsaObjTable table under
        <cache>/meta/<job_id>/objectmeta.

        Returns:
            bool: True when the db exists with its table (or already existed),
            False on any failure.
        """
        # NOTE(review): this guards on self._meta_area but builds the path
        # from self._cache_area — looks intentional elsewhere in the file,
        # verify against callers.
        if not self._meta_area:
            log.error(f"No usable meta path.")
            return False
        backupkey_name = os.path.join(self._cache_area, "meta", self._job_id, "objectmeta", "backupkey.db")
        # A symlink here could redirect the write elsewhere; drop it first.
        if os.path.islink(backupkey_name):
            log.warn(f"This is a link file, remove it.")
            os.remove(backupkey_name)
        if os.path.isfile(backupkey_name):
            log.debug(f"Db backupkey_name file exists.")
            return True
        try:
            object_conn = sqlite3.connect(backupkey_name)
        except Exception as ex:
            log.error(f"Connect sqlite backupkey_name failed for {ex}")
            return False
        # Fix: the original never committed nor closed the connection, leaking
        # the sqlite handle; release it on every path.
        try:
            object_cur = object_conn.cursor()
            object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                               "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,"
                               "[bsaObjectOwner] VARCHAR(64),"
                               "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
                               "[estimatedSize] VARCHAR(100) NOT NULL,"
                               "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
                               "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
                               "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
                               "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
            object_conn.commit()
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        finally:
            object_conn.close()
        if not object_tables:
            log.error(f"Create dws table failed.")
            return False
        log.info(f"Create db backupkey_name successfully.")
        return True

    def get_repo_list(self):
        """
        Resolve the storage-device(X8000)-to-filesystem mapping from the
        task parameters.

        Returns:
            list: entries of the form
            {"role": ..., "deviceSN": ..., "filesystems": [fs_dict, ...]},
            grouped by device serial number.
        """
        res_list = []
        if self._copy_type == "s3Archive":
            # Archive copies describe their repositories under extendInfo.
            for repo in self._copy.get("extendInfo", {}).get("repositories", []):
                if repo.get("type") != RepositoryDataTypeEnum.DATA_REPOSITORY:
                    continue
                role = repo.get("role", "")
                esn = repo.get("extendInfo", {}).get("esn", "")
                file_systems = self.get_archive_filesystems_from_remote_path(repo)
                res_list.append({"role": role, "deviceSN": esn, "filesystems": file_systems})
            return res_list

        repositories = self._copy.get("repositories", [])
        if self.get_copy_type() == "log":
            # Log restore needs the log copy's repos plus the previous backup
            # copy's repos.
            log_repositories = self._copy.get("repositories", [])
            data_repositories = self._last_backup_copy.get("repositories", [])
            repositories = log_repositories + data_repositories
        for rep in repositories:
            repository_type = rep.get("repositoryType")
            # Only data (1) and log (3) repositories are relevant here.
            if repository_type != 1 and repository_type != 3:
                continue
            role = rep.get("role")
            device_sn = rep.get("extendInfo").get("esn")
            remote_path = rep.get("remotePath", "").strip("/").split("/")[0]
            mount_path = rep.get("path")
            log.info(f"Get type {repository_type} repo , mount_path is : {mount_path}")
            # For log backups, hand xbsa the parent of each mount path so it
            # can query several log backup copies via its filesystem table
            # during restore.
            mount_path = self.build_mount_paht_preffix(mount_path, repository_type)
            fs_dict = {
                "id": rep.get("extendInfo", {}).get("fsId", ""),
                "name": remote_path,
                "sharePath": rep.get("remotePath", "").strip("/"),
                "mountPath": mount_path
            }

            # Decide which X8000 device this filesystem belongs to; merge it
            # into an existing entry when the device is already listed.
            not_found = True
            not_found = self.scan_file_system(device_sn, fs_dict, not_found, res_list)

            if not_found:
                res_list.append({"role": role, "deviceSN": device_sn, "filesystems": [fs_dict]})
        return res_list

    def build_mount_paht_preffix(self, mount_path, repository_type):
        """
        For log repositories (type 3), trim the last '/'-separated component
        of every mount path so xbsa can look up multiple log backup copies
        under the common parent. Other repository types pass through unchanged.
        """
        if repository_type != 3:
            return mount_path
        trimmed = [path[:path.rfind('/')] for path in mount_path]
        log.info(f"Cur backup type is log, mount path: {trimmed}")
        return trimmed

    def scan_file_system(self, device_sn, fs_dict, not_found, res_list):
        """
        Attach fs_dict to the res_list entry whose deviceSN matches, skipping
        the append when a same-name filesystem is already present.

        Returns:
            bool: False when a matching device entry exists in res_list,
            otherwise the incoming not_found value.
        """
        for entry in res_list:
            if entry.get("deviceSN", "") != device_sn:
                continue
            unique = True
            # A same-name filesystem already listed means no new append.
            for existing in entry.get("filesystems", []):
                unique = self.scan_same_system(existing, fs_dict, unique)
            if unique:
                fs_dict_name = fs_dict.get("name", "")
                log.info(f"no same file system with {fs_dict_name}")
                entry.get("filesystems", []).append(fs_dict)
            not_found = False
        return not_found

    def scan_same_system(self, filesystem, fs_dict, no_same_file_system):
        """Return False when the two filesystem dicts share a name; otherwise pass the flag through."""
        names_match = filesystem.get("name", "") == fs_dict.get("name", "")
        return False if names_match else no_same_file_system

    def get_archive_filesystems_from_remote_path(self, repo):
        """
        Build the filesystem descriptors for an archive repo from its
        remotePath entries; meta-repository paths are skipped.
        """
        file_systems = []
        for entry in repo.get("remotePath", []):
            if entry.get("type", "") == RepositoryDataTypeEnum.META_REPOSITORY:
                continue
            # The filesystem name is the first path component.
            fs_name = entry.get("path").split('/')[1]
            logic_ips_list = [{"ip": host.get("ip")} for host in entry.get("remoteHost", [])]
            file_systems.append({
                "id": entry.get("id", ""),
                "name": fs_name,
                "sharePath": fs_name,
                "logicIps": logic_ips_list,
            })
        log.debug(f"Archive file systems: {file_systems}. main task: {self._job_id}")
        return file_systems

    def get_available_path(self):
        """Dispatch to the path-resolution strategy matching the copy type."""
        handlers = {
            GaussCopyType.S3ARCHIVE: self.get_available_path_archive,
            GaussCopyType.LOG: self.get_available_path_log,
        }
        return handlers.get(self._copy_type, self.get_available_path_common)()

    def get_available_path_archive(self):
        """
        Archive direct restore: only the cache repo is available; it doubles
        as the metadata path and there is no data path.
        """
        cache_path = self.get_cache_path()
        return cache_path, cache_path, ""

    def get_available_path_log(self):
        """
        Point-in-time (log) restore: the log copy carries no cache repo, so
        the cache path comes from the previous backup copy.
        """
        return (self.get_metadata_path(),
                self.get_last_copy_cache_path(),
                self.get_log_data_path())

    def get_single_type_repo_path_in_copy_info(self, target_type, copy_info):
        """
        Find the first repository of target_type in copy_info and return the
        first of its mount paths that is readable, writable and valid.

        Returns:
            str: the usable path, or "" when nothing matches.
        """
        repositories = copy_info.get("repositories")
        if not repositories:
            log.error(f"Fail to get repositories, main task: {self._job_id}.")
            return ""
        matched_repo = next((repo for repo in repositories
                             if repo.get("repositoryType") == target_type), {})
        if not matched_repo:
            log.warn(f"Fail to get type({target_type}), main task: {self._job_id}.")
            return ""
        log.info(f"repositoryType is target_type. target_type :{target_type}")
        path_candidates = matched_repo.get("path")
        if not path_candidates:
            log.warn(f"Fail to get path {target_type}, main task: {self._job_id}.")
            return ""
        for candidate in path_candidates:
            # Path must be readable, writable and pass validation.
            if os.access(candidate, os.R_OK) and os.access(candidate, os.W_OK) and check_path_valid(candidate):
                log.info(f"Get log data path success. {candidate}")
                return candidate
        log.error("Have no can available data path")
        return ""

    def get_log_data_path(self):
        """Point-in-time restore: the log repo path plus its 'data' subdirectory."""
        log_repo_path = self.get_single_type_repo_path_in_copy_info(RepositoryDataTypeEnum.LOG_REPOSITORY, self._copy)
        return os.path.join(log_repo_path, "data")

    def get_data_path(self):
        """Return the copy's data repository path plus its 'data' subdirectory."""
        data_repo_path = self.get_single_type_repo_path_in_copy_info(RepositoryDataTypeEnum.DATA_REPOSITORY, self._copy)
        return os.path.join(data_repo_path, "data")

    def get_metadata_path(self):
        """Return the copy's metadata repository path ('' when none is usable)."""
        return self.get_single_type_repo_path_in_copy_info(RepositoryDataTypeEnum.META_REPOSITORY, self._copy)

    def get_cache_path(self):
        """Return the copy's cache repository path ('' when none is usable)."""
        return self.get_single_type_repo_path_in_copy_info(RepositoryDataTypeEnum.CACHE_REPOSITORY,
                                                           self._copy)

    def get_last_copy_cache_path(self):
        """
        Point-in-time restore: the log copy carries no cache repo, so take it
        from the previous backup copy; the path must pass the whitelist check.

        Raises:
            Exception: when the path is not in the white list.
        """
        cache_path = self.get_single_type_repo_path_in_copy_info(RepositoryDataTypeEnum.CACHE_REPOSITORY,
                                                                 self._last_backup_copy)
        valid, _ = check_path_in_white_list(cache_path)
        if not valid:
            raise Exception("last_copy_cache_path invalid")
        return cache_path

    def get_available_path_common(self):
        """Return one usable (metadata, cache, data) path triple from the copy's repos."""
        return self.get_metadata_path(), self.get_cache_path(), self.get_data_path()

    def get_last_copy_info(self, backup_type=None):
        """
        Query the previous full/diff copy through the RPC tool.

        backup_type: unused; kept for interface compatibility.
        Returns:
            dict: the RPC result, or {} on failure.
        """
        input_param = {
            RpcParamKey.APPLICATION: self._copy.get("protectObject"),
            RpcParamKey.TYPES: LastCopyType.FULL_DIFF_COPIES,
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id,
        }
        try:
            return invoke_rpc_tool_interface(RpcParamKey.QUERY_PREVIOUS_CPOY, input_param)
        except Exception as err_info:
            log.error(f"Get last copy info fail.{err_info}")
            return {}

    def is_increment_copy(self):
        """An incremental restore carries more than one copy in the job params."""
        copies = self._json_param_object.get("job", {}).get("copies", [])
        return len(copies) > 1

    def get_fs_relation(self, copy_info):
        """
        Return the filesystem mapping relations used when restoring from a
        replicated copy; archive copies nest them one extendInfo level deeper.
        """
        extend_info = copy_info.get("extendInfo", {})
        if self.get_copy_type() in ("s3Archive", "tapeArchive"):
            extend_info = extend_info.get("extendInfo", {})
        return extend_info.get("fsRelations", {}).get("relations", [])

    @log_start()
    def get_objectdata_from_archive(self):
        """
        Archive direct restore prerequisite: pull the copy's object metadata
        from the dme_archive service into the cache repo.

        Returns:
            bool: True on success, False on any failure.
        """
        copy_id = self._copy_id
        if not copy_id:
            log.error(f"Fail to get copy id. main task:{self._job_id}")
            return False
        archive_ip, archive_port = self.get_archive_ip_port()
        if not archive_ip or not archive_port:
            return False
        # Build an "ip1,ip2:port" style archive address.
        archive_addr = f"{','.join(archive_ip)}:{archive_port}"
        log.debug(f"Archive address : {archive_addr}")
        # Local destination for the downloaded object data.
        if self._cache_area == "":
            log.error(f"No cache path can be read or written. cache_path: {self._cache_area}, "
                      f"main task:{self._job_id}")
            return False
        self._target_objectdata_path = os.path.join(self._cache_area, "meta", copy_id, "objectmeta")
        if not os.path.exists(self._target_objectdata_path):
            exec_mkdir_cmd(self._target_objectdata_path)
        meta_dir = os.path.join(self._cache_area, "meta")
        if not exec_lchown_dir_recursively(meta_dir, "rdadmin", "rdadmin"):
            log.error(f"Fail to change user/group for {meta_dir}. main task:{self._job_id}")
            return False
        meta_repo_path = self.get_archive_meta_prefix()
        if not self.download_repo_path_from_archive(meta_repo_path, archive_addr):
            log.error("Download meta repo data from archive failed")
            return False
        return True

    def get_param_from_file(self):
        """
        Parse the restore job parameters: set self._copy to the newest copy
        and self._last_backup_copy to the previous one when present, then mark
        the object initialized.

        Returns:
            bool: True on success, False when job/copy info is missing.
        """
        log.info("Start to get restore param file.")
        job_info = self._json_param_object.get("job")
        if not job_info:
            log.error(f"No job info in param file, main task: {self._job_id}, pid: {self._pid}")
            return False
        copies_info = job_info.get("copies")
        if not copies_info or len(copies_info) == 0:
            log.error(f"No copy info in param file, main task: {self._job_id}, pid: {self._pid}")
            return False
        self._copy = copies_info[-1]
        # Fix: the original guard was `if len(copies_info):`, which is always
        # true at this point; with a single copy, copies_info[-2] wrapped
        # around to copies_info[-1], aliasing _last_backup_copy to _copy.
        if len(copies_info) > 1:
            self._last_backup_copy = copies_info[-2]
        self._initialized = True
        log.info("Analyze param file successfully.")
        return True

    def aggregate_single_copy_object_data(self, cache_path, object_data_path):
        """
        Merge every object-data sqlite db under object_data_path into a single
        db file (self._concrete_object_db) under cache_path.

        cache_path: directory that receives the merged db file.
        object_data_path: directory holding the source db files.
        Returns:
            bool: True on success, False on failure.
        """
        log.info(f"Start to merge db in object_data_path {object_data_path} to cache_path {cache_path}")
        merged_db = os.path.join(cache_path, self._concrete_object_db)
        # A symlink could redirect the merged db elsewhere; remove it first.
        if os.path.islink(merged_db):
            os.remove(merged_db)

        try:
            object_conn = sqlite3.connect(merged_db)
        except Exception as ex:
            log.error(f"Connect sqlite {self._concrete_object_db} failed for {ex}.main task:{self._job_id}")
            return False
        # Fix: the original leaked the destination connection and one
        # connection per source db; release them on every path.
        try:
            object_cur = object_conn.cursor()
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
            if not object_tables:
                object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                                   "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,"
                                   "[bsaObjectOwner] VARCHAR(64),[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
                                   "[estimatedSize] VARCHAR(100) NOT NULL,[resourceType] VARCHAR(32),"
                                   "[objectType] INTEGER(8),[objectStatus] INTEGER(8),[objectDescription] VARCHAR(100),"
                                   "[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),[restoreOrder] VARCHAR(100),"
                                   "[storePath] VARCHAR(1280) NOT NULL,[filesystemName] VARCHAR(256) NOT NULL,"
                                   "[filesystemId] VARCHAR(128) NOT NULL,[filesystemDeviceId] VARCHAR(256) NOT NULL,"
                                   "[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
            if not object_tables or len(object_tables) == 0:
                log.error(f"Create dws table failed. main task:{self._job_id}")
                return False

            db_file_list = self.get_all_db_files(object_data_path)
            if not db_file_list:
                log.error("No db file")
                return False
            for db_file in db_file_list:
                src_conn = sqlite3.connect(db_file)
                try:
                    rows = src_conn.cursor().execute("select * from BsaObjTable").fetchall()
                finally:
                    src_conn.close()
                for line in rows:
                    # Fix: the original interpolated str(line) into the SQL
                    # and blindly replaced the substring "None" anywhere in it
                    # (corrupting values containing "None", breaking on
                    # quotes). Use a parameterized insert and map NULLs to ''
                    # to keep the stored values identical.
                    values = tuple('' if v is None else v for v in line)
                    placeholders = ",".join(["?"] * len(values))
                    object_cur.execute(f"insert into BsaObjTable values ({placeholders})", values)
                # One commit per source db instead of per row.
                object_conn.commit()
        finally:
            object_conn.close()
        xbsa_resource_path = os.path.join(cache_path, self._concrete_object_db)
        if os.path.isfile(xbsa_resource_path):
            set_user_and_group(xbsa_resource_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(xbsa_resource_path, PermissionNode.PERMISSION_700)
        log.info("success to merge sqlite")
        return True

    def get_archive_meta_prefix(self):
        """
        For direct restore of an archive-to-cloud copy: while the pre-task
        fetches object data, resolve the path prefix of the object metadata
        inside the archive.

        :return: "<source_policy...>/objectmeta" prefix, or "" on failure
        """
        remote_path = []
        for repo in self._copy.get("extendInfo", {}).get("repositories", []):
            if repo.get("role", "") == StorageRole.MASTER and \
                    repo.get("type", "") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                remote_path = repo.get("remotePath", [])
                # NOTE: the old code logged a failure error here on success.
                break
        if not remote_path:
            log.error(f"Fail to get archive remote path. main task: {self._job_id}")
            return ""
        target_path = ""
        repository_type = RepositoryDataTypeEnum.META_REPOSITORY
        for path in remote_path:
            if path.get("type", "") == repository_type:
                target_path = path.get("path", "")
                log.info(f"Get target_path")
                break
        # str.find avoids the ValueError str.index raised when the marker is
        # absent (e.g. malformed copy metadata).
        pos = target_path.find("source_policy")
        if pos < 0:
            log.error(f"No source_policy in meta path. main task: {self._job_id}")
            return ""
        # target_path[-(len(target_path) - pos):] is simply target_path[pos:].
        return os.path.join(target_path[pos:], "objectmeta")

    def get_all_db_files(self, object_data_path):
        """
        Collect every per-host *.db object-data file to be aggregated.

        :param object_data_path: root directory with one sub-directory per host
        :return: list of *.db file paths (possibly empty)
        """
        db_file_list = []
        for host_key_path in os.listdir(object_data_path):
            db_path = os.path.join(object_data_path, host_key_path)
            # Scan each host directory once and reuse the glob result instead
            # of glob-for-emptiness followed by a second listdir pass.
            db_files = glob.glob(os.path.join(db_path, "*.db")) if os.path.isdir(db_path) else []
            if not db_files:
                log.warn(f"There is no object data in metadata path {db_path}. main task:{self._job_id}")
                continue
            db_file_list.extend(db_files)
        return db_file_list

    def get_file_attribute(self, file_name):
        """
        Return the owning (uid, gid) of the given file/directory.

        :param file_name: path to stat
        :return: tuple (st_uid, st_gid)
        :raises OSError: if the path cannot be stat'ed
        """
        object_file_attribute = os.stat(file_name)
        # Fix broken log message: the expressions were printed literally
        # instead of being interpolated into the f-string.
        log.info(f"Get user id {object_file_attribute.st_uid}, and group id {object_file_attribute.st_gid} success. "
                 f"main task:{self._job_id}")
        return object_file_attribute.st_uid, object_file_attribute.st_gid

    def write_fs_relationship(self):
        """
        Create the directory that holds the filesystem mapping file, write the
        mapping json and restrict its ownership/permissions. Used only for
        backup copies, replication copies and replication copies archived to
        cloud.

        :return: True on success, False otherwise
        """
        log.info(f"start to write_fs_relationship")
        cache_path = self._cache_area
        if not cache_path:
            log.error(f"No cache_path path can be read or written."
                      f"metadata_path:{cache_path}, main task:{self._job_id}")
            return False
        copy_type = self.get_copy_type()
        if copy_type == "tapeArchive":
            relation_dir = os.path.join(self._meta_area, "meta",
                                        self._copy_id, "archiveDownload")
        else:
            relation_dir = os.path.join(cache_path, "meta", self._copy_id, "replication")
        log.info(f"_write_fs_relationship temp_relation_path {relation_dir}")
        if not os.path.exists(relation_dir):
            log.info(f"No replication dir: {relation_dir}")
            if not exec_mkdir_cmd(relation_dir):
                return False
        relation_file = os.path.join(relation_dir, "filesystemRelationship.txt")
        if os.path.islink(relation_file):
            try:
                os.remove(relation_file)
            except Exception as err:
                log.error(f"Relation file is a link file. {err}")
                return False
        # Pick the mapping source according to the copy type.
        if copy_type == "log":
            relations = self.get_log_fs_relation(self._last_backup_copy, self._copy)
        elif copy_type == "tapeArchive":
            relations = self.get_tape_fs_relation(self._copy)
        else:
            relations = self.get_fs_relation(self._copy)
        if not relations:
            log.error(f"Fail to get relation info. main task:{self._job_id}")
            return False
        log.info(f"write fs relationship get relation_info: {relations}")
        try:
            written = exec_overwrite_file(relation_file, {"relations": relations})
        except Exception as err:
            log.error(f"Fail to write relation file for {err}")
            return False
        if not written:
            log.error(f"Fail to write relation file")
            return False
        log.info("write fs relationship success")
        # Lock the directory (700) and the file (400) down to rdadmin.
        for target, perm in ((relation_dir, PermissionNode.PERMISSION_700),
                             (relation_file, PermissionNode.PERMISSION_400)):
            set_user_and_group(target, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(target, perm)
        log.info(f"end to write_fs_relationship")
        return True

    def get_tape_fs_relation(self, log_copy_info):
        """
        Build the old->new filesystem mapping when restoring from a tape
        (archive) copy: pair each data repository (repositoryType == 1) of the
        copy with the previously saved relation at the same position.

        :param log_copy_info: copy dict containing "repositories"
        :return: list of relation dicts (possibly empty)
        """
        repo_relations_path = os.path.join(self._meta_area, 'meta', self.backup_copy_id, 'repoRelations')
        # read_file is assumed to return the parsed list of old relations
        # saved at backup time -- TODO confirm against the writer side.
        old_relations = read_file(repo_relations_path)
        log.info(f"get_tape_fs_relation old relations: {old_relations}")
        relations = []
        index = 0
        for rep in log_copy_info.get("repositories", []):
            if rep.get("repositoryType") != 1:
                continue
            # Guard against fewer saved relations than current repositories;
            # the original indexing raised IndexError in that case.
            old = old_relations[index] if index < len(old_relations) else {}
            # Default extendInfo to {} so a missing key cannot raise
            # AttributeError on the chained .get().
            extend_info = rep.get("extendInfo", {})
            relations.append({
                'newEsn': extend_info.get("esn"),
                'newFsId': extend_info.get("fsId"),
                'newFsName': rep.get("remotePath", "").strip("/").split("/")[0],
                'oldEsn': old.get("oldEsn", ""),
                'oldFsId': old.get("oldFsId", ""),
                'oldFsName': old.get("oldFsName", ""),
                'role': rep.get("role")
            })
            index += 1
        log.info(f"get_tape_fs_relation new relations: {relations}")
        return relations

    def get_log_fs_relation(self, data_copy_info, log_copy_info):
        """
        Merge the filesystem mappings of the data copy and the log copy when
        restoring from a replicated copy.

        :param data_copy_info: copy dict of the base data copy
        :param log_copy_info: copy dict of the log copy
        :return: concatenated relation list, data-copy relations first
        """
        def _relations_of(copy_info):
            # Relations live under extendInfo.fsRelations.relations.
            return copy_info.get("extendInfo", {}).get("fsRelations", {}).get("relations", [])

        relations = _relations_of(data_copy_info) + _relations_of(log_copy_info)
        log.info(f"get_log_fs_relation success. {self.get_log_comm()}")
        return relations

    def get_single_type_repo_path(self, target_type):
        """
        Return the "path" of the first repository in the copy whose
        repositoryType equals target_type.

        :param target_type: repository type to look for
        :return: the repository path, or "" when missing
        """
        repositories = self._copy.get("repositories")
        # `not repositories` already covers None and the empty list; the old
        # extra `len(repositories) == 0` check was redundant.
        if not repositories:
            log.error(f"Fail to get repositories, main task: {self._job_id}.")
            return ""
        tmp_repo = {}
        for repo in repositories:
            if repo.get("repositoryType") == target_type:
                log.info(f"repositoryType is target_type")
                tmp_repo = repo
                break
        if not tmp_repo:
            log.warn(f"Fail to get type({target_type}), main task: {self._job_id}.")
            return ""
        tmp_path = tmp_repo.get("path")
        if not tmp_path:
            log.warn(f"Fail to get path {target_type}, main task: {self._job_id}.")
            return ""
        return tmp_path

    def handle_single_speed_file(self, speed_file):
        """
        Read one per-host speed file (a single json line) and return the
        "totalSizeInMB" value it reports.

        :param speed_file: path of the speed file
        :return: size in MB, or 0 when the file is empty or lacks the field
        :raises OSError: if the file cannot be opened
        """
        with open(speed_file, "r") as tmp_fo:
            lines = tmp_fo.readlines()
        if not lines:
            # Fix broken log messages: the file path was printed as the
            # literal text "speed_file" instead of being interpolated.
            log.warn(f"Fail to read size file {speed_file}")
            return 0
        speed_dict = json.loads(lines[0])
        log.debug(f"speed_dict in {speed_file}")
        single_host_size = speed_dict.get("totalSizeInMB", 0)
        if not single_host_size:
            log.warn(f"No totalSizeInMB in {speed_file} for {self._job_id}")
            return 0
        return single_host_size

    @log_start()
    def download_repo_path_from_archive(self, repo_path, archive_addr):
        """
        Fetch one repository path of an archived copy by running the agentcli
        "GetFileFromArchive" command as user rdadmin.

        :param repo_path: repository-relative path prefix to fetch
        :param archive_addr: address of the archive storage
        :return: True when agentcli reports success, False otherwise
        """
        archive_copy_id = self._copy.get("id", "")
        restore_object = repo_path
        if not restore_object:
            log.error(f"Fail to get prefix. main task:{self._job_id}")
            return False
        # The search path is wrapped in slashes and base64-encoded before it
        # is handed to agentcli on the command line.
        restore_object = f"/{restore_object}/"
        log.debug(f"The archive search path is {restore_object}")
        try:
            restore_object = base64.b64encode(restore_object.encode(encoding='utf-8', errors='strict'))
        except Exception as err:
            log.error(f"Fail to transfer {restore_object} to bytes for {err}. main task:{self._job_id}")
            return False
        restore_object = restore_object.decode('utf-8')
        # Adjust the permission of the resource-id directory (cache parent).
        if self._cache_area == "":
            log.error(f"No cache path can be read or written. cache_path: {self._cache_area}, "
                      f"main task:{self._job_id}")
            return False
        os.chmod(os.path.dirname(self._cache_area), PERMISSION_755)
        # 1.4 Execute the fetch command. The first part of the template is a
        # plain string whose {placeholders} are filled by ExecFuncParam from
        # fmt_params_list (validated); only restore_object is an f-string.
        param = ExecFuncParam(os_user="rdadmin", cmd_list=[
            "{agentcli_path} GetFileFromArchive {archive_copy_id} {archive_addr} {target_objectdata_path} "
            f"{restore_object}"],
                              fmt_params_list=[[("agentcli_path", self._agentcli_path, ValidatorEnum.PATH_CHK_FILE),
                                                ("archive_copy_id", archive_copy_id, ValidatorEnum.REGEX_CHK_UUID4),
                                                ("archive_addr", archive_addr, ValidatorEnum.CHAR_CHK_COMMON),
                                                ("target_objectdata_path", self._target_objectdata_path,
                                                 ValidatorEnum.PATH_CHK_DIR)]],
                              shell_file="/bin/sh", chk_exe_owner=False)
        try:
            result, last_before = su_exec_cmd_list(param)
        except Exception as err:
            log.error(f"execute_cmd {restore_object} to bytes for {err}. main task:{self._job_id}")
            return False
        log.info(f"su_exec_cmd_list,{result}")
        if result != CMDResult.SUCCESS.value:
            log.error(f"Get object data from archive failed. last_before:{last_before}. "
                      f"main task:{self._job_id}")
            return False
        log.debug(f"Execute cmd success. restore_object: {restore_object}, output: {last_before}")
        return True

    def get_job_info(self):
        """
        Assemble the JobInfo descriptor for the current (sub-)job from the
        identifiers cached on this instance.

        :return: populated JobInfo object
        """
        fields = {
            "pid": self._pid,
            "job_id": self._job_id,
            "sub_job_id": self._sub_job_id,
            "copy_id": self._copy_id,
            "sub_job_type": self._sub_job_name,
            "cache_path": self._cache_area,
            "instance_id": self._instance_id,
        }
        return JobInfo(**fields)