#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import base64
import json
import glob
import os
import pathlib
import shutil
import sqlite3
import stat
import threading
import time
import re

from pathlib import Path
from common.cleaner import clear
from common.common_models import SubJobDetails, LogDetail, SubJobModel
from common.const import ParamConstant, SubJobStatusEnum, SubJobPriorityEnum, RepositoryDataTypeEnum, \
    RepoProtocalType, CMDResult
from common.common import output_result_file
from common.env_common import get_install_head_path
from common.file_common import exec_lchown, exec_lchown_dir_recursively
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file, su_exec_cmd_list, ExecFuncParam
from common.util.validators import ValidatorEnum
from dws.commons.const import PERMISSION_755, StorageRole
from dws.commons.function_tool import log_start
from hcs.gaussdb.common.const import SubJobType, SubJobPolicy, GaussSubJobName, EnvName, GaussBackupStatus, \
    GaussDBCode, LogLevel, ErrorCode, RoachConstant, LastCopyType, RpcParamKey, TaskTypeEnum, \
    GaussCopyType, JobInfo, RepositoryType, UserInfo, PermissionNode, BusinessConfigType
from hcs.gaussdb.common.gaussdb_common import report_job_details, get_std_in_variable, \
    extract_ip, check_path_valid, get_agent_roach_host_and_port, \
    exec_rc_tool_cmd, is_increment_copy, get_all_db_files, check_path_safe, read_file, \
    write_progress_file_with_speed, set_user_and_group, set_permisson, query_size_and_speed
from hcs.gaussdb.common.gaussdb_restful import request_post
from hcs.gaussdb.common.safe_get_information import ResourceParam
from hcs.gaussdb.common.gaussdb_common import mount_bind_backup_path, umount_bind_backup_path
from hcs.gaussdb.handle.resource.resource_info import ResourceInfo
from hcs.gaussdb.logger import log
from hcs.sdk.hcs_manager import HcsManager


class Restore:

    def __init__(self, pid, job_id, sub_job_id, data, json_param):
        """Build the restore task context from the agent-supplied parameters.

        :param pid: request id, used when reporting results back to the agent
        :param job_id: main restore job id
        :param sub_job_id: current sub job id (may be empty for pre/gen steps)
        :param data: raw stdin content handed over by the agent framework
        :param json_param: parsed task parameter dict; must be non-empty
        :raises Exception: when json_param is falsy
        """
        if not json_param:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._agentcli_path = f"{get_install_head_path()}/DataBackup/ProtectClient/ProtectClient-E/bin/agentcli"
        self._std_in = data
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self._json_param_object = json_param
        self._copy = {}
        # For log restore, the previous backup repository carries the cache repo info.
        self._last_backup_copy = {}
        self._get_param_from_file()
        self._copy_type = self._copy.get("type", "")
        # Archive copies go through the full-restore flow internally.
        if self._copy_type == "s3Archive":
            self._copy_type = "full"
        self._meta_area, self._cache_area, self._data_area = self.get_available_path()
        # ID of the copy to restore: the plain copy id for a normal restore,
        # a fixed copy id for an archive restore.
        self._copy_id = self._copy.get("id", "")
        self._host_ip, port = get_agent_roach_host_and_port()
        self._logdetail = None
        self._err_info = {}
        # Progress polling interval in seconds.
        self._query_progress_interval = 60
        self._instance_id = self._json_param_object.get("job", {}).get("targetObject", {}).get("id", "")
        self._sub_job_name = ""
        self._job_status = SubJobStatusEnum.RUNNING.value
        self._restore_status = GaussBackupStatus.RUNNING
        self._err_code = 0
        self._concrete_object_db = "backupkey.db"
        self.user_name = get_std_in_variable(f"{EnvName.IAM_USERNAME}_{pid}")
        # Fields below assemble the resource-access request body.
        self._extend_info = self._copy.get("protectEnv", {}).get("extendInfo", {})
        # For archive restore this is the plain copy id associated with the archive copy.
        self.backup_copy_id = self.get_backup_copy_id()
        self._project_name = self._extend_info.get("projectName", "")
        self._project_id = self._extend_info.get("projectId", "")
        self._project_addr = self._extend_info.get("pmAddress", "")
        self._project_domain = self._extend_info.get("iamAccountName", "")
        self._business_addr = self._extend_info.get("businessAddr", "")
        self.restore_extend_info = self._json_param_object.get("job", {}).get("extendInfo", {})
        self.client_crt = self.restore_extend_info.get("client_crt", "")
        self.restore_target_location = self.restore_extend_info.get("targetLocation", "")
        self._fun_inst = ResourceInfo(pid, data, self._project_addr, self._project_name,
                                      self._project_domain, self._business_addr, self._project_id)
        self.client_key = self._fun_inst.client_key
        self.ca_cert_pem = self._fun_inst.ca_cert_pem
        self.rand_pass = self._fun_inst.rand_pass
        self.restore_to_new_password = self._fun_inst.restore_to_new_password
        self.restore_time = ""
        self._target_objectdata_path = ""

    @staticmethod
    def read_param_file(file_path):
        """Load a JSON parameter file and return it as a dict.

        :param file_path: path to the JSON parameter file
        :raises Exception: when the file is missing or not valid JSON
        """
        if not os.path.isfile(file_path):
            raise Exception(f"File:{file_path} not exist")
        try:
            with open(file_path, "r", encoding='UTF-8') as handle:
                parsed = json.load(handle)
        except Exception as ex:
            raise Exception("parse param file failed") from ex
        return parsed

    @staticmethod
    def set_error_response(response):
        """Mark *response* as failed (both return code and body error code)."""
        failed = GaussDBCode.FAILED.value
        response.code = failed
        response.body_err = failed

    @staticmethod
    def get_repository_path(file_content, repository_type):
        """Return the first path of the given repository type from the last copy.

        :param file_content: full task parameter dict
        :param repository_type: repository type id to look up
        :return: the repository's first mount path, or "" when not found
        """
        copies_info = file_content.get("job", {}).get("copies", [])
        if not copies_info:
            # BUGFIX: the original indexed copies_info[-1] unguarded and raised
            # IndexError when the job carried no copies.
            log.error("No copies info in job param.")
            return ""
        repositories = copies_info[-1].get("repositories", [])
        repositories_path = ""
        for repository in repositories:
            # .get avoids a KeyError on malformed repository entries.
            if repository.get('repositoryType') == repository_type:
                paths = repository.get("path") or []
                if paths:
                    repositories_path = paths[0]
                    log.info(f"repositories_path {repositories_path}")
                break
        return repositories_path

    @staticmethod
    def get_params_by_key(param, json_const):
        """Return *json_const* from the last copy's protectObject extendInfo.

        :param param: full task parameter dict
        :param json_const: key to read from extendInfo
        :return: the value, or "" when the key or copies are missing
        """
        copies_info = param.get("job", {}).get("copies", [])
        if not copies_info:
            # BUGFIX: the original indexed an empty list and raised IndexError.
            log.error(f"Get param {json_const} failed: no copies info.")
            return ""
        value = copies_info[-1].get("protectObject", {}).get("extendInfo", {}).get(json_const, "")
        if not value:
            # BUGFIX: the original f-string never interpolated the key name.
            log.error(f"Get param {json_const} failed.")
        return value

    @staticmethod
    def get_copy_params_by_key(param, json_const):
        """Return *json_const* from the first copy's protectObject extendInfo.

        :param param: full task parameter dict
        :param json_const: key to read from extendInfo
        :return: the value, or "" when the key or copies are missing
        """
        copies_info = param.get("job", {}).get("copies", [])
        if not copies_info:
            # BUGFIX: the original indexed [0] on an empty list and raised IndexError.
            log.error(f"Get param {json_const} failed: no copies info.")
            return ""
        value = copies_info[0].get("protectObject", {}).get("extendInfo", {}).get(json_const, "")
        if not value:
            # BUGFIX: the original f-string never interpolated the key name.
            log.error(f"Get param {json_const} failed.")
        return value

    @staticmethod
    def remove_dir(path):
        """Delete *path*, whether it is a directory tree or a single file.

        Silently does nothing when the path does not exist.
        """
        if os.path.isdir(path):
            shutil.rmtree(path)
            return
        if os.path.isfile(path):
            os.remove(path)

    @staticmethod
    def scan_same_system(filesystem, fs_dict, no_same_file_system):
        """Return False when the two filesystems share a name, else pass the flag through."""
        same_name = filesystem.get("name", "") == fs_dict.get("name", "")
        return False if same_name else no_same_file_system

    @staticmethod
    def build_mount_paht_preffix(mount_path, repository_type):
        """For log repositories, strip the last path segment from every mount path.

        Non-log repositories get the list back untouched.
        """
        if repository_type != RepositoryType.LOG:
            return mount_path
        trimmed = [path[:path.rfind('/')] for path in mount_path]
        log.info(f"Cur backup type is log, mount path: {trimmed}")
        return trimmed

    def set_logdetail(self):
        """Reset self._logdetail to a single error entry carrying self._err_code."""
        detail = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                           logDetailInfo=[], logLevel=3)
        detail.log_detail = self._err_code
        self._logdetail = [detail]
        return True

    def get_copy_type(self):
        """Return the copy type (full/increment/replication/s3Archive etc.), "" when absent."""
        backup_type = self._copy.get("type")
        log.info(f"get backup type: {backup_type}")
        if backup_type:
            return backup_type
        log.error(f"No backup type info in extendInfo, main task: {self._job_id}.")
        return ""

    def get_backup_copy_id(self):
        """Return the plain backup copy id linked to this copy.

        For archive copies (s3/tape) the id lives one extendInfo level deeper.
        :return: the copy id, or "" when not present
        """
        # Call get_copy_type() once instead of twice: the original evaluated it
        # for each side of the `or`, doing duplicate lookups and duplicate logging.
        copy_type = self.get_copy_type()
        extend_info = self._copy.get("extendInfo", {})
        if copy_type in ("s3Archive", "tapeArchive"):
            return extend_info.get("extendInfo", {}).get("copyId", "")
        return extend_info.get("copyId", "")

    def set_log_detail_with_params(self, log_label, sub_job_id, err_code=None, log_detail_param=None,
                                   log_level=LogLevel.INFO.value):
        """Replace self._logdetail with one entry built from the given label and params."""
        detail = LogDetail(logInfo=log_label,
                           logInfoParam=[sub_job_id],
                           logTimestamp=int(time.time()),
                           logDetail=err_code,
                           logDetailParam=log_detail_param,
                           logLevel=log_level)
        self._logdetail = [detail]
        return True

    def get_progress(self):
        """Query HCS for the progress of the saved restore job; {} when no job file exists."""
        # The restore job id was persisted by the exec sub job.
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if not os.path.exists(job_file):
            log.error(f"copy info path not exist.")
            return {}
        restore_job_id = read_file(job_file)
        log.info(f"get restore_job_id:{restore_job_id}")
        return HcsManager.get_job_progress(self._json_param_object, self._pid, restore_job_id)

    def get_log_comm(self):
        """Return a uniform pid/job/sub-job triplet for log lines."""
        return "pid:{} jobId:{} subjobId:{}".format(self._pid, self._job_id, self._sub_job_id)

    def write_restore_progress(self):
        """Poll the HCS restore job and write its status/progress to the progress file.

        Loops until the remote job leaves the RUNNING state, polling once per
        self._query_progress_interval seconds.
        :return: True only when the remote job ended in SUCCEED
        """
        while self._restore_status == GaussBackupStatus.RUNNING:
            # A missing progress file may simply not be generated yet; not a failure.
            progress_info = self.get_progress()
            log.info(f"Start to write progress. {progress_info}, job id: {self._job_id}")
            # BUGFIX: the defaults were the strings "" / "{}", so a missing "job"
            # key made the .get calls below raise AttributeError on a str.
            job_json = progress_info.get("job", {}) or {}
            self._restore_status = job_json.get("status", "")
            if self._restore_status == GaussBackupStatus.RUNNING:
                status = SubJobStatusEnum.RUNNING.value
            elif self._restore_status == GaussBackupStatus.SUCCEED:
                status = SubJobStatusEnum.COMPLETED.value
            else:
                status = SubJobStatusEnum.FAILED.value
            log.info(f"restore job status: {status}")
            try:
                # Progress arrives as a percentage string such as "42%".
                # BUGFIX: the old slice [0:len-1] turned the "0" default into "",
                # so int("") raised and the default always fell back via except.
                progress_msg = job_json.get("progress", "0")
                progress = int(progress_msg.rstrip("%") or 0)
            except Exception:
                log.error("Failed calculate progress")
                progress = 0
            log.info(f"restore job progress: {progress}")
            job_info = self.get_job_info()
            write_progress_file_with_speed(job_info, status, progress)
            # Poll once per self._query_progress_interval (60s); the old comment
            # claimed 30s, which did not match the configured interval.
            time.sleep(self._query_progress_interval)
        return self._restore_status == GaussBackupStatus.SUCCEED

    def upload_restore_progress(self):
        """Report restore progress read from the cache file until the job stops running."""
        while self._job_status == SubJobStatusEnum.RUNNING.value:
            progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
            # A missing progress file may just not be generated yet; keep reporting RUNNING.
            running_report = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                           taskStatus=SubJobStatusEnum.RUNNING.value,
                                           progress=0, logDetail=self._logdetail)
            if not os.path.exists(progress_file):
                log.info("upload_restore_progress has no progress_file")
                report_job_details(self._job_id, running_report.dict(by_alias=True))
                time.sleep(self._query_progress_interval)
                continue
            with open(progress_file, "r") as handle:
                progress_dict = json.loads(handle.read())
            self._job_status = progress_dict.get("taskStatus")
            if not self._job_status:
                # No status in the file counts as a failure of the sub job.
                log.error(f"Failed to obtain the task status.{self.get_log_comm()}")
                self._job_status = SubJobStatusEnum.FAILED.value
                progress_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                              taskStatus=SubJobStatusEnum.FAILED.value, progress=100,
                                              logDetail=self._logdetail).dict(by_alias=True)
            log.info(f"upload_restore_progress has progress_file {progress_dict}")
            time.sleep(self._query_progress_interval)
            report_job_details(self._job_id, progress_dict)

    def restore_task(self):
        """Run one restore sub job (mount/exec/umount) and report its result.

        Starts a daemon thread that keeps reporting progress while the selected
        sub job runs; on success the final COMPLETED detail includes size/speed
        computed from the start time recorded by gen_sub_job.
        :return: True when the sub job finished successfully, else False
        """
        job_info = self.get_job_info()
        write_progress_file_with_speed(job_info, SubJobStatusEnum.RUNNING.value, 0)
        # Start a thread that keeps querying and reporting restore progress.
        sub_job_dict = {
            GaussSubJobName.SUB_MOUNT: self.sub_job_mount,
            GaussSubJobName.SUB_EXEC: self.sub_job_exec,
            GaussSubJobName.SUB_UMOUNT: self.sub_job_umount
        }
        progress_thread = threading.Thread(name='exec_restore', target=self.upload_restore_progress)
        progress_thread.daemon = True
        progress_thread.start()
        # Execute the sub job selected by the task parameters.
        sub_job_name = ResourceParam.get_sub_job_name(self._json_param_object)
        if not sub_job_name:
            return False
        self._sub_job_name = sub_job_name
        try:
            ret = sub_job_dict.get(sub_job_name)()
        except Exception as err:
            self._job_status = SubJobStatusEnum.FAILED.value
            log.error(f"Exec sub job {sub_job_name} failed, error{err}, job info{self.get_log_comm()}.")
            self.report_job_fail(sub_job_name, err.args[0])
            return False
        if not ret:
            log.error(f"Exec sub job {sub_job_name} failed.{self.get_log_comm()}.")
            self.report_job_fail(sub_job_name, "")
            return False
        log_detail = LogDetail(logInfo="plugin_task_subjob_success_label", logInfoParam=[self._sub_job_id], logLevel=1)
        # Fetch the data size and speed recorded for this job (only when the
        # start-time file exists; otherwise report zeros).
        time_file = os.path.join(job_info.cache_path, f'T{job_info.job_id}')
        if os.path.exists(time_file):
            size, speed = query_size_and_speed(job_info.cache_path, job_info.job_id, job_info.copy_id)
        else:
            size, speed = 0, 0
        log.info(f"write_progress_file_with_speed, size {size} KB, get speed {speed} KB/s")
        report_job_details(self._pid, SubJobDetails(taskId=self._job_id,
                                                    subTaskId=self._sub_job_id,
                                                    progress=100,
                                                    logDetail=[log_detail],
                                                    dataSize=size,
                                                    speed=speed,
                                                    taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))
        log.info(f"Exec sub job success: {sub_job_name} .")
        progress_thread.join()
        return True

    def report_job_fail(self, sub_job_name, error_message):
        """Report the current sub job as FAILED with an error-specific log detail."""
        log.error(f"get error_message: {error_message}")
        log_detail_param = []
        if sub_job_name == GaussSubJobName.SUB_EXEC:
            # Exec failures carry the instance id and the raw message for display.
            self._err_code = ErrorCode.ERR_BACKUP_RESTORE
            log_detail_param = [self._instance_id, error_message]
        log_detail = LogDetail(logInfo="plugin_task_subjob_fail_label", logInfoParam=[self._sub_job_id],
                               logLevel=LogLevel.ERROR.value, logDetail=self._err_code,
                               logDetailParam=log_detail_param)
        fail_report = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=100,
                                    logDetail=[log_detail], taskStatus=SubJobStatusEnum.FAILED.value)
        report_job_details(self._pid, fail_report.dict(by_alias=True))

    def build_sub_job(self, job_priority, job_type, job_name, node_id=None):
        """Build one business sub-job dict (ignoreFailed is always False)."""
        sub_job = SubJobModel(jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                              execNodeId=node_id, jobPriority=job_priority, jobName=job_name,
                              policy=job_type, ignoreFailed=False)
        return sub_job.dict(by_alias=True)

    def do_post(self):
        """Post job: delete the progress file if present; always reports success."""
        log.info(f"step 3-1 start to do post job")
        progress_file = os.path.join(self._cache_area, f"progress_{self._job_id}_{self._sub_job_id}")
        if not os.path.exists(progress_file):
            log.warn(f'Progress file {progress_file} not exist! failed to delete while doing post job!')
            return True
        os.remove(progress_file)
        return True

    def sub_job_exec(self):
        """Dispatch the exec sub job to the original-location or new-location flow."""
        if self.restore_target_location == "original":
            # Restore back to the original instance.
            return self.sub_job_exec_to_original()
        # Restore to a freshly created instance.
        return self.sub_job_exec_to_new()

    def gen_sub_job(self):
        """Split the restore into mount / exec / umount sub jobs and persist them.

        Also records the restore start time into the cache repo so the exec
        step can later compute size and speed.
        :return: True (splitting itself cannot fail here)
        """
        log.info("step 2-1 start to gen_sub_job")

        # Record the restore start time (used later for speed calculation).
        start_time = str(int((time.time())))
        start_time_path = os.path.join(self._cache_area, f'T{self._job_id}')
        exec_overwrite_file(start_time_path, start_time, json_flag=False)
        log.info(f"success to write restore start time {start_time} to {start_time_path}")

        file_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._pid}")
        # (priority, policy, name) for each sub job, replacing three copy-pasted
        # construction stanzas: mount on every node, exec on any node, umount on
        # every node. The unused `nodes` lookup was removed.
        sub_job_specs = [
            (SubJobPriorityEnum.JOB_PRIORITY_1, SubJobPolicy.EVERY_NODE_ONE_TIME.value, GaussSubJobName.SUB_MOUNT),
            (SubJobPriorityEnum.JOB_PRIORITY_2, SubJobPolicy.ANY_NODE.value, GaussSubJobName.SUB_EXEC),
            (SubJobPriorityEnum.JOB_PRIORITY_3, SubJobPolicy.EVERY_NODE_ONE_TIME.value, GaussSubJobName.SUB_UMOUNT),
        ]
        sub_job_array = [self.build_sub_job(job_priority, job_type, job_name)
                         for job_priority, job_type, job_name in sub_job_specs]
        log.info(f"Sub-task splitting succeeded.sub-task num:{len(sub_job_array)}")
        exec_overwrite_file(file_path, sub_job_array)
        log.info("step 2-1 end to gen_sub_job")
        return True

    def sub_job_mount(self):
        """Mount sub job: bind-mount the data repo and record this node's roach endpoint.

        Writes a host_port_<ip> file under the cache repo's hostInfo/<job_id>
        directory so the exec sub job can later aggregate all nodes' endpoints.
        :return: True on success
        """
        log.info("step 2-2 start to mount")
        self.save_task_info_to_cache()
        self.save_cacheinfo_to_cache()
        self.create_path_for_backup()
        mount_bind_backup_path(self._data_area)

        # Query the ip and port configured in this node's roach process, write to meta.
        # Also save this node's roachAgent id.
        host, port = get_agent_roach_host_and_port()
        host_ip = host
        host_port = f"{host}:{port}"
        # Written to log temporarily (diagnostic).
        log.info(f"step2-5-1 sub_job_mount host_port:{host_port}")
        host_port_info_file = os.path.join(self._cache_area, "hostInfo", self._job_id)
        # Create the hostInfo/<job_id> directory, recreating it from scratch
        # when a previous run left one behind.
        if not os.path.exists(host_port_info_file):
            log.info("start to make dir")
            exec_mkdir_cmd(host_port_info_file)
        else:
            log.info("start to remove and make dir")
            self.remove_dir(host_port_info_file)
            exec_mkdir_cmd(host_port_info_file)

        host_port_file = os.path.join(self._cache_area, "hostInfo", self._job_id, f"host_port_{host_ip}")
        # Remove a stale file from a previous run before writing the new one.
        if os.path.exists(host_port_file):
            log.info("start to remove File")
            os.remove(host_port_file)
        exec_overwrite_file(host_port_file, host_port)
        # Written to log temporarily (diagnostic).
        log.info(f"step2-5-1 end to exec_overwrite_file File host_port_file:"
                 f"{host_port_file}, host_port:{host_port}")
        job_info = self.get_job_info()
        write_progress_file_with_speed(job_info, SubJobStatusEnum.COMPLETED.value, 100)
        log.info("step 2-2 end to mount")
        return True

    def sub_job_set(self):
        """Collect each node's roach ip:port from cache and push them to the instance.

        Reads the host_port files written by the mount sub job, then calls the
        resource interface to set the instance extend info.
        :return: True on success
        """
        log.info("step2-3 start to sub_job_set_extend_info")
        host_port_file_path = os.path.join(self._cache_area, "hostInfo", self._job_id)
        log.info(f"step2-3 start to sub_job_set_extend_info host_port_file_path:{host_port_file_path}")
        ip_list = []
        port_list = []
        for entry in os.listdir(host_port_file_path):
            path_new = os.path.join(host_port_file_path, entry)
            if not os.path.isfile(path_new):
                continue
            # Each file holds one "ip:port" pair written by sub_job_mount.
            host_port = read_file(path_new)
            ip, sep, port = host_port.partition(":")
            if not sep:
                # BUGFIX: the original indexed split(":")[1] unguarded and raised
                # IndexError on a malformed file; skip such entries instead.
                log.error("Invalid host_port file content, skip it.")
                continue
            ip_list.append(ip)
            port_list.append(port)
        # join replaces the old trailing-comma concatenation + [:-1] strip.
        ips = ",".join(ip_list)
        ports = ",".join(port_list)
        log.info(f"step2-3-2 start to sub_job_set_extend_info _instance_id:"
                 f"{self._instance_id} ips:{ips}, ports:{ports}")
        # Push the aggregated endpoints and certificate to the instance.
        self._fun_inst.set_instance_extend_info_twice(self._instance_id, ips, ports, self.client_crt,
                                                      self._extend_info.get("xbsaConfPath", ""))
        return True

    def get_ips_and_ports(self):
        """Aggregate node roach ip/port pairs into the xbsa config extend info.

        :return: whatever ResourceInfo.config_extend_info builds from the
                 comma-joined ip and port lists (a pair of xbsa info dicts,
                 per the caller in set_restore_body)
        """
        host_port_file_path = os.path.join(self._cache_area, "hostInfo", self._job_id)
        log.info(f"step2-3 start to sub_job_set_extend_info host_port_file_path")
        ip_list = []
        port_list = []
        for entry in os.listdir(host_port_file_path):
            path_new = os.path.join(host_port_file_path, entry)
            if not os.path.isfile(path_new):
                continue
            # Each file holds one "ip:port" pair written by sub_job_mount.
            host_port = read_file(path_new)
            ip, sep, port = host_port.partition(":")
            if not sep:
                # BUGFIX: the original indexed split(":")[1] unguarded and raised
                # IndexError on a malformed file; skip such entries instead.
                log.error("Invalid host_port file content, skip it.")
                continue
            ip_list.append(ip)
            port_list.append(port)
        # join replaces the old trailing-comma concatenation + [:-1] strip.
        ips = ",".join(ip_list)
        ports = ",".join(port_list)
        log.info(f"step2-3-1 start to sub_job_set_extend_info _instance_id:"
                 f"{self._instance_id} ips, ports.")
        return ResourceInfo.config_extend_info(ips, ports, self._extend_info.get("xbsaConfPath", ""))

    def get_backup_id(self):
        """Return the backup_id recorded when the backup ran ('' when missing).

        Looked up first in the associated copy's cache repo; falls back to the
        meta repo for the reverse-replication scenario.
        """
        parent_dir = Path(self._cache_area).parent
        # Preferred location: the cache repo of the associated plain copy.
        cache_file = os.path.join(parent_dir, self.backup_copy_id, "meta", self.backup_copy_id, "backup_id")
        if os.path.exists(cache_file):
            backup_id = read_file(cache_file)
            log.info(f"get restore backup id{backup_id}.{self.get_log_comm()}")
            return backup_id
        log.warn(f"cache rep not have backup_id exist.")
        # Reverse-replication scenario: fall back to the meta repository.
        meta_file = os.path.join(self._meta_area, "meta", self.backup_copy_id, "backup_id")
        if not os.path.exists(meta_file):
            log.error(f"meta rep not have backup_id exist.")
            return ''
        backup_id = read_file(meta_file)
        log.info(f"get restore backup id{backup_id}.{self.get_log_comm()}")
        return backup_id

    def set_restore_body(self):
        """Assemble the request body for restoring to a new instance.

        :return: dict ready to POST to the GaussDB instances endpoint
        """
        ext = self.restore_extend_info
        backup_id = self.get_backup_id()
        backupset_info, route_info = self.get_ips_and_ports()
        # Mandatory fields.
        restore_body = {
            "name": ext.get("name", ""),
            "availability_zone": "",
            "flavor_ref": ext.get("flavorRef", ""),
            "volume": {"type": ext.get("volumeType", ""), "size": ext.get("volumeSize", "")},
            "vpc_id": ext.get("vpcId", ""),
            "subnet_id": ext.get("subnetId", ""),
            "security_group_id": ext.get("securityGroupId", ""),
            "password": self.restore_to_new_password,
            "restore_point": {"instance_id": self._instance_id, "backup_id": backup_id},
            "xbsa_extension_infos": [backupset_info, route_info],
        }
        availability_zone = ext.get("availabilityZone", " ")
        if len(availability_zone.split(",")) < 3:
            # Fewer than three AZs supplied: replicate the value three times.
            restore_body["availability_zone"] = ",".join([availability_zone] * 3)
        else:
            restore_body["availability_zone"] = availability_zone

        # Optional fields.
        if "chargeInfo" in ext and ext.get("chargeInfo"):
            restore_body["charge_info"] = {"charge_mode": ext.get("chargeInfo")}
        optional_keys = {
            "configuration_id": "configurationId", "port": "port", "time_zone": "timeZone",
            "master_az": "masterAz", "arbitration_az": "arbitrationAz"
        }
        for body_key, ext_key in optional_keys.items():
            if ext_key in ext and ext.get(ext_key) \
                    and self.check_restore_to_new_param(body_key, ext_key, availability_zone):
                restore_body[body_key] = ext.get(ext_key)

        # SSL material is only attached when the full certificate set is present.
        if self.ca_cert_pem and self.client_crt and self.client_key and self.rand_pass:
            restore_body["xbsa_ssl_certs"] = {
                "ca_cert_pem": self.ca_cert_pem,
                "client_crt": self.client_crt,
                "client_key": self.client_key,
                "rand_pass": self.rand_pass
            }
        return restore_body

    def check_restore_to_new_param(self, key, value, availability_zone):
        """Validate an optional restore-to-new parameter before adding it to the body."""
        param_value = self.restore_extend_info.get(value)
        # Master/arbitration AZ must appear inside the chosen availability zones.
        if key in ("master_az", "arbitration_az") and param_value not in availability_zone:
            return False
        # 'no order' marks an unset time zone (presumably a frontend placeholder — confirm).
        if key == "time_zone" and param_value == 'no order':
            return False
        return True

    def sub_job_exec_to_new(self):
        """Restore to a new instance via the GaussDB REST API and wait for completion.

        Posts the restore body to the instances endpoint, saves the returned
        job id for progress polling, then blocks until the remote job ends.
        :return: True when the remote restore job succeeded
        :raises Exception: when the service answers with an error_code
        """
        log.info(f"step 2-3 start to sub_job_exec_to_new, job id: {self._job_id}")
        restore_body = self.set_restore_body()
        token = HcsManager.get_token(self._json_param_object, self._pid)
        # Control-plane endpoint of the business domain.
        ctrl_domain = self.get_params_by_key(self._json_param_object, "businessAddr")
        # Project id the instance belongs to.
        project_id = self.get_params_by_key(self._json_param_object, "projectId")
        restore_url = f'{ctrl_domain}/gaussdb/v3.1/{project_id}/instances'
        restore_header = {'X-Auth-Token': token, 'X-Language': 'en-us'}
        log.info(f"restore_url: {restore_url}")
        # NOTE(review): restore_body carries the new-instance password; consider
        # masking this log line.
        log.info(f"restore_body: {restore_body}")
        ret, ret_body, ret_header = request_post(restore_url, restore_body, restore_header)
        # Wipe the token from memory as soon as it is no longer needed.
        clear(token)
        if not ret:
            log.error("Failed exec restore!")
            return False
        if ret_body.get("error_code"):
            log.error(f'Exec POST error with return: {ret_body.get("error_msg")}')
            raise Exception(ret_body.get("error_msg", ""))
        log.info('Succeed start restore!')

        # Persist the job id so progress queries can find it.
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if not ret_body.get("job_id", ""):
            log.error('Failed get job id!')
            return False
        job_id = ret_body.get("job_id", "")
        exec_overwrite_file(job_file, job_id)
        log.info(f'Succeed save job id: {job_id}!')

        # Poll the remote job and write progress to file until it finishes.
        restore_result = self.write_restore_progress()
        log.info(f"step 2-3 end to exec_restore, job id: {self._job_id}")
        return restore_result

    def sub_job_exec_to_original(self):
        """Restore back to the original instance and wait for completion.

        Pushes the node endpoints to the instance, starts the restore through
        HcsManager, saves the job id for polling, then blocks until it ends.
        :return: True when the remote restore job succeeded
        """
        log.info(f"step 2-3 start to sub_job_exec_to_original, job id: {self._job_id}")
        # Set the instance extend info (node ip/port list and certificate).
        self.sub_job_set()

        # Kick off the restore.
        backup_id = self.get_backup_id()
        restore_body = self.get_restore_body(backup_id)
        ret_body = HcsManager.start_restore(self._json_param_object, self._pid, restore_body)

        # Persist the job id so progress queries can find it.
        job_file = os.path.join(self._cache_area, f"jobid_{self._job_id}")
        if not ret_body.get("job_id", ""):
            log.error('Failed get job id!')
            return False
        job_id = ret_body.get("job_id", "")
        exec_overwrite_file(job_file, job_id)
        log.info(f'Succeed save job id: {job_id}!')

        # Poll the remote job and write progress to file until it finishes.
        restore_result = self.write_restore_progress()
        log.info(f"step 2-3 end to exec_restore, job id: {self._job_id}")
        return restore_result

    def get_restore_body(self, backup_id):
        """Build the restore request body.

        Log copies restore by timestamp; every other copy type restores by
        backup id.
        :param backup_id: backup id recorded at backup time
        :return: request body dict for HcsManager.start_restore
        """
        if self.get_copy_type() == "log":
            restore_time = self.restore_extend_info.get("restoreTimestamp", "")
            return {
                # Appending "000" turns a seconds timestamp into milliseconds —
                # assumed service contract, confirm against the API.
                "source": {"instance_id": self._instance_id, "type": "timestamp", "backup_id": "",
                           "restore_time": restore_time + "000"},
                "target": {"instance_id": self._instance_id}
            }
        return {
            "source": {"instance_id": self._instance_id, "type": "backup", "backup_id": backup_id},
            "target": {"instance_id": self._instance_id}
        }

    def sub_job_umount(self):
        """Unmount the bind-mounted backup paths and mark the sub job completed.

        :return: True on success
        """
        log.info("step 2-4 start to umount")
        umount_bind_backup_path()
        job_info = self.get_job_info()
        write_progress_file_with_speed(job_info, SubJobStatusEnum.COMPLETED.value, 100)
        # BUGFIX: the closing log line previously repeated "start to umount".
        log.info("step 2-4 end to umount")
        return True

    def get_job_info(self):
        """Bundle the current task identifiers into a JobInfo object."""
        return JobInfo(pid=self._pid,
                       job_id=self._job_id,
                       sub_job_id=self._sub_job_id,
                       copy_id=self._copy_id,
                       sub_job_type=self._sub_job_name,
                       cache_path=self._cache_area,
                       instance_id=self._instance_id)

    def restore_pre_job(self):
        """Pre-job step: verify the region and prepare xbsa files and copy metadata.

        :return: (ok, error_code) tuple
        :raises Exception: when target and protected regions differ
        """
        log.info("step 1-1 start to check region")
        # A restore is only allowed inside the same region as the protected copy.
        target_region = self.get_params_by_key(self._json_param_object, "region")
        protect_region = self.get_copy_params_by_key(self._json_param_object, "region")
        if target_region != protect_region:
            log.error("Check region failed.")
            raise Exception("Check region failed.")

        # Create the xbsa config files.
        log.info(f"step 1-3 start to make file for xbsa")
        self.save_bussiness_config()

        if self._copy_type == "s3Archive":
            # Archive copies: pull object data from the archive instead of
            # writing the filesystem mapping.
            if not self._get_objectdata_from_archive():
                return False, GaussDBCode.FAILED.value
            temp_path = os.path.join(f'{self._cache_area}', 'tmp', self._copy_id, 'roach_client')
            if not os.path.exists(temp_path):
                exec_mkdir_cmd(temp_path)
        else:
            self._write_fs_relationship()
        self.merge_xbsa_info()
        result_file = os.path.join(self._cache_area, "RestorePrerequisiteProgress")
        pathlib.Path(result_file).touch()
        return True, GaussDBCode.SUCCESS.value

    def merge_xbsa_info(self):
        """Aggregate the copy's object-data db files into <cache>/meta/<copy_id>/objectmeta.

        Source directory search order:
          1. <mount_parent>/<backup_copy_id>/meta/<backup_copy_id>/restoreSqlite
             (log copies only, when present)
          2. same prefix .../mergedb (cache side)
          3. <meta_repo>/meta/<backup_copy_id>/mergedb
          4. logdb fallbacks via get_object_path()

        Returns:
            bool: True on successful aggregation, False otherwise.
        """
        # Aggregate the backupkey.db files.
        # Destination directory inside the cache repo.
        target_path = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        if not os.path.exists(target_path):
            if not exec_mkdir_cmd(target_path):
                return False
        # Source candidates live next to the cache repo mount point.
        mount_path_parent = Path(self._cache_area).parent
        restore_sqlite_path = os.path.join(mount_path_parent, self.backup_copy_id, "meta", self.backup_copy_id,
                                        "restoreSqlite")
        if self.get_copy_type() == "log" and os.path.exists(restore_sqlite_path):
            object_data_path = restore_sqlite_path
        else:
            object_data_path = os.path.join(mount_path_parent, self.backup_copy_id, "meta", self.backup_copy_id,
                                            "mergedb")
        if not os.path.exists(object_data_path):
            log.error(f"No object data path({object_data_path}) in cache repo exists. main task:{self._job_id}")
            # Fall back to the copy kept in the meta repository.
            object_data_path = os.path.join(self._meta_area, "meta", self.backup_copy_id,
                                            "mergedb")
            if not os.path.exists(object_data_path):
                log.error(f"No object data path({object_data_path}) in meta repo exists. main task:{self._job_id}")
                # Last resort: the logdb directory (cache side, then meta side).
                object_data_path = self.get_object_path(mount_path_parent)
        log.info(f"Get object data path {object_data_path}")
        try:
            ret = self._aggregate_single_copy_object_data(target_path, object_data_path)
        except Exception as err:
            log.error(f"Aggregate object data failed. main task:{self._job_id}, err: {err}")
            return False
        if not ret:
            log.error(f"Aggregate object data failed. main task:{self._job_id}")
            return False
        return True

    def get_object_path(self, mount_path_parent):
        """Return the logdb object-data directory, preferring the cache-side copy.

        Falls back to the meta repository path when the directory under
        mount_path_parent does not exist (the fallback is returned even if it
        does not exist either).
        """
        candidate = os.path.join(mount_path_parent, self.backup_copy_id, "meta",
                                 self.backup_copy_id, "logdb")
        if os.path.exists(candidate):
            return candidate
        return os.path.join(self._meta_area, "meta", self.backup_copy_id, "logdb")

    def restore_prerequisite_progress(self):
        """
        Report the prerequisite task progress based on marker files in the cache repo.

        COMPLETED/100 by default; RUNNING/0 while the success marker is absent;
        FAILED/0 when the failure marker exists (failure wins).
        """
        log.info(f"step 1-4 start to upload restore prepare job progress ")
        done_marker = os.path.join(self._cache_area, "RestorePrerequisiteProgress")
        fail_marker = os.path.join(self._cache_area, "RestorePrerequisiteFailProgress")
        job_status, progress = SubJobStatusEnum.COMPLETED.value, 100
        if not os.path.exists(done_marker):
            log.info("restore_prerequisite_progress is running")
            job_status, progress = SubJobStatusEnum.RUNNING.value, 0
        if os.path.exists(fail_marker):
            log.info("restore_prerequisite_progress is fail")
            job_status, progress = SubJobStatusEnum.FAILED.value, 0
        output = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                               taskStatus=job_status, progress=progress, logDetail=self._logdetail)
        output_result_file(self._pid, output.dict(by_alias=True))

    def create_path_for_backup(self):
        """
        Create the directories needed during the task and tighten their permissions.

        Creates:
          * meta repo:  meta/<copy_id>/objectmeta/<host_ip>
          * cache repo: meta/<copy_id>/objectmeta
          * cache repo: tmp/<copy_id>/speed/<host_ip>

        Returns:
            bool: True when every directory exists (or was created), else False.
        """
        meta_path_object_host = os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta', self._host_ip)

        if not os.path.exists(meta_path_object_host):
            if not exec_mkdir_cmd(meta_path_object_host):
                log.error("Create meta_path_object_host failed.")
                return False
        self.change_restore_meta_permission(meta_path_object_host)
        cache_path_object_host = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta")
        if not os.path.exists(cache_path_object_host):
            if not exec_mkdir_cmd(cache_path_object_host):
                # Fixed: the message previously reported meta_path_object_host,
                # but it is the cache-repo path that failed here.
                log.error(f"Create cache_path_object_host {cache_path_object_host} failed.")
                return False
        self.change_cache_meta_permmison(cache_path_object_host)
        # Create the speed/<host_key> directory.
        cache_path_speed_host = os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed', self._host_ip)
        if not os.path.exists(cache_path_speed_host):
            if not exec_mkdir_cmd(cache_path_speed_host):
                log.error("Create cache_path_speed failed.")
                return False
        self.change_cache_temp_permmison(cache_path_speed_host)
        return True

    def change_cache_temp_permmison(self, cache_path_speed_host):
        """Tighten owner/group and mode along the cache-repo tmp/speed path chain."""
        # (path, owner, group, mode) applied in order: leaf first, then parents.
        specs = [
            (cache_path_speed_host,
             UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # cache repo /tmp/<copy_id>/speed
            (os.path.join(self._cache_area, 'tmp', self._copy_id, 'speed'),
             UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # cache repo /tmp
            (os.path.join(self._cache_area, 'tmp'),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # cache repo /tmp/<copy_id>
            (os.path.join(self._cache_area, 'tmp', self._copy_id),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
        ]
        for path, owner, group, mode in specs:
            set_user_and_group(path, owner, group)
            set_permisson(path, mode)

    def change_cache_meta_permmison(self, cache_path_object_host):
        """Tighten owner/group and mode along the cache-repo meta path chain."""
        # (path, owner, group, mode) applied in order: leaf first, then the chain
        # from the cache root down to objectmeta.
        specs = [
            (cache_path_object_host,
             UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # cache repo root
            (os.path.join(self._cache_area),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # cache repo /meta
            (os.path.join(self._cache_area, 'meta'),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # cache repo /meta/<copy_id>
            (os.path.join(self._cache_area, 'meta', self._copy_id),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # cache repo /meta/<copy_id>/objectmeta
            (os.path.join(self._cache_area, 'meta', self._copy_id, 'objectmeta'),
             UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        ]
        for path, owner, group, mode in specs:
            set_user_and_group(path, owner, group)
            set_permisson(path, mode)

    def change_restore_meta_permission(self, meta_path_object_host):
        """Tighten owner/group and mode along the meta-repo path chain."""
        # (path, owner, group, mode) applied in order: leaf first, then the chain
        # from the meta root down to objectmeta.
        specs = [
            (meta_path_object_host,
             UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
            # meta repo root
            (os.path.join(self._meta_area),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # meta repo /meta
            (os.path.join(self._meta_area, 'meta'),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # meta repo /meta/<copy_id>
            (os.path.join(self._meta_area, 'meta', self._copy_id),
             UserInfo.USER_ROOT, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_750),
            # meta repo /meta/<copy_id>/objectmeta
            (os.path.join(self._meta_area, 'meta', self._copy_id, 'objectmeta'),
             UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN, PermissionNode.PERMISSION_700),
        ]
        for path, owner, group, mode in specs:
            set_user_and_group(path, owner, group)
            set_permisson(path, mode)

    def save_task_info_to_cache(self):
        """
        Write taskInfo_<host_ip>.txt (repositories/taskType/copyType) into the
        cache repo under tmp/<copy_id>.

        Returns:
            bool: True when the file was written, False otherwise.
        """
        copy_type_map = {
            "full": 1, "increment": 2, "diff": 3, "log": 3, "nativeSnapshot": 5,
            "foreverIncrement": 6, "replication": 7, "s3Archive": 8, "tapeArchive": 9, "clone": 10
        }
        task_info = {
            "repositories": self.get_repo_list(),
            "taskType": TaskTypeEnum.RESTORE.value,
            "copyType": copy_type_map.get(self._copy_type)
        }
        if self._copy_type == "s3Archive":
            # Archive restore additionally needs the archive service endpoints.
            archive_ip, archive_port = self.get_archive_ip_port()
            ret_code, ssl_enable = self.get_archive_ssl_enable()
            if not archive_ip or not archive_port or not ret_code:
                return False
            task_info["archiveFileServers"] = [
                {"ip": single_ip, "port": archive_port, "sslEnabled": ssl_enable}
                for single_ip in archive_ip
            ]
        task_path = os.path.join(self._cache_area, 'tmp', self._copy_id, f'taskInfo_{self._host_ip}.txt')
        log.info(f"save_task_info_to_cache: {task_info}. {self.get_log_comm()}")
        # Persist the task info, then lock it down to rdadmin, read-only.
        exec_overwrite_file(task_path, task_info)
        set_user_and_group(task_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(task_path, PermissionNode.PERMISSION_400)
        if not os.path.exists(task_path):
            log.error(f"Create taskInfo_host_ip_txt file failed.")
            return False
        return True

    def get_archive_ip_port(self):
        """
        For direct restore from archive, resolve the dme_archive IPs and port.

        Returns:
            tuple: (list of ip strings, common port), or ([], "") when no
            service info is found or the entries are inconsistent.
        """
        service_infos = []
        # First non-cache S3 repository that carries service_info wins.
        for repo in self._copy.get("repositories", []):
            if repo.get("protocol", "") != RepoProtocalType.S3 \
                    or repo.get("repositoryType", "") == RepositoryDataTypeEnum.CACHE_REPOSITORY:
                continue
            service_infos = repo.get("extendInfo", {}).get("service_info", [])
            if service_infos:
                break
        if not service_infos:
            log.error(f"Fail to get archive info. main task: {self._job_id}")
            return [], ""
        ip_list, port = [], ""
        for info in service_infos:
            temp_ip = info.get("ip", "")
            temp_port = info.get("port", "")
            # Every entry must use the same port.
            if port and port != temp_port:
                log.error(f"All port should be the same. {port}, {temp_port}. main task: {self._job_id}")
                return [], ""
            if not temp_ip or not temp_port:
                log.error(f"The archive ip({temp_ip} or port({temp_port})). main task: {self._job_id}")
                return [], ""
            ip_list.append(temp_ip)
            port = temp_port
        return ip_list, port

    def get_archive_ssl_enable(self):
        """
        For direct S3 restore, look up whether SSL is enabled on the archive repo.

        Returns:
            tuple: (found, enable_ssl value); (False, "") when no matching repo exists.
        """
        repos = self._copy.get("repositories", [])
        match = next(
            (repo for repo in repos
             if repo.get("protocol", "") == RepoProtocalType.S3
             and repo.get("repositoryType", "") != RepositoryDataTypeEnum.CACHE_REPOSITORY),
            None)
        if match is not None:
            return True, match.get("extendInfo", {}).get("enable_ssl", "")
        log.error(f"Fail to get archive ssl enable. main task: {self._job_id}")
        return False, ""

    def save_cacheinfo_to_cache(self):
        """
        Write xbsa_cacheInfo_info_hcs_<instance_id>.txt under RoachConstant.XBSA_FILE.

        The file carries the cache/meta repo paths plus copy/task/host identifiers
        so the xbsa client on this node can locate the repositories.
        (Docstring fixed: it previously described the unrelated dws_cacheInfo.txt.)

        Returns:
            bool: True when the file exists after writing, False otherwise.
        """
        # Each node writes its own cache info file, keyed by instance id.
        cache_info = {
            "cacheRepoPath": self._cache_area,
            "metaRepoPath": self._meta_area,
            "copyId": self._copy_id,
            "taskId": self._job_id,
            "hostKey": self._host_ip
        }
        cache_info_path = os.path.join(RoachConstant.XBSA_FILE, f"xbsa_cacheInfo_info_hcs_{self._instance_id}.txt")
        exec_overwrite_file(cache_info_path, cache_info)
        # Restrict the file to rdadmin, read-only.
        set_user_and_group(cache_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(cache_info_path, PermissionNode.PERMISSION_400)
        if not os.path.exists(cache_info_path):
            # Fixed: the message previously named dws_cacheInfo.txt, the wrong file.
            log.error("Create xbsa cache info file failed.")
            return False
        return True

    def save_bussiness_config(self):
        """
        Write the business config file ({"jobType": full/increment restore type})
        to <cache>/tmp/<copy_id>/<RoachConstant.BUSINESS_CONFIG_FILE>.
        (Docstring fixed: it previously described the unrelated dws_cacheInfo.txt.)

        Returns:
            bool: True when the file exists after writing, False otherwise.
        """
        # Pick the restore type matching the copy (incremental vs full).
        if is_increment_copy(self._json_param_object):
            restore_file_type = BusinessConfigType.INCREMENT_RESTORE_TYPE
        else:
            restore_file_type = BusinessConfigType.FULL_RESTORE_TYPE
        business_type_info_dir = os.path.join(self._cache_area, "tmp", self._copy_id)
        if not os.path.exists(business_type_info_dir):
            if not exec_mkdir_cmd(business_type_info_dir):
                return False
        business_type_info = {"jobType": restore_file_type}
        # Reuse the directory path computed above instead of re-joining it.
        business_type_info_path = os.path.join(business_type_info_dir, RoachConstant.BUSINESS_CONFIG_FILE)
        exec_overwrite_file(business_type_info_path, business_type_info)
        set_user_and_group(business_type_info_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
        set_permisson(business_type_info_path, PermissionNode.PERMISSION_400)
        if not os.path.exists(business_type_info_path):
            # Fixed: the message previously named dws_cacheInfo.txt (copy-paste).
            log.error("Create business config file failed.")
            return False
        return True

    def create_backupkey_db(self):
        """
        Create the xbsa backupkey.db sqlite file with its BsaObjTable schema.

        Returns:
            bool: True when the db (and its table) exists, False on any failure.
        """
        if not self._meta_area:
            log.error(f"No usable meta path.")
            return False
        backupkey_name = os.path.join(self._cache_area, "meta", self._copy_id, "objectmeta", "backupkey.db")
        if os.path.islink(backupkey_name):
            # A symlink here could redirect writes elsewhere; drop it first.
            log.warn(f"This is a link file, remove it.")
            os.remove(backupkey_name)
        if os.path.isfile(backupkey_name):
            log.debug(f"Db {backupkey_name} file exists.")
            return True
        try:
            object_conn = sqlite3.connect(backupkey_name)
        except Exception as ex:
            log.error(f"Connect sqlite {backupkey_name} failed for {ex}")
            return False
        # Fixed: the connection was previously never committed or closed on any
        # path (resource leak; the CREATE TABLE could stay uncommitted).
        try:
            object_cur = object_conn.cursor()
            object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                               "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,"
                               "[bsaObjectOwner] VARCHAR(64),"
                               "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
                               "[estimatedSize] VARCHAR(100) NOT NULL,"
                               "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
                               "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
                               "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
                               "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
            object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
            if not object_tables:
                log.error(f"Create dws table failed.")
                return False
            object_conn.commit()
        except Exception as ex:
            log.error(f"Create table in {backupkey_name} failed for {ex}")
            return False
        finally:
            object_conn.close()
        log.info(f"Create db {backupkey_name} successfully.")
        xbsa_resource_path = os.path.join(self._cache_area, self._concrete_object_db)
        if os.path.isfile(xbsa_resource_path):
            set_user_and_group(xbsa_resource_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(xbsa_resource_path, PermissionNode.PERMISSION_700)
        return True

    def get_repo_list(self):
        """
        Parse the x8000 <-> filesystem mapping out of the task parameters,
        grouping repositories by storage device SN and merging mount paths of
        same-named filesystems on the same device.

        Returns:
            list: [{"role": ..., "deviceSN": ..., "filesystems": [...]}]
        """
        res_list = []
        if self._copy_type == "s3Archive":
            # Archive copies keep their repositories nested under extendInfo.
            for repo in self._copy.get("extendInfo", {}).get("repositories", []):
                if repo.get("type") != RepositoryDataTypeEnum.DATA_REPOSITORY:
                    continue
                role = repo.get("role", "")
                esn = repo.get("extendInfo", {}).get("esn", "")
                file_systems = self._get_archive_filesystems_from_remote_path(repo)
                res_list.append({"role": role, "deviceSN": esn, "filesystems": file_systems})
            return res_list

        repositories = self._copy.get("repositories", [])
        if self.get_copy_type() == "log":
            # Log restore needs both the log copy's repos and the last data copy's.
            log_repositories = self._copy.get("repositories", [])
            data_repositories = self._last_backup_copy.get("repositories", [])
            repositories = log_repositories + data_repositories
        for rep in repositories:
            repository_type = rep.get("repositoryType")
            # Only repositoryType 1 and 3 are mapped — NOTE(review): confirm
            # which RepositoryDataTypeEnum values these correspond to.
            if repository_type != 1 and repository_type != 3:
                continue
            role = rep.get("role")
            device_sn = rep.get("extendInfo").get("storage_info", {}).get("storage_device")
            log.info(f"device_sn: {device_sn}")
            # First segment of the remote path is taken as the filesystem name.
            remote_path = rep.get("remotePath", "").strip("/").split("/")[0]
            mount_path = rep.get("path")
            log.info(f"Get type {repository_type} repo , mount_path is : {mount_path}")
            # For log backup, hand xbsa the parent mount path so it can query
            # multiple log backup copies from the xbsa filesystem table on restore.
            mount_path = self.build_mount_paht_preffix(mount_path, repository_type)
            log.info(f"Cur restore copy type is log, mount path: {mount_path}")
            fs_dict = {
                "id": rep.get("extendInfo", {}).get("fsId", ""),
                "name": remote_path,
                "sharePath": rep.get("remotePath", "").strip("/"),
                "mountPath": mount_path
            }

            # Decide which x8000 entry this filesystem belongs to.
            not_found = True
            not_found = self.scan_file_system(device_sn, fs_dict, not_found, res_list)

            # No entry for this device yet: start a new one.
            if not_found:
                res_list.append({"role": role, "deviceSN": device_sn, "filesystems": [fs_dict]})
        return res_list

    def scan_file_system(self, device_sn, fs_dict, not_found, res_list):
        """Merge fs_dict into the res_list entry whose deviceSN matches.

        Returns:
            bool: False when a matching device entry was found (fs merged or
            appended in place); otherwise the incoming not_found value.
        """
        for entry in res_list:
            if entry.get("deviceSN", "") != device_sn:
                continue
            no_duplicate = True
            # Merge mountPath into an existing filesystem with the same name, if any.
            for existing_fs in entry.get("filesystems", []):
                no_duplicate = self.scan_same_system(existing_fs, fs_dict, no_duplicate)
            # No same-named filesystem on this device: append independently.
            if no_duplicate:
                fs_dict_name = fs_dict.get("name", "")
                log.info(f"no same file system with {fs_dict_name}")
                entry.get("filesystems", []).append(fs_dict)
            not_found = False
        return not_found

    def get_last_copy_info(self, backup_type=None):
        """Query info about the previous copy through the rc tool RPC.

        Args:
            backup_type: currently unused; kept for interface compatibility.
                NOTE(review): the query type is hard-coded to
                LastCopyType.last_copy_type_dict.get(1) — confirm this is intended.

        Returns:
            dict: RPC result, or {} when the call raises.
        """
        last_copy_type = LastCopyType.last_copy_type_dict.get(1)
        input_param = {
            RpcParamKey.APPLICATION: self._copy.get("protectObject"),
            RpcParamKey.TYPES: last_copy_type,
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id
        }
        try:
            result = exec_rc_tool_cmd(self._job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, input_param)
        except Exception as err_info:
            log.error(f"Get last copy info fail.{err_info}")
            return {}
        return result

    def get_fs_relation(self, copy_info):
        """
        Return fsRelations.relations from a copy's extend info (used when
        restoring from a replicated copy).

        Archive copies (s3Archive/tapeArchive) nest the payload one level deeper.
        """
        nested = self.get_copy_type() in ("s3Archive", "tapeArchive")
        extend_info = copy_info.get("extendInfo", {})
        if nested:
            extend_info = extend_info.get("extendInfo", {})
        return extend_info.get("fsRelations", {}).get("relations", [])

    def get_log_fs_relation(self, data_copy_info, log_copy_info):
        """
        Concatenate the fs relations of the data copy and the log copy
        (used when restoring from a replicated copy).
        """
        relations = []
        for copy_info in (data_copy_info, log_copy_info):
            relations.extend(
                copy_info.get("extendInfo", {}).get("fsRelations", {}).get("relations", []))
        log.info(f"get_log_fs_relation success. {self.get_log_comm()}")
        return relations

    def get_tape_fs_relation(self, log_copy_info):
        """Build old->new filesystem relations for tape-archive restore.

        Pairs each repositoryType==1 repository of the tape copy (new side)
        with the persisted repoRelations entry at the same position (old side).

        Args:
            log_copy_info: copy dict whose "repositories" carry the new esn/fsId.

        Returns:
            list: relation dicts with new*/old* fields and role.
        """
        repo_relations_path = os.path.join(self._meta_area, 'meta', self.backup_copy_id, 'repoRelations')
        old_relations = read_file(repo_relations_path)
        log.info(f"get_tape_fs_relation old relations: {old_relations}")
        relations = []
        tape_repositories = log_copy_info.get("repositories", [])
        # Old and new relations are matched purely by position.
        # NOTE(review): if old_relations has fewer entries than the type-1
        # repositories, old_relations[index] raises IndexError — confirm the
        # persisted file always aligns with the copy's repositories.
        index = 0
        for rep in tape_repositories:
            repository_type = rep.get("repositoryType")
            if repository_type != 1:
                continue
            device_sn = rep.get("extendInfo").get("esn")
            fs_id = rep.get("extendInfo").get("fsId")
            role = rep.get("role")
            # First path segment of the remote path is the filesystem name.
            remote_path = rep.get("remotePath", "").strip("/").split("/")[0]
            tape_relations = [
                {
                    'newEsn': device_sn,
                    'newFsId': fs_id,
                    'newFsName': remote_path,
                    'oldEsn': old_relations[index].get("oldEsn", ""),
                    'oldFsId': old_relations[index].get("oldFsId", ""),
                    'oldFsName': old_relations[index].get("oldFsName", ""),
                    'role': role
                }
            ]
            relations = relations + tape_relations
            index = index + 1
        log.info(f"get_tape_fs_relation new relations: {relations}")
        return relations

    def get_available_path(self):
        """Dispatch to the copy-type-specific path resolver.

        Returns:
            tuple: (metadata_path, cache_path, data_path).
        """
        # NOTE(review): the first branch uses self.get_copy_type() while the
        # second reads self._copy_type directly — confirm these always agree.
        if self.get_copy_type() == GaussCopyType.S3ARCHIVE:
            return self.get_available_path_archive()
        elif self._copy_type == GaussCopyType.LOG:
            return self.get_available_path_log()
        else:
            return self.get_available_path_common()

    def get_available_path_common(self):
        """
        Pick one accessible (metadata, cache, data) path triple from the
        metadata directories and the cache repo.
        """
        def _first_usable(paths):
            # First candidate that is readable, writable and passes validation.
            return next((p for p in paths
                         if os.access(p, os.R_OK) and os.access(p, os.W_OK) and check_path_valid(p)), "")

        cache_candidates = self.get_cache_path()
        if not cache_candidates:
            log.error("Fail to get cache path")
            return "", "", ""
        cache_path = _first_usable(cache_candidates)
        if cache_path:
            log.info("Get cache path success")
        metadata_candidates = self.get_metadata_path()
        if metadata_candidates:
            metadata_path = _first_usable(metadata_candidates)
        else:
            metadata_path = ""
            log.warn("Fail to get metadata path")
        return metadata_path, cache_path, self.get_data_path()

    def get_available_path_log(self):
        """
        Pick one accessible (metadata, cache, data) path triple for log-copy
        restore; the cache repo is borrowed from the previous backup copy.
        """
        def _first_usable(paths):
            # First candidate that is readable, writable and passes validation.
            return next((p for p in paths
                         if os.access(p, os.R_OK) and os.access(p, os.W_OK) and check_path_valid(p)), "")

        # Log copies carry no cache repo of their own; use the last backup copy's.
        cache_candidates = self.get_last_copy_cache_path()
        if not cache_candidates:
            log.error("Fail to get cache path")
            return "", "", ""
        cache_path = _first_usable(cache_candidates)
        if cache_path:
            log.info(f"Get cache path success. {cache_path}")
        metadata_candidates = self.get_metadata_path()
        if metadata_candidates:
            metadata_path = _first_usable(metadata_candidates)
        else:
            metadata_path = ""
            log.warn("Fail to get metadata path")
        return metadata_path, cache_path, self.get_log_data_path()

    def get_available_path_archive(self):
        """
        Pick paths for archive restore: the metadata path reuses the cache path
        and no data path is used.
        """
        candidates = self.get_cache_path()
        if not candidates:
            log.error("Fail to get cache path")
            return "", "", ""
        cache_path = next((p for p in candidates
                           if os.access(p, os.R_OK) and os.access(p, os.W_OK) and check_path_valid(p)), "")
        if cache_path:
            log.info("Get cache path success")
        return cache_path, cache_path, ""

    def get_cache_path(self):
        """Return the copy's CACHE repository path list."""
        return self._get_single_type_repo_path(RepositoryDataTypeEnum.CACHE_REPOSITORY, self._copy)

    def get_cache_area(self):
        """Return this task's cache repository root path."""
        return self._cache_area

    def get_last_copy_cache_path(self):
        """Return the previous backup copy's CACHE repo path list (path-safety checked)."""
        paths = self._get_single_type_repo_path(RepositoryDataTypeEnum.CACHE_REPOSITORY,
                                                self._last_backup_copy)
        check_path_safe(paths)
        return paths

    def get_metadata_path(self):
        """Return the copy's META repository path list (path-safety checked)."""
        paths = self._get_single_type_repo_path(RepositoryDataTypeEnum.META_REPOSITORY, self._copy)
        check_path_safe(paths)
        return paths

    def get_data_path(self):
        """Return <first accessible DATA repo path>/data, or "" when none is usable."""
        candidates = self._get_single_type_repo_path(RepositoryDataTypeEnum.DATA_REPOSITORY, self._copy)
        chosen = next((p for p in candidates
                       if os.access(p, os.R_OK) and os.access(p, os.W_OK) and check_path_valid(p)), "")
        if not chosen:
            log.error("Have no can available data path")
            return ""
        data_path = os.path.join(chosen, "data")
        check_path_safe(data_path)
        return data_path

    def get_log_data_path(self):
        """Return <first accessible LOG repo path>/data, or "" when none is usable."""
        candidates = self._get_single_type_repo_path(RepositoryDataTypeEnum.LOG_REPOSITORY, self._copy)
        chosen = next((p for p in candidates
                       if os.access(p, os.R_OK) and os.access(p, os.W_OK) and check_path_valid(p)), "")
        if not chosen:
            log.error("Have no can available log data path")
            return ""
        log.info(f"Get log data path success. {chosen}")
        log_data_path = os.path.join(chosen, "data")
        check_path_safe(log_data_path)
        return log_data_path

    @log_start()
    def _download_repo_path_from_archive(self, repo_path, temp_dir, copy_id, archive_addr):
        """Download one repo path's object data from the archive via agentcli.

        Args:
            repo_path: remote path prefix to fetch; wrapped as "/<prefix>/" and
                base64-encoded before being handed to agentcli.
            temp_dir: unused here — presumably kept for interface symmetry; verify.
            copy_id: unused here — the copy id is re-read from self._copy; verify.
            archive_addr: "ip1,ip2:port" address string of the archive service.

        Returns:
            bool: True when agentcli reports success, False otherwise.
        """
        archive_copy_id = self._copy.get("id", "")
        restore_object = repo_path
        if not restore_object:
            log.error(f"Fail to get prefix. main task:{self._job_id}")
            return False
        restore_object = f"/{restore_object}/"
        log.debug(f"The archive search path is {restore_object}")
        try:
            # agentcli expects the search path base64-encoded.
            restore_object = base64.b64encode(restore_object.encode(encoding='utf-8', errors='strict'))
        except Exception as err:
            log.error(f"Fail to transfer {restore_object} to bytes for {err}. main task:{self._job_id}")
            return False
        restore_object = restore_object.decode('utf-8')
        # Loosen the resource-id parent directory permission (755) so agentcli can write.
        if self._cache_area == "":
            log.error(f"No cache path can be read or written. cache_path: {self._cache_area}, "
                      f"main task:{self._job_id}")
            return False
        os.chmod(os.path.dirname(self._cache_area), PERMISSION_755)
        # 1.4 Run the fetch command as rdadmin with validated parameters.
        param = ExecFuncParam(os_user="rdadmin", cmd_list=[
            "{agentcli_path} GetFileFromArchive {archive_copy_id} {archive_addr} {target_objectdata_path} "
            f"{restore_object}"],
                              fmt_params_list=[[("agentcli_path", self._agentcli_path, ValidatorEnum.PATH_CHK_FILE),
                                                ("archive_copy_id", archive_copy_id, ValidatorEnum.REGEX_CHK_UUID4),
                                                ("archive_addr", archive_addr, ValidatorEnum.CHAR_CHK_COMMON),
                                                ("target_objectdata_path", self._target_objectdata_path,
                                                 ValidatorEnum.PATH_CHK_DIR)]],
                              shell_file="/bin/sh", chk_exe_owner=False)
        try:
            result, last_before = su_exec_cmd_list(param)
        except Exception as err:
            log.error(f"execute_cmd {restore_object} to bytes for {err}. main task:{self._job_id}")
            return False
        log.info(f"su_exec_cmd_list,{result}")
        if result != CMDResult.SUCCESS.value:
            log.error(f"Get object data from archive failed. last_before:{last_before}. "
                      f"main task:{self._job_id}")
            return False
        log.debug(f"Execute cmd success. restore_object: {restore_object}, output: {last_before}")
        return True

    @log_start()
    def _get_objectdata_from_archive(self):
        """Fetch the copy's object metadata from the archive into the cache repo.

        Resolves the archive address, prepares
        <cache>/meta/<copy_id>/objectmeta (owned by rdadmin) as the download
        target, then delegates to _download_repo_path_from_archive().

        Returns:
            bool: True on success, False on any failure.
        """
        # 1.1 Resolve the copy id.
        copy_id = self._copy_id
        if not copy_id:
            log.error(f"Fail to get copy id. main task:{self._job_id}")
            return False
        # 1.2 Build the archive address as "ip1,ip2,...:port".
        archive_ip, archive_port = self.get_archive_ip_port()
        if not archive_ip or not archive_port:
            return False
        archive_addr = ""
        for ip_address in archive_ip:
            archive_addr = "".join([archive_addr, f"{ip_address},"])
        archive_addr = f"{archive_addr.strip(',')}:{archive_port}"
        log.debug(f"Archive address : {archive_addr}")
        # 1.3 Prepare the local target path the archive data is written to.
        if self._cache_area == "":
            log.error(f"No cache path can be read or written. cache_path: {self._cache_area}, "
                      f"main task:{self._job_id}")
            return False
        self._target_objectdata_path = os.path.join(self._cache_area, "meta", copy_id, "objectmeta")
        if not os.path.exists(self._target_objectdata_path):
            os.makedirs(self._target_objectdata_path)
        temp_dir = os.path.join(self._cache_area, "meta")
        # agentcli runs as rdadmin, so the whole meta tree must be rdadmin-owned.
        if not exec_lchown_dir_recursively(temp_dir, "rdadmin", "rdadmin"):
            log.error(f"Fail to change user/group for {temp_dir}. main task:{self._job_id}")
            return False
        meta_repo_path = self.get_archive_meta_prefix()
        ret = self._download_repo_path_from_archive(meta_repo_path, temp_dir, copy_id, archive_addr)
        if not ret:
            log.error("Download meta repo data from archive failed")
            return False
        return True

    def get_archive_meta_prefix(self):
        """
        Used for direct restore of a copy archived to the cloud. When the pre-task
        fetches the object data, it needs the absolute path prefix of the object-data
        target.

        :return: the prefix path ending with "objectmeta" on success, "" on failure.
        """
        remote_path = []
        for repo in self._copy.get("extendInfo", {}).get("repositories", []):
            if repo.get("role", "") == StorageRole.MASTER and \
                    repo.get("type", "") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                remote_path = repo.get("remotePath", [])
                break
        if not remote_path:
            log.error(f"Fail to get archive remote path. main task: {self._job_id}")
            return ""
        target_path = ""
        for path in remote_path:
            if path.get("type", "") == RepositoryDataTypeEnum.META_REPOSITORY:
                target_path = path.get("path", "")
                break
        if not target_path:
            # No META repository entry among the master repo's remote paths.
            log.error(f"Fail to get meta path from remote path. main task: {self._job_id}")
            return ""
        # Use find() instead of index(): index() would raise an uncaught ValueError
        # when the path layout is unexpected and "source_policy" is absent.
        pos = target_path.find("source_policy")
        if pos < 0:
            log.error(f"No source_policy segment in meta path. main task: {self._job_id}")
            return ""
        # Keep everything from "source_policy" onward and append the objectmeta dir.
        return os.path.join(target_path[pos:], "objectmeta")

    def _get_archive_filesystems_from_remote_path(self, repo):
        """Build archive file-system descriptors from the repo's remotePath entries,
        skipping META repository paths."""
        file_systems = []
        for single_path in repo.get("remotePath", []):
            if single_path.get("type", "") == RepositoryDataTypeEnum.META_REPOSITORY:
                continue
            # The first path component after the leading '/' is the share name.
            share_name = single_path.get("path").split('/')[1]
            file_systems.append({
                "id": single_path.get("id", ""),
                "name": share_name,
                "sharePath": share_name,
                "logicIps": [{"ip": lif_ip.get("ip")} for lif_ip in single_path.get("remoteHost", [])]
            })
        log.debug(f"Archive file systems: {file_systems}. main task: {self._job_id}")
        return file_systems

    def _aggregate_single_copy_object_data(self, cache_path, object_data_path):
        """
        Merge all per-host object-data sqlite files of a single copy into one database.

        :param cache_path: directory where the merged database file is written
        :param object_data_path: directory containing the db files to merge
        :return: True on success, False on any failure (details are logged).
        """
        log.info(f"Start to merge db in {object_data_path} to {cache_path}")
        merged_db_path = os.path.join(cache_path, self._concrete_object_db)
        # Refuse to write through a symlink left at the target location.
        if os.path.islink(merged_db_path):
            os.remove(merged_db_path)

        try:
            object_conn = sqlite3.connect(merged_db_path)
        except Exception as ex:
            log.error(f"Connect sqlite {self._concrete_object_db} failed for {ex}.main task:{self._job_id}")
            return False
        try:
            # Fix: the original leaked this connection (and the per-file source
            # connections) on every error return; close it on all paths.
            if not self._merge_object_tables(object_conn, object_data_path):
                return False
        finally:
            object_conn.close()

        xbsa_resource_path = merged_db_path
        if os.path.isfile(xbsa_resource_path):
            log.info("change merge sqlite owner")
            set_user_and_group(xbsa_resource_path, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(xbsa_resource_path, PermissionNode.PERMISSION_700)
        log.info("success to merge sqlite")
        return True

    def _merge_object_tables(self, object_conn, object_data_path):
        """Ensure BsaObjTable exists in the merged db and copy every row from each
        source db file under object_data_path into it. Returns True on success."""
        object_cur = object_conn.cursor()
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            object_cur.execute("CREATE TABLE [BsaObjTable] ([copyId] VARCHAR(100) NOT NULL,"
                               "[objectSpaceName] VARCHAR(1024),[objectName] VARCHAR(1024) NOT NULL,"
                               "[bsaObjectOwner] VARCHAR(64),"
                               "[appObjectOwner] VARCHAR(64),[copyType] INTEGER(8),"
                               "[estimatedSize] VARCHAR(100) NOT NULL,"
                               "[resourceType] VARCHAR(32),[objectType] INTEGER(8),[objectStatus] INTEGER(8),"
                               "[objectDescription] VARCHAR(100),[objectInfo] VARCHAR(256),[timestamp] VARCHAR(64),"
                               "[restoreOrder] VARCHAR(100),[storePath] VARCHAR(1280) NOT NULL,"
                               "[filesystemName] VARCHAR(256) NOT NULL,[filesystemId] VARCHAR(128) NOT NULL,"
                               "[filesystemDeviceId] VARCHAR(256) NOT NULL,[rsv1] VARCHAR(256),[rsv2] VARCHAR(256));")
        object_tables = object_cur.execute("select name from sqlite_master where type='table'").fetchall()
        if not object_tables:
            log.error(f"Create dws table failed. main task:{self._job_id}")
            return False

        db_file_list = get_all_db_files(object_data_path)
        if not db_file_list:
            log.error("No db file")
            return False
        for db_file in db_file_list:
            temp_conn = sqlite3.connect(db_file)
            try:
                rows = temp_conn.execute("select * from BsaObjTable").fetchall()
            finally:
                temp_conn.close()
            for line in rows:
                # Parameterized insert. The original built the SQL with
                # str(line).replace("None", "''"), which broke on quoted values and
                # corrupted any column merely containing the substring "None".
                values = tuple("" if col is None else col for col in line)
                placeholders = ",".join("?" * len(values))
                object_cur.execute(f"insert into BsaObjTable values ({placeholders})", values)
            object_conn.commit()
        return True

    def _write_fs_relationship(self):
        """Create the directory holding the fs-mapping file, collect the mapping for
        the current copy type and write it out; used only for backup copies,
        replication copies and replicated copies archived to the cloud."""
        log.info(f"start to write_fs_relationship")
        cache_path = self._cache_area
        if not cache_path:
            log.error(f"No cache_path path can be read or written."
                      f"metadata_path:{cache_path}, main task:{self._job_id}")
            return False
        # tapeArchive copies keep the mapping under the meta area instead.
        if self.get_copy_type() == "tapeArchive":
            relation_dir = os.path.join(self._meta_area, "meta",
                                        self._copy_id, "archiveDownload")
        else:
            relation_dir = os.path.join(cache_path, "meta", self._copy_id, "replication")
        log.info(f"_write_fs_relationship temp_relation_path {relation_dir}")
        if not os.path.exists(relation_dir):
            log.info(f"No replication dir: {relation_dir}")
            if not exec_mkdir_cmd(relation_dir):
                return False
        relation_file = os.path.join(relation_dir, "filesystemRelationship.txt")
        if os.path.islink(relation_file):
            try:
                os.remove(relation_file)
            except Exception as err:
                log.error(f"Relation file is a link file. {err}")
                return False
        # Collect the mapping according to the copy type.
        if self.get_copy_type() == "log":
            relation_info = self.get_log_fs_relation(self._last_backup_copy, self._copy)
        elif self.get_copy_type() == "tapeArchive":
            relation_info = self.get_tape_fs_relation(self._copy)
        else:
            relation_info = self.get_fs_relation(self._copy)
        if not relation_info:
            log.error(f"Fail to get relation info. main task:{self._job_id}")
            return False
        log.info(f"write fs relationship get relation_info: {relation_info}")
        try:
            if not exec_overwrite_file(relation_file, {"relations": relation_info}):
                log.error(f"Fail to write relation file")
                return False
        except Exception as err:
            log.error(f"Fail to write relation file for {err}")
            return False
        log.info("write fs relationship success")
        # Lock down ownership and permissions on the dir and the file.
        for node, mode in ((relation_dir, PermissionNode.PERMISSION_700),
                           (relation_file, PermissionNode.PERMISSION_400)):
            set_user_and_group(node, UserInfo.USER_RDADMIN, UserInfo.USER_RDADMIN)
            set_permisson(node, mode)
        log.info(f"end to write_fs_relationship")
        return True

    def _get_param_from_file(self):
        """Extract the job/copies section from the already-parsed param object.

        Keeps the newest copy in self._copy and, when more than one copy is present,
        the previous one in self._last_backup_copy. Returns True on success.
        """
        log.info("Start to get restore param file.")
        job_info = self._json_param_object.get("job")
        if not job_info:
            log.error(f"No job info in param file, main task: {self._job_id}, pid: {self._pid}")
            return False
        copies_info = job_info.get("copies")
        if not copies_info:
            log.error(f"No copy info in param file, main task: {self._job_id}, pid: {self._pid}")
            return False
        self._copy = copies_info[-1]
        if len(copies_info) > 1:
            self._last_backup_copy = copies_info[-2]
        self._initialized = True
        log.info("Analyze param file successfully.")
        return True

    def _get_single_type_repo_path(self, target_type, copy_info):
        repositories = copy_info.get("repositories")
        if not repositories or len(repositories) == 0:
            log.error(f"Fail to get repositories, main task: {self._job_id}.")
            return ""
        tmp_repo = {}
        for repo in repositories:
            if repo.get("repositoryType") == target_type:
                tmp_repo = repo
                break
        if not tmp_repo:
            log.warn(f"Fail to get type({target_type}), main task: {self._job_id}.")
            return ""
        tmp_path = tmp_repo.get("path")
        if not tmp_path:
            log.warn(f"Fail to get path {target_type}, main task: {self._job_id}.")
            return ""
        return tmp_path

    def _handle_single_speed_file(self, speed_file):
        """
        Read one host's speed file and return its reported total size.

        :param speed_file: path of a speed file whose first line is a json object
        :return: the "totalSizeInMB" value from the first line, or 0 when the file
                 is empty, not valid json, or lacks the key.
        """
        with open(speed_file, "r") as tmp_fo:
            # Only the first line is ever used; no need to read the whole file.
            first_line = tmp_fo.readline()
        if not first_line:
            log.warn(f"Fail to read size file {speed_file}")
            return 0
        try:
            speed_dict = json.loads(first_line)
        except ValueError as err:
            # Fix: a truncated/garbled speed file previously raised an uncaught
            # JSONDecodeError; degrade to "no size" like the other failure paths.
            log.warn(f"Fail to parse size file {speed_file} for {err}")
            return 0
        log.debug(f"speed_dict in {speed_file}")
        single_host_size = speed_dict.get("totalSizeInMB", 0)
        if not single_host_size:
            log.warn(f"No totalSizeInMB in {speed_file} for {self._job_id}")
            return 0
        return single_host_size
