#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import time
import shutil
from threading import Thread

from common.common import execute_cmd
from common.common_models import SubJobDetails, LogDetail
from common.const import DBLogLevel
from exchange import log
from exchange.commons.common import output_execution_result_ex, read_file, \
    report_job_details_by_rpc, traverse_folder
from exchange.commons.const import ExchangeJobInfo, ParamConstant, SubJobStatusEnum, ExchangeReportDBLabel, \
    RepositoryDataTypeEnum, ExchangeRestoreTargetType, ExchangeDatabaseCopyStatus, PathSymbol, CopyDataTypeEnum, \
    AgentConstant, BodyErr, WAITE_TIME
from exchange.commons.exchange_exception import ExchangeInternalException
from exchange.commons.exchange_function_tool import log_start_end, out_result_with_job_info, \
    progress_notify_with_job_info
from exchange.commons.exchange_param_parse import ExchangeParamParse
from exchange.commons.powershell import PS_LOC, TIMEOUT
from exchange.restore.exchange_server_api.exchange_cmd_client import ExchangeCmdClient
from exchange.restore.restore_param_parser import RestoreParamParser


# Exchange restore common helper
def report_restore_table_failed_progress(job_info: ExchangeJobInfo, node_ip, label, db_name, err_code=None):
    """
    Report a failed restore label for one database via RPC.
    :param job_info: job context (job_id, sub_job_id, pid)
    :param node_ip: node ip shown in the label
    :param label: label identifier of the failure message
    :param db_name: database name shown in the label
    :param err_code: optional error code attached to the log detail
    :return: None
    """
    # Build the LogDetail once; attach the error code only when one is provided
    # (the original built a throwaway LogDetail before rebuilding it with err_code).
    detail_kwargs = dict(logInfo=label,
                         logInfoParam=[node_ip, db_name],
                         logLevel=DBLogLevel.ERROR.value)
    if err_code:
        detail_kwargs["logDetail"] = err_code
    log_detail = LogDetail(**detail_kwargs)
    output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id, progress=100,
                           logDetail=[log_detail], taskStatus=SubJobStatusEnum.FAILED.value, dataSize=0).dict(
        by_alias=True)
    report_job_details_by_rpc(job_info.job_id, job_info.pid, job_info.sub_job_id, output)


class ExchangeRestoreSubJob:
    """Runs an Exchange database restore sub-job: preparation, VSS-based restore and clean-up,
    while reporting progress and per-database labels over RPC."""

    def __init__(self, pid, param):
        # pid: request id, also passed to the Exchange cmd client for logging
        self._pid = pid
        # param: raw job parameter dict, parsed on demand via the *ParamParse helpers
        self._param = param
        self._exchange_cmd_client = ExchangeCmdClient(pid)
        # Overall progress percentage reported by the background thread (5..100)
        self._progress = 5

    def restore(self, job_info: ExchangeJobInfo):
        """Execute the whole restore sub-job. Returns True on success, False when any stage fails."""
        self.report_restore_sub_start(job_info)
        self.create_report_restore_progress_thread(job_info)
        # Pre-restore preparation
        ret = self.do_restore_preparation(job_info)
        if not ret:
            return False

        # Restore via the VSS tool
        ret = self.do_restore_with_vss(job_info)
        if not ret:
            return False

        # Environment clean-up
        ret = self.do_restore_clean_up(job_info)
        if not ret:
            return False

        # Report job completion
        self.report_restore_complete(job_info)
        return True

    def report_restore_complete(self, job_info):
        """Report sub-job completion over RPC, including the restored data size (in KB)."""
        self._progress = 100
        if RestoreParamParser.get_restore_type(self._param) == CopyDataTypeEnum.LOG_COPY:
            repositories = RestoreParamParser.get_exchange_database_copy_repo_for_log(self._param)
        else:
            repositories = RestoreParamParser.get_exchange_database_copy_repo(self._param)
        data_path = ""
        data_total_size = 0
        for repository in repositories:
            if RestoreParamParser.get_restore_type(self._param) == CopyDataTypeEnum.LOG_COPY:
                # Log restore: size is taken from the log repository path
                if repository.get("repositoryType") == RepositoryDataTypeEnum.LOG_REPOSITORY.value:
                    data_path = repository.get("path", [""])[0]
                    data_total_size = traverse_folder(data_path)
            else:
                # Data restore: size is summed over all restored databases
                database_name_maps = RestoreParamParser.get_database_name_maps(self._param)
                data_total_size = self.get_restore_size(database_name_maps)
        log_detail = LogDetail(logInfo=ExchangeReportDBLabel.SUB_JOB_SUCCESS,
                               logInfoParam=[job_info.sub_job_id],
                               logLevel=DBLogLevel.INFO.value)
        output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id, progress=self._progress,
                               logDetail=[log_detail], taskStatus=SubJobStatusEnum.COMPLETED.value,
                               dataSize=int(data_total_size / 1024)).dict(by_alias=True)
        report_job_details_by_rpc(job_info.job_id, job_info.pid, job_info.sub_job_id, output)

    def get_restore_size(self, database_list):
        """Sum the on-disk size (bytes) of the data repository for every database in the list."""
        total_size = 0
        for database in database_list:
            source_name = database.get("source_db_name", "")
            # Read the original database info from the backup copy metadata file
            copy_meta_data = RestoreParamParser.get_copy_meta_data_info(source_name, self._param)
            agent_id = copy_meta_data.get("agentId", "")
            database_guid = copy_meta_data.get("uuid", "")
            # Resolve the data repository and metadata repository paths
            meta_path, data_path = self.get_meta_and_data_path(agent_id, database_guid)
            total_size = total_size + traverse_folder(data_path)
        return total_size

    def report_restore_sub_start(self, job_info):
        """Report the sub-job start label over RPC and reset progress to 5%."""
        self._progress = 5
        log_detail = LogDetail(logInfo=ExchangeReportDBLabel.START_RESTORE_LABEL,
                               logInfoParam=[job_info.sub_job_id],
                               logLevel=DBLogLevel.INFO.value)
        output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id, progress=self._progress,
                               logDetail=[log_detail], taskStatus=SubJobStatusEnum.RUNNING.value,
                               dataSize=0).dict(by_alias=True)
        report_job_details_by_rpc(job_info.job_id, job_info.pid, job_info.sub_job_id, output)

    @log_start_end()
    def do_restore_preparation(self, job_info: ExchangeJobInfo):
        """
        Restore preparation work:
        1. Check that the target Exchange server version is consistent
        2. Check whether a database of the same name exists on the target
        3. Stop passive-copy replication of the DAG group
        4. Dismount the database
        5. Verify the dismount result
        6. Set the database's allow-restore-overwrite property
        """
        log.info("check version success")
        # Mapping of source database names to target database names
        database_name_maps = RestoreParamParser.get_database_name_maps(self._param)
        # Node ip used in the reported labels
        node_ip = ExchangeParamParse.get_restore_node_ip(self._param)
        log.info("stop copy success")
        # Set the allow-restore-overwrite property on every target database
        for database_name_map in database_name_maps:
            target_name = database_name_map.get("source_target_name", "")
            try:
                self.set_exchange_database_allow_restore(target_name)
                self.report_restore_table_success_progress(
                    job_info, node_ip, ExchangeReportDBLabel.DATABASE_ALLOW_RESTORE_SUCCESS, target_name, 0)
            except ExchangeInternalException:
                log.info(f"Do restore preparation failed, set allow {target_name} restore fail")
                report_restore_table_failed_progress(
                    job_info, node_ip, ExchangeReportDBLabel.DATABASE_ALLOW_RESTORE_FAILED, target_name)
                return False

        # NOTE(review): target_name here is the leftover loop variable, so only the LAST
        # database's log path is watched; raises NameError when database_name_maps is
        # empty — confirm this is intended.
        _, target_log_path = self._exchange_cmd_client.get_exchange_database_edb_and_log_path(target_name)
        # Wait until the log directory listing stops changing so that log-file churn
        # cannot disturb the log restore (NOTE(review): no upper bound on this wait).
        while True:
            file_len_front = os.listdir(target_log_path)
            time.sleep(WAITE_TIME)
            file_len_back = os.listdir(target_log_path)
            if file_len_front == file_len_back:
                break

        log.info("set allow restore success")
        # Dismount the databases when auto-dismount was requested
        auto_dismount = RestoreParamParser.get_auto_dismount(self._param)
        if "true" == auto_dismount:
            for database_name_map in database_name_maps:
                target_name = database_name_map.get("source_target_name", "")
                try:
                    self.dismount_exchange_database(target_name)
                except ExchangeInternalException:
                    log.info(f"Do restore preparation failed, dismount {target_name} fail")
                    report_restore_table_failed_progress(
                        job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_DISMOUNT_FAILED, target_name)
                    return False

        log.info("dismount success")
        # Check that every active database copy is dismounted and return the result
        return self.check_dismount_status(database_name_maps, job_info, node_ip)

    def check_dismount_status(self, database_name_maps, job_info, node_ip):
        """Verify (with retries) that every target database's active copy is dismounted;
        reports a success/failure label per database and returns the overall result."""
        exchange_dismount_status = True
        for database_name_map in database_name_maps:
            target_name = database_name_map.get("source_target_name", "")
            dismount_status = False
            # NOTE(review): the loop keeps re-checking even after dismount_status becomes
            # True (no early break) — wasteful and the final iteration's result wins.
            for _ in range(AgentConstant.RETRY_TIMES):
                dismount_status = self.check_exchange_database_dismounted_status(target_name)
                if not dismount_status:
                    log.warn(f"Failed getting Microsoft Exchange dismount status,\
                                                            wait for {AgentConstant.RETRY_WAIT_SECONDS} seconds")
                    time.sleep(AgentConstant.RETRY_WAIT_SECONDS)
            # If any database still fails after the retries the overall result is failure.
            exchange_dismount_status = exchange_dismount_status and dismount_status
            # NOTE(review): this reports based on the CUMULATIVE flag, so once one database
            # fails, later databases are labelled failed even if they dismounted — confirm.
            if exchange_dismount_status:
                self.report_restore_table_success_progress(
                    job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_DISMOUNT_SUCCESS, target_name, 0)
            else:
                report_restore_table_failed_progress(
                    job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_UNMOUNT, target_name)
        log.info("check dismount complete")
        return exchange_dismount_status

    @log_start_end()
    def suspend_passive_copy(self, database_name: str):
        """
        Suspend passive-copy replication of a database
        :param database_name: database name
        :return:
        """
        log.info(f'Suspend exchange passive copy database:{database_name}')
        # Query every replication copy of the database
        copies = self.get_exchange_database_copies(database_name)
        for copy in copies:
            # Suspend replication/activation for copies in Healthy status;
            # Mounted is the active copy, Suspended is an already-suspended copy.
            if ExchangeDatabaseCopyStatus.Healthy == copy.get("Status").get("value"):
                self._exchange_cmd_client.suspend_passive_copy(copy.get("Name"))

    @log_start_end()
    def check_exchange_version_consistency(self, expect_version: str):
        """
        Check Exchange server version consistency
        :param expect_version: expected version
        :return: True / False
        """
        # Compare only the major version prefix (e.g. "15.1")
        copy_version = self._exchange_cmd_client.get_exchange_version()[0:4]
        log.info(f"exception version:{expect_version}, copy version:{copy_version}")
        if expect_version[0:4] == copy_version[0:4]:
            log.info('Exchange version is consistent.')
            return True
        return False

    @log_start_end()
    def check_exchange_database_exist(self, database_name: str):
        """
        Check whether the Exchange database exists
        :param database_name: database name
        :return: True / False
        """
        # Probe by looking up the database guid; the lookup raises when the DB is absent
        try:
            if self._exchange_cmd_client.get_exchange_database_guid(database_name) is not None:
                log.info(f'Exchange database:{database_name} is exist.')
                return True
            log.error(f'Exchange database:{database_name} is not exist.')
            return False
        except ExchangeInternalException:
            log.error(f'Exchange database:{database_name} is not exist.')
            return False

    @log_start_end()
    def set_exchange_database_allow_restore(self, database_name: str):
        """
        Set the database property that allows overwrite on restore
        :param database_name: database name
        :return:
        """
        log.info(f'Set exchange database:{database_name} allow restore')
        self._exchange_cmd_client.set_exchange_database_allow_file_restore(database_name)

    def dismount_exchange_database(self, database_name: str):
        """
        Dismount an Exchange database
        :param database_name: database name
        :return:
        """
        log.info(f'Dismount exchange database:{database_name}')
        self._exchange_cmd_client.dismount_exchange_database(database_name)

    def check_exchange_database_dismounted_status(self, database_name: str):
        """
        Check the dismount status of the active database copy
        :param database_name: database name
        :return: True when the copy status is Dismounted, False otherwise
        """
        log.info(f'check exchange database :{database_name} dismount status')
        # Build the copy name of this node (database_name\server_name)
        code, hostname, err = execute_cmd([PS_LOC, "hostname"], timeout=TIMEOUT, cmd_array_flag=True)
        hostname = hostname.strip()
        copy_name = database_name + PathSymbol.SLASHES + hostname
        log.info(f'check exchange database copy:{copy_name} dismount status')
        # Query the copy matching this database/server pair
        copies = self.get_exchange_database_copies(copy_name)
        if copies.__len__() != 1:
            log.error(f"can not find or multi copy:{copy_name}")
            return False
        # The active copy must be in Dismounted status
        if ExchangeDatabaseCopyStatus.DISMOUNTED != copies[0].get("Status").get("value"):
            log.error("exchange database still mounted")
            return False
        log.info(f'exchange database :{database_name} is dismounted')
        return True

    def get_exchange_database_copies(self, copy_name: str):
        """
        Get database copies
        :param copy_name: database (or database\\server) copy name
        :return: list of copy descriptors
        """
        log.info(f'Get exchange database copies')
        copies = self._exchange_cmd_client.get_exchange_database_copies(copy_name)
        return copies

    @log_start_end()
    def do_restore_with_vss(self, job_info):
        """Restore every database in the source->target name map via the VSS tool;
        stops and returns False on the first failure."""
        database_name_maps = RestoreParamParser.get_database_name_maps(self._param)
        for database_name_map in database_name_maps:
            source_name = database_name_map.get("source_db_name", "")
            target_name = database_name_map.get("source_target_name", "")
            log.info(f"Get source name: {source_name}, target name {target_name}")
            if not self.restore_single_database(job_info, source_name, target_name):
                return False
        return True

    def restore_single_database(self, job_info, source_name, target_name):
        """Restore one database from the backup copy onto the target database via the VSS tool.
        Reports success/failure labels; returns True on success."""
        node_ip = ExchangeParamParse.get_restore_node_ip(self._param)
        # Read the original database info from the backup copy metadata file
        try:
            copy_meta_data = RestoreParamParser.get_copy_meta_data_info(source_name, self._param)
        except Exception as err:
            log.error(f"Get copy meta data info error, err: {err}")
            report_restore_table_failed_progress(
                job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_VSS_FAILED,
                target_name, BodyErr.COPY_META_DATA_NOT_EXIST.value)
            return False
        agent_id = copy_meta_data.get("agentId", "")
        database_guid = copy_meta_data.get("uuid", "")
        target_edb_path, target_log_path = self._exchange_cmd_client. \
            get_exchange_database_edb_and_log_path(target_name)

        # Remove existing edb/log/chk files from the target paths first
        # (could be extended to back-up-then-delete with rollback on failure).
        try:
            self.delete_database_file(target_edb_path, target_log_path)
        except Exception as err:
            log.error("Exception during del old db: %s.", str(err))
            return False

        if RestoreParamParser.get_restore_type(self._param) == CopyDataTypeEnum.LOG_COPY:
            # Log restore: copy the needed log files from the log repo to the data repo
            self.copy_restore_log_file_to_data(agent_id, database_guid, target_name, job_info)

        # Resolve the data repository and metadata repository paths
        meta_path, data_path = self.get_meta_and_data_path(agent_id, database_guid)

        # Log prefix (derived from the .chk file name in the data repository)
        log_prefix = self.get_exchange_database_log_file_prefix(database_name=target_name)

        # Target database guid, looked up by the target database name
        target_database_guid = self.get_exchange_database_guid(database_name=target_name)

        # Quote the target edb/log paths for the command line
        target_edb_path = "\"" + target_edb_path + "\""
        target_log_path = "\"" + target_log_path + "\""

        cmd = f"{ParamConstant.VSS_TOOL_PATH} Restore " \
              f"{meta_path} {data_path} {target_edb_path} {target_log_path}" \
              f" {database_guid} {target_database_guid} {log_prefix}"
        # NOTE(review): a falsy return code is treated as failure — assumes execute_cmd
        # returns a truthy value on success; confirm against common.common.execute_cmd.
        ret, stdout, stderr = execute_cmd(cmd)
        if not ret:
            log.error("Call vss fail err is %s", stderr)
            report_restore_table_failed_progress(
                job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_VSS_FAILED, target_name)
            return False
        data_total_size = traverse_folder(data_path)
        self.report_restore_table_success_progress(
            job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_VSS_SUCCESS, target_name,
            int(data_total_size / 1024))
        return True

    def report_restore_log_to_time(self, job_info: ExchangeJobInfo):
        """Report the log-restore time range label over RPC."""
        # Label text: database ({0}) is performing a log restore; the user-selected restore
        # point is ({1}), the file actually restored is ({2}), and that file's actual
        # restored time range is ({3}) to ({4}).
        log_detail = LogDetail(logInfo=ExchangeReportDBLabel.DATABASE_BACKUP_RESTORE_TIME_INFO_SUCCESS,
                               logInfoParam=[job_info.restore_db_name, job_info.restore_target_time,
                                             job_info.restore_file, job_info.restore_begin_time,
                                             job_info.restore_end_time],
                               logLevel=DBLogLevel.INFO.value)
        log.info(f"Get restore log to time detail {log_detail}")
        output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id, progress=self._progress,
                               logDetail=[log_detail], taskStatus=SubJobStatusEnum.RUNNING.value,
                               dataSize=0).dict(by_alias=True)
        report_job_details_by_rpc(job_info.job_id, job_info.pid, job_info.sub_job_id, output)

    def copy_restore_log_file_to_data(self, agent_id, database_guid, target_name, job_info: ExchangeJobInfo):
        """For a log restore, copy the log files needed up to the chosen restore point
        from the log repository into the data repository."""
        # Resolve the log repository and data repository mount paths
        data_mount_path = RestoreParamParser.get_data_restore_repositories_data_path(self._param)
        log_mount_path = RestoreParamParser.get_log_restore_repositories_log_path(self._param)
        log.info(f"Get data mount path {data_mount_path}, log mount path {log_mount_path}.")

        # Load the log-file -> timestamp mapping file
        mailbox_database_guid = PathSymbol.MAILBOX_PREFIX + database_guid
        file_timestamp_map_path = os.path.join(log_mount_path,
                                               mailbox_database_guid,
                                               "log_to_time")
        file_timestamp_map = read_file(file_timestamp_map_path)
        log.info(f"Read file timestamp map, path: {file_timestamp_map_path} "
                 f"file_timestamp_map : {file_timestamp_map}.")

        # Target restore timestamp chosen by the user
        target_timestamp = int(RestoreParamParser.get_restore_time_stamp(self._param))
        log.info(f"Get restore timestamp: {target_timestamp}.")

        # Select the log files dated before the target timestamp (also reports the range)
        restore_log_flies = self.report_restore_log_time(file_timestamp_map,
                                                         job_info,
                                                         target_name,
                                                         target_timestamp)

        mailbox_data_dir = os.path.join(data_mount_path, agent_id, mailbox_database_guid)
        mailbox_log_dir = os.path.join(log_mount_path, mailbox_database_guid)

        # Copy each selected file from the log repository to the data repository
        for restore_log_file in restore_log_flies:
            self.copy_file(mailbox_data_dir, mailbox_log_dir, restore_log_file)

    def copy_file(self, mailbox_data_dir, mailbox_log_dir, restore_log_file):
        """Copy one log file from the log repository dir to the data repository dir.
        Copy errors are logged and swallowed (best-effort)."""
        mailbox_log_path = os.path.join(mailbox_log_dir, restore_log_file)
        if os.path.isfile(mailbox_log_path):
            mailbox_data_path = os.path.join(mailbox_data_dir, restore_log_file)
            try:
                shutil.copy2(mailbox_log_path, mailbox_data_path)
            except Exception as exception_info:
                log.error(f"Copy file err: {exception_info}, pid: {self._pid}")

    def report_restore_log_time(self, file_timestamp_map, job_info, target_name, target_timestamp):
        """Pick the log files whose timestamp is before target_timestamp, fill the restore
        time-range fields on job_info and report them; returns the selected file list.
        Assumes file_timestamp_map is iterated in chronological order — TODO confirm."""
        restore_log_flies = []
        restore_begin_time = ParamConstant.EMPTY_DATA
        restore_end_time = ParamConstant.EMPTY_DATA
        restore_log_file = ParamConstant.EMPTY_DATA
        for file_path, timestamp in file_timestamp_map.items():
            if timestamp < target_timestamp:
                restore_begin_time = timestamp
                restore_log_file = file_path
                restore_log_flies.append(file_path)
            elif restore_end_time == ParamConstant.EMPTY_DATA:
                log.info(f"Get timestamp {timestamp}, bigger than target_timestamp {target_timestamp}")
                restore_end_time = timestamp
        if restore_end_time == ParamConstant.EMPTY_DATA:
            # No file is newer than the target: the range ends at the target itself
            log.info("Target restore time use last copy file")
            restore_end_time = target_timestamp
        log.info(f"Get restore_log_flies {restore_log_flies}")
        if restore_begin_time == ParamConstant.EMPTY_DATA:
            # Nothing selected: skip the time-range report
            return restore_log_flies
        job_info.restore_begin_time = ExchangeParamParse.convert_timestamp_to_datetime(restore_begin_time)
        job_info.restore_end_time = ExchangeParamParse.convert_timestamp_to_datetime(restore_end_time)
        job_info.restore_file = restore_log_file
        job_info.restore_target_time = ExchangeParamParse.convert_timestamp_to_datetime(target_timestamp)
        job_info.restore_db_name = target_name
        self.report_restore_log_to_time(job_info)
        return restore_log_flies

    @log_start_end()
    def get_database_names_maps(self):
        """Build identity source->target name maps from the database_name_dict
        metadata file in the log repository."""
        log_mount_path = RestoreParamParser.get_log_restore_repositories_log_path(self._param)
        metadata_path = os.path.join(log_mount_path, "database_name_dict")
        database_name_dict = read_file(metadata_path)
        database_name_list = database_name_dict["name_list"]
        database_name_maps = []
        for database_name in database_name_list:
            database_name_map = {"source_db_name": database_name, "source_target_name": database_name}
            database_name_maps.append(database_name_map)
        return database_name_maps

    @log_start_end()
    def get_meta_and_data_path(self, agent_id, database_guid):
        """Return the quoted (for command-line use) metadata and data repository paths
        for the given agent/database."""
        repositories = RestoreParamParser.get_exchange_database_copy_repo(self._param)
        meta_path = ""
        data_path = ""
        for repository in repositories:
            if repository.get("repositoryType") == RepositoryDataTypeEnum.META_REPOSITORY.value:
                mount_path = repository.get("path", [""])[0]
                meta_path = PathSymbol.QUOTATION + mount_path + PathSymbol.SLASHES + agent_id + PathSymbol.QUOTATION
            elif repository.get("repositoryType") == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                mount_path = repository.get("path", [""])[0]
                data_path = PathSymbol.QUOTATION + mount_path + PathSymbol.SLASHES + agent_id + PathSymbol.SLASHES + \
                            PathSymbol.MAILBOX_PREFIX + database_guid + PathSymbol.QUOTATION
        return meta_path, data_path

    @log_start_end()
    def delete_database_file(self, target_edb_path, target_log_path):
        """Delete the target .edb file and all .log/.chk/.jrs files under the log dir."""
        # Remove the edb file itself
        if os.path.exists(target_edb_path):
            os.remove(target_edb_path)
        # Clean .log, .chk and .jrs files under the log directory
        for filename in os.listdir(target_log_path):
            filename = os.path.join(target_log_path, filename).replace("\\", "/")
            ext = os.path.splitext(filename)[1]
            # Delete .log, .chk and .jrs files
            if ext in ['.log', '.jrs', '.chk']:
                # NOTE(review): filename is already a full path here, so this second join
                # relies on os.path.join discarding its first part when the second is
                # absolute — confirm behavior on the target platform.
                src_filename = os.path.join(target_log_path, filename)
                os.remove(src_filename)

    def get_exchange_database_guid(self, database_name: str):
        """
        Get the Exchange database guid
        :param database_name: database name
        :return: guid string
        """
        guid = self._exchange_cmd_client.get_exchange_database_guid(database_name)
        log.info(f'Get exchange guid :{guid}')
        return guid

    def get_exchange_database_log_file_prefix(self, database_name: str):
        """
        Get the Exchange database log file prefix
        :param database_name: database name
        :return: log file prefix string
        """
        log_file_prefix = self._exchange_cmd_client.get_exchange_database_log_prefix(database_name)
        log.info(f'Get exchange log file prefix :{log_file_prefix}')
        return log_file_prefix

    @log_start_end()
    def do_restore_clean_up(self, job_info):
        """Post-restore clean-up: optionally remount each database and, for DAG targets,
        resume passive-copy replication; reports per-database labels."""
        database_name_maps = RestoreParamParser.get_database_name_maps(self._param)
        for database_name_map in database_name_maps:
            target_name = database_name_map.get("source_target_name", "")
            log.info(f"Get target name {target_name}")

            # Mount the database automatically when requested
            auto_mount = RestoreParamParser.get_auto_mount(self._param)
            node_ip = ExchangeParamParse.get_restore_node_ip(self._param)
            if "true" == auto_mount:
                try:
                    self.mount_exchange_database(target_name)
                    self.report_restore_table_success_progress(
                        job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_MOUNT_SUCCESS, target_name, 0)
                except ExchangeInternalException:
                    report_restore_table_failed_progress(
                        job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_MOUNT_FAILED, target_name)
                    return False

            # Resume DAG-group passive-copy replication
            target_type = RestoreParamParser.get_restore_target_type(self._param)
            if ExchangeRestoreTargetType.DAG == target_type:
                try:
                    self.resume_passive_copy(target_name)
                    self.report_restore_table_success_progress(
                        job_info, node_ip, ExchangeReportDBLabel.DATABASE_SET_RESTORE_SUCCESS, target_name, 0)
                except ExchangeInternalException:
                    report_restore_table_failed_progress(
                        job_info, node_ip, ExchangeReportDBLabel.DATABASE_SET_RESTORE_FAILED, target_name)
                    return False
            self.report_restore_table_success_progress(
                job_info, node_ip, ExchangeReportDBLabel.DATABASE_RESTORE_SUCCESS, target_name, 0)
        return True

    @log_start_end()
    def mount_exchange_database(self, database_name: str):
        """
        Mount an Exchange database
        :param database_name: database name
        :return:
        """
        log.info(f'Mount exchange database:{database_name}')
        self._exchange_cmd_client.mount_exchange_database(database_name)

    @log_start_end()
    def resume_passive_copy(self, database_name: str):
        """
        Resume passive-copy replication
        :param database_name: database name
        :return:
        """
        log.info(f'Resume exchange passive copy database:{database_name}')
        # Query every replication copy of the database
        copies = self.get_exchange_database_copies(database_name)
        for copy in copies:
            # Act on copies in Suspended status (Healthy = replicating, Mounted = active copy).
            # NOTE(review): this reseeds the copy (update_passive_copy_seed) rather than a
            # plain resume — confirm that is the intended recovery path.
            if ExchangeDatabaseCopyStatus.Suspended == copy.get("Status").get("value"):
                self._exchange_cmd_client.update_passive_copy_seed(copy.get("Name"))

    @log_start_end()
    @out_result_with_job_info()
    @progress_notify_with_job_info(ExchangeReportDBLabel.SUB_JOB_SUCCESS, ExchangeReportDBLabel.SUB_JOB_FALIED)
    def do_sub_job_task(self, job_info: ExchangeJobInfo):
        """Entry point for the sub-job: run restore() and convert any exception to False."""
        try:
            return self.restore(job_info)
        except Exception as error:
            log.error(error, exc_info=True)
            return False

    def write_progress_to_file(self, status, progress, log_detail, progress_type, job_info: ExchangeJobInfo):
        """Persist a progress snapshot to a file named progress_type in the cache repository.
        NOTE(review): the log_detail parameter is ignored — logDetail is always written empty."""
        output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id, progress=int(progress),
                               logDetail=list(), taskStatus=status)
        cache_path = ExchangeParamParse.get_single_type_repo_path_from_restore_param(self._param,
                     RepositoryDataTypeEnum.CACHE_REPOSITORY)
        file_path = os.path.join(cache_path, progress_type)
        output_execution_result_ex(file_path, output.dict(by_alias=True))

    def report_restore_table_success_progress(self, job_info: ExchangeJobInfo, node_ip, label, db_name, data_size):
        """
        Report a successful restore label for one database via RPC
        :param job_info: job context
        :param node_ip: node ip shown in the label
        :param label: label identifier
        :param db_name: database name shown in the label
        :param data_size: data size (KB)
        :return: None
        """
        log_detail = LogDetail(logInfo=label,
                               logInfoParam=[node_ip, db_name],
                               logLevel=DBLogLevel.INFO.value)
        output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id, progress=self._progress,
                               logDetail=[log_detail], taskStatus=SubJobStatusEnum.RUNNING.value, dataSize=data_size). \
            dict(by_alias=True)
        report_job_details_by_rpc(job_info.job_id, job_info.pid, job_info.sub_job_id, output)

    def create_report_restore_progress_thread(self, job_info: ExchangeJobInfo):
        """
        Start a separate background (daemon) thread that reports progress periodically.
        """
        progress_thread = Thread(target=self.report_restore_progress, args=(job_info,))
        # NOTE(review): setDaemon() is deprecated since Python 3.10 in favour of the
        # daemon attribute / constructor argument.
        progress_thread.setDaemon(True)
        progress_thread.start()

    def report_restore_progress(self, job_info: ExchangeJobInfo):
        """Report the current progress over RPC every 30 s until _progress reaches 100."""
        while self._progress < 100:
            output = SubJobDetails(taskId=job_info.job_id, subTaskId=job_info.sub_job_id,
                                   taskStatus=SubJobStatusEnum.RUNNING, progress=self._progress).dict(by_alias=True)
            report_job_details_by_rpc(job_info.job_id, job_info.pid, job_info.sub_job_id, output)
            time.sleep(30)
