#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import json
import os
import shutil
import stat
import time
import uuid
from threading import Thread

from adds.comm.const import LogLevel
from common.common import execute_cmd, check_command_injection_exclude_quote
from common.common_models import LogDetail, SubJobModel, SubJobDetails
from common.const import SubJobStatusEnum, SubJobTypeEnum, SubJobPriorityEnum, \
    SubJobPolicyEnum, ReportDBLabel, RepositoryDataTypeEnum, RestoreType, DBLogLevel
from exchange import log
from exchange.commons.common import output_result_file, get_free_space, report_job_details_by_rpc, \
    check_win_path_legality, read_file, output_execution_result_ex, traverse_folder, get_exchange_config, \
    get_exchange_install_path, get_exchange_conf_path
from exchange.commons.const import ParamConstant, ProgressConst, RestoreSubJobName, \
    ExchangeRestoreTargetType, PathSymbol, ExchangeReportDBLabel, AgentConstant, BodyErr, \
    ExchangeRestoreLocation, CopyDataTypeEnum, ExchangeJobInfo
from exchange.commons.exchange_exception import ExchangeInternalException
from exchange.commons.exchange_param_parse import ExchangeParamParse
from exchange.commons.logger_wins import Logger
from exchange.restore.exchange_restore_base import ExchangeRestoreBase
from exchange.restore.restore_param_parser import RestoreParamParser

LOGGER = Logger().get_logger()


class ExchangeRestoreDatabase(ExchangeRestoreBase):
    def __init__(self, p_id, job_id, sub_job_id, json_param):
        """
        :param p_id: pid of the agent request that spawned this task
        :param job_id: main job identifier
        :param sub_job_id: sub job identifier (may be empty for main-job stages)
        :param json_param: parsed JSON parameter dict delivered by the agent framework
        """
        super().__init__(p_id, job_id, sub_job_id, json_param)
        # NOTE(review): the base class receives the same values; these assignments
        # presumably duplicate base attributes -- confirm against ExchangeRestoreBase.
        self._p_id = p_id
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self._json_param = json_param

    def allow_restore_in_local_node(self):
        """
        Decide whether the restore may run on this node.

        :return: True when the local MSExchangeIS service is online, or (for a
                 fine-grained restore) when no mounted recovery database blocks us.
        """
        # An online MSExchangeIS service always qualifies this node.
        if self._check_exchange_online_status():
            return True
        job_type = RestoreParamParser.get_restore_job_type(self._json_param)
        # A fine-grained restore may still proceed if the RDB check passes.
        if job_type == RestoreType.FINE_GRAINED_RESTORE.value:
            return self.check_rdb_database()
        return False

    def restore_preprequisite(self):
        """
        Restore pre-requisite task.

        Verifies the target Exchange server version matches the copy, skips the
        rest for fine-grained restores, and otherwise ensures a database with
        the target name exists (creating one when necessary).
        :return: true,false
        """
        # The target Exchange server version must match the copy's version exactly.
        expect_version = RestoreParamParser.get_exchange_server_version(self._json_param)
        version_consistency, target_version = self._check_exchange_version_consistency(expect_version)
        if not version_consistency:
            log_detail = LogDetail(logInfo=ReportDBLabel.PRE_REQUISIT_FAILED,
                                   logDetail=BodyErr.EXCHANGE_IS_INCONSISTENT.value,
                                   logDetailParam=[str(expect_version), str(target_version)],
                                   logTimestamp=int(time.time()),
                                   logLevel=DBLogLevel.ERROR)
            self._report_progress(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED, [log_detail])
            return False
        # Fine-grained restores skip the remaining pre-requisite work.
        if RestoreParamParser.get_restore_job_type(self._json_param) == RestoreType.FINE_GRAINED_RESTORE.value:
            self._report_progress(SubJobStatusEnum.RUNNING.value, ProgressConst.MIN_PROGRESS, list())
            self._report_progress(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED, list())
            return True
        self._report_progress(SubJobStatusEnum.RUNNING.value, ProgressConst.MIN_PROGRESS, list())
        # Validate the custom edb/log paths against the restore location.
        path_legally = self.check_target_edb_and_log_path_legality()
        if not path_legally:
            log.error("path is illegally")
            self._report_progress(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED, list())
            return False
        # Check whether a database with the target name already exists; create it when missing.
        target_database_name = RestoreParamParser.get_target_restore_database_name(self._json_param)
        database_exist = self._check_exchange_database_exist(target_database_name)
        if database_exist:
            log.info(f"database exist :{target_database_name}")
            # Same-name database exists: its edb/log paths must match the requested paths.
            if self.check_same_edb_and_log_path(target_database_name):
                self._report_progress(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED, list())
                return True
            # Paths differ: report the target-path mismatch with an error code and fail.
            log.error(f"target edb path or log path is not same with target database:{target_database_name}!")
            error_detail = LogDetail(logInfo=ExchangeReportDBLabel.DATABASE_CREATE_FAILED,
                                     logInfoParam=[target_database_name], logLevel=LogLevel.ERROR.value,
                                     logDetail=BodyErr.EDB_AND_LOG_PATH_SAME_CHECK_FAILED.value,
                                     logDetailParam=[target_database_name])
            self._report_progress(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED, [error_detail])
            return False
        else:
            target_edb_path = RestoreParamParser.get_target_database_edb_path(self._json_param)
            target_log_path = RestoreParamParser.get_target_database_log_path(self._json_param)
            if len(target_edb_path) == 0 and len(target_log_path) == 0:
                # Original-location rename restore: the database is gone and no custom
                # paths were supplied -- derive paths from the copy and restore into a
                # sibling "Restore_<db>_<uuid>" folder.
                new_edb_path, new_log_path = self.get_edb_and_log_path_from_copy(target_database_name)
                return self.create_exchange_database(target_database_name, new_edb_path, new_log_path)
            # Custom paths supplied (mandatory for new-location restores): create the
            # same-name database on the node with the fewest active copies (shell commands).
            return self.create_exchange_database(target_database_name, target_edb_path, target_log_path)

    def create_exchange_database(self, target_database_name, target_edb_path, target_log_path):
        """
        Create the target database on the least-loaded node and report the outcome.

        :param target_database_name: name of the database to create
        :param target_edb_path: edb file path for the new database
        :param target_log_path: log directory for the new database
        :return: True when the database was created, False otherwise.
        """
        node = self._get_idle_node_server()
        created = self._create_exchange_database(target_database_name, node, target_edb_path, target_log_path)
        if not created:
            # Surface the failure label first, then close the sub job as failed.
            self._report_failed_progress(ExchangeReportDBLabel.DATABASE_CREATE_FAILED, [target_database_name],
                                         ProgressConst.MIN_PROGRESS)
            self._report_progress(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED, [])
            return False
        self._report_running_progress(ExchangeReportDBLabel.DATABASE_CREATE_SUCCESS, [target_database_name],
                                      ProgressConst.MIN_PROGRESS)
        self._report_progress(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED, [])
        return True

    def check_target_edb_and_log_path_legality(self):
        """
        Validate the user-supplied edb/log paths against the restore location.

        Original-location restores must not carry custom paths; new-location
        restores require both paths and both must be legal Windows paths.
        Any other location imposes no constraints here.

        :return: True when the paths are acceptable, False otherwise.
        """
        edb_path = RestoreParamParser.get_target_database_edb_path(self._json_param)
        log_path = RestoreParamParser.get_target_database_log_path(self._json_param)
        location = RestoreParamParser.get_restore_location(self._json_param)
        if location == ExchangeRestoreLocation.ORIGIN:
            # Custom paths are forbidden for original-location restores.
            if edb_path or log_path:
                log.error("origin restore but target path set")
                return False
            return True
        if location == ExchangeRestoreLocation.NEW:
            # Both paths are mandatory for new-location restores.
            if not (edb_path and log_path):
                log.error("new restore but target path not all set")
                return False
            return check_win_path_legality(edb_path) and check_win_path_legality(log_path)
        return True

    def get_edb_and_log_path_from_copy(self, database_name):
        """
        Derive a fresh "Restore_<db>_<uuid8>" location next to the copy's paths.

        Example:
            C:\\Program Files\\Microsoft\\Exchange Server\\V15\\Mailbox\\mb01\\mb01.edb
            C:\\Program Files\\Microsoft\\Exchange Server\\V15\\Mailbox\\mb01
        becomes
            C:\\Program Files\\Microsoft\\Exchange Server\\V15\\Mailbox\\Restore_mb01_uuid\\Restore_mb01_uuid.edb
            C:\\Program Files\\Microsoft\\Exchange Server\\V15\\Mailbox\\Restore_mb01_uuid

        :param database_name: database name recorded in the copy
        :return: (new edb file path, new log directory path)
        """
        edb_path, log_path = RestoreParamParser.get_copy_exchange_edb_path_and_log_path(self._json_param)
        # Strip the trailing "<db>\<db>.edb" from the edb path (two backslashes back).
        edb_parent = edb_path[:edb_path.rfind('\\', 0, edb_path.rfind('\\'))]
        # Strip the trailing "\<db>" from the log path (one backslash back).
        log_parent = log_path[:log_path.rfind('\\')]
        folder = "Restore_{}_{}".format(database_name, str(uuid.uuid4())[:8])
        new_edb_path = '\\'.join([edb_parent, folder, folder + '.edb'])
        new_log_path = '\\'.join([log_parent, folder])
        return new_edb_path, new_log_path

    def restore_gen_sub_job(self):
        """
        Split the restore into sub jobs depending on the copy's target type
        (standalone server vs. DAG group).
        :return: true,false
        """
        # Report the job as started.
        self._report_progress(SubJobStatusEnum.RUNNING.value, ProgressConst.MIN_PROGRESS, list())
        target_type = RestoreParamParser.get_restore_target_type(self._json_param)
        nodes_list = RestoreParamParser.get_restore_nodes_list(self._json_param)
        target_database_name = RestoreParamParser.get_target_restore_database_name(self._json_param)
        # Fine-grained restores always run on a single node, regardless of target type.
        if (target_type == ExchangeRestoreTargetType.SINGLE or
                RestoreParamParser.get_restore_job_type(self._json_param) == RestoreType.FINE_GRAINED_RESTORE.value):
            log.info("Enter restore_gen_sub_job single")
            job_list = self.generate_sub_job_list(nodes_list[0].get("id"))
            output_result_file(self._p_id, job_list)
        elif target_type == ExchangeRestoreTargetType.DAG:
            log.info("Enter restore_gen_sub_job DAG")
            # Match the database's backing server names against the node list.
            try:
                self.generate_dag_restore_sub_job(nodes_list, target_database_name)
            except ExchangeInternalException as e:
                log.error("can not generate sub job for dag group, ret=%s.", e.message)
                return False
        else:
            log.error("gen sub job fail bad restore type")
            return False
        # Report the job as completed.
        self._report_progress(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED, list())
        return True

    def restore(self):
        """
        Dispatch the restore sub job: mailbox-level for fine-grained copies,
        database-level otherwise.

        :return: True on success, False on failure.
        """
        job_type = RestoreParamParser.get_restore_job_type(self._json_param)
        if job_type != RestoreType.FINE_GRAINED_RESTORE.value:
            return self.do_normal_restore()
        return self.do_mailbox_restore()

    def do_normal_restore(self):
        """
        Run a full database restore: preparation, VSS restore, then cleanup.
        The first failing stage reports the failed label and aborts.

        :return: True when every stage succeeded, False otherwise.
        """
        log.info("Enter ExchangeRestoreDatabase.do_normal_restore")
        # Mark the sub job as started.
        self._report_running_progress(ExchangeReportDBLabel.START_RESTORE_LABEL, [self._sub_job_id],
                                      ProgressConst.MIN_PROGRESS)
        # Keep-alive reporter so the framework does not time the sub job out.
        self.create_report_restore_progress_thread()
        # Stages run strictly in order; stop at the first failure.
        stages = (self.do_restore_preparation, self.do_restore_with_vss, self.do_restore_clean_up)
        for stage in stages:
            if not stage():
                self._report_failed_progress(ReportDBLabel.SUB_JOB_FALIED, [self._sub_job_id],
                                             ProgressConst.MIN_PROGRESS)
                return False
        # Every stage succeeded -- close the sub job.
        self._report_completed_progress(ExchangeReportDBLabel.SUB_JOB_SUCCESS, [self._sub_job_id],
                                        ProgressConst.PROGRESS_ONE_HUNDRED)
        return True

    def check_rdb_database(self):
        """
        Check whether a mounted recovery database (RDB) blocks this node.

        :return: False when any recovery database on this server is mounted,
                 True otherwise.
        """
        databases = self._get_database_in_server()
        # A single database may come back as a bare dict -- normalize to a list.
        db_list = databases if isinstance(databases, list) else [databases]
        for db in db_list:
            if db.get("Recovery") != "True":
                continue
            status = self._get_database_status(db.get("Name"))
            if status.get("Status") == "Mounted":
                return False
        return True

    def restore_post_job(self):
        """
        Post-restore cleanup.

        For fine-grained restores: remove the mailbox restore requests, dismount
        and delete the temporary recovery database (named after the job id), and
        clear its local files (best effort). Finally write a marker file so later
        stages know the database was recovered.
        :return: True on success, False otherwise.
        """
        if RestoreParamParser.get_restore_job_type(self._json_param) == RestoreType.FINE_GRAINED_RESTORE.value:
            log.info(f"Enter restore post job for fine grained restore!")
            is_success = True
            try:
                request_ids = self.get_request_ids()
                # 5. Remove every mailbox restore request created by this job.
                for request_id in request_ids:
                    self._remove_mailbox_restore_request(request_id)
                    log.info(f"5. delete request id: {request_id} successfully.")
            except Exception as e:
                is_success = False
                log.error(f"5. delete request ids failed, err: {e}")
            try:
                # 6. Dismount the recovery database.
                self._dismount_exchange_database(self._job_id)
                log.info(f"6. dismount database: {self._job_id} successfully.")
                # 7. Delete the recovery (RDB) database.
                self._remove_database(self._job_id)
                log.info(f"7. remove database: {self._job_id} successfully.")
            except Exception as e:
                is_success = False
                log.error(f"dismount database failed, err: {e}")
            try:
                # 8. Remove the local database files. Best effort: a failure here
                # is logged but deliberately does not fail the post job.
                target_edb_path = self.get_edb_path()
                shutil.rmtree(target_edb_path)
                log.info(f"8. remove local data path: {target_edb_path} successfully.")
            except Exception as e:
                log.error(f"8. remove local data path failed, err: {e}.")

            if not is_success:
                self._report_failed_progress(ReportDBLabel.SUB_JOB_FALIED, [self._sub_job_id],
                                             ProgressConst.MIN_PROGRESS)
                return False
        try:
            # Persist a marker so subsequent stages can tell the database was recovered.
            file_path = os.path.join(ParamConstant.PARAM_FILE_PATH, "database_is_recovered")
            output_execution_result_ex(file_path, {"database_is_recovered": True})
            log.info(f"Write database_is_recovered file successful.")
        except Exception as err:
            log.error(f"Write database_is_recovered file failed, err: {err}.")
            return False
        self._report_progress(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED, list())
        return True

    def query_job_permission(self):
        """Restore jobs need no extra permission check; unconditionally allowed."""
        return True

    def progress_comm(self):
        """Report common progress (delegated to the base helper) and signal success."""
        self.report_progress_comm()
        return True

    def do_restore_preparation(self):
        """
        Prepare the environment before the restore runs.

        Fine-grained restore: only verify that every target mailbox exists.
        Database restore: allow restore-overwrite on the target database,
        suspend DAG passive-copy replication, optionally dismount the database,
        and finally confirm the dismount succeeded.
        :return: True when preparation succeeded, False otherwise.
        """
        # Version consistency check moved to the pre-requisite stage.
        # Fine-grained restores skip the database-level preparation below.
        if RestoreParamParser.get_restore_job_type(self._json_param) == RestoreType.FINE_GRAINED_RESTORE.value:
            # Verify every target mailbox exists before restoring into it.
            target_mailbox_info_list = RestoreParamParser.get_fine_grained_restore_objects(self._json_param)
            for target_mailbox_info in target_mailbox_info_list:
                log.debug(f"target_mailbox_info is:{target_mailbox_info}")
                target_mailbox = json.loads(target_mailbox_info.get("name")).get("name")
                target_mailbox_name = target_mailbox.split("@")[0]
                if not self._check_exchange_mailbox_exist(target_mailbox_name):
                    self.report_job_details_when_mailbox_not_exist(target_mailbox_name)
                    return False
            return True
        # Mark the target database as overwritable by restore.
        target_database_name = RestoreParamParser.get_target_restore_database_name(self._json_param)
        ip = RestoreParamParser.get_current_node_endpoint(self._json_param)
        try:
            self._set_exchange_database_allow_restore(target_database_name)
            self._report_running_progress(ExchangeReportDBLabel.DATABASE_ALLOW_RESTORE_SUCCESS,
                                          [ip, target_database_name], ProgressConst.MIN_PROGRESS)
        except ExchangeInternalException:
            self._report_failed_progress(ExchangeReportDBLabel.DATABASE_ALLOW_RESTORE_FAILED,
                                         [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
            return False

        # DAG target: suspend replication, replay and activation of the passive copies.
        target_type = RestoreParamParser.get_restore_target_type(self._json_param)
        if ExchangeRestoreTargetType.DAG == target_type:
            try:
                self._suspend_passive_copy(target_database_name)
                self._report_running_progress(ExchangeReportDBLabel.DATABASE_SET_RESTORE_SUCCESS,
                                              [ip, target_database_name], ProgressConst.MIN_PROGRESS)
            except ExchangeInternalException:
                self._report_failed_progress(ExchangeReportDBLabel.DATABASE_SET_RESTORE_FAILED,
                                             [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
                return False

        # Dismount the database when the user requested automatic dismount.
        auto_dismount = RestoreParamParser.get_auto_dismount(self._json_param)
        if "true" == auto_dismount:
            try:
                self._dismount_exchange_database(target_database_name)
            except ExchangeInternalException:
                self._report_failed_progress(ExchangeReportDBLabel.DATABASE_RESTORE_DISMOUNT_FAILED,
                                             [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
                return False

        # Confirm the active copy is actually dismounted; fail the stage otherwise.
        return self.check_dismount_status(ip, target_database_name)

    def check_dismount_status(self, ip, target_database_name):
        """
        Poll until the target database's active copy reports as dismounted.

        Retries AgentConstant.RETRY_TIMES times, sleeping RETRY_WAIT_SECONDS
        between attempts, then reports success or failure progress.

        :param ip: endpoint of the current node (used in the progress label)
        :param target_database_name: database whose dismount state is checked
        :return: True when the database dismounted in time, False otherwise.
        """
        exchange_dismount_status = False
        for _ in range(AgentConstant.RETRY_TIMES):
            exchange_dismount_status = self._check_exchange_database_dismounted_status(target_database_name)
            if exchange_dismount_status:
                break
            # Fix: the old message used a backslash line continuation inside the
            # f-string, which injected a run of indentation spaces into the
            # logged text; implicit string concatenation keeps it clean.
            log.warn(f"Failed getting Microsoft Exchange dismount status, "
                     f"wait for {AgentConstant.RETRY_WAIT_SECONDS} seconds")
            time.sleep(AgentConstant.RETRY_WAIT_SECONDS)

        if not exchange_dismount_status:
            self._report_failed_progress(ExchangeReportDBLabel.DATABASE_RESTORE_UNMOUNT,
                                         [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
        else:
            self._report_running_progress(ExchangeReportDBLabel.DATABASE_RESTORE_DISMOUNT_SUCCESS,
                                          [ip, target_database_name], ProgressConst.MIN_PROGRESS)
        return exchange_dismount_status

    def report_job_details_when_mailbox_not_exist(self, mailbox_name):
        """Report the sub job as failed because the target mailbox does not exist."""
        detail = LogDetail(
            logInfo=ExchangeReportDBLabel.SUB_JOB_FALIED,
            logInfoParam=[self._sub_job_id],
            logLevel=LogLevel.ERROR.value,
            logDetail=BodyErr.DB_USER_RESTORE_TAR_MAILBOX_NOT_EXIST.value,
            # Only the local part of the address goes into the error detail.
            logDetailParam=[mailbox_name.split("@")[0]],
        )
        self._report_progress(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED, [detail])

    def do_restore_with_vss(self):
        """
        Restore the database files with the VSS tool.

        Resolves the target edb/log locations, clears stale database files,
        stages log-restore files into the data repository when needed, then
        invokes the VSS tool and reports progress.

        :return: True on success, False on any failure.
        """
        target_database_name = RestoreParamParser.get_target_restore_database_name(self._json_param)
        # Resolve the edb/log paths for the target database:
        # - original location: look up the paths registered for the database name
        # - new location: use custom paths when supplied, otherwise the registered ones
        target_edb_path, target_log_path = self.get_target_edb_and_log_path(target_database_name)

        # Clear stale database files so the VSS restore starts from a clean slate.
        try:
            # Remove the old edb file, if any.
            if os.path.exists(target_edb_path):
                os.remove(target_edb_path)
            # Purge .log/.chk/.jrs files from the log directory.
            # Fix: the old code joined target_log_path onto an already-joined
            # absolute path before deleting, which only worked by accident for
            # Windows drive-absolute paths.
            for entry in os.listdir(target_log_path):
                if os.path.splitext(entry)[1] in ['.log', '.jrs', '.chk']:
                    os.remove(os.path.join(target_log_path, entry))
        except Exception as err:
            log.error("Exception during del old db: %s.", str(err))
            return False

        # Resolve the agent that produced the backup copy.
        agent_id = RestoreParamParser.get_backup_copy_agent_id(self._json_param)
        if agent_id is None:
            log.error("Do restore with vss fail case:copy agent id empty")
            return False
        data_path, database_guid, meta_path = self.get_data_and_meta_path(agent_id)

        if RestoreParamParser.get_restore_type(self._json_param) == CopyDataTypeEnum.LOG_COPY:
            # Log restore: stage the required log files from the log repo into the data repo.
            self.copy_restore_log_file_to_data(agent_id, database_guid, target_database_name)

        # GUID of the target database, looked up by name.
        target_database_guid = self._get_exchange_database_guid(database_name=target_database_name)
        # Log file prefix (derived from the .chk file name in the data repository).
        log_prefix = self._get_exchange_database_log_file_prefix(database_name=target_database_name)
        # Quote the target paths so spaces survive the command line.
        target_edb_path = "\"" + target_edb_path + "\""
        target_log_path = "\"" + target_log_path + "\""
        cmd = f"{ParamConstant.VSS_TOOL_PATH} Restore " \
              f"{meta_path} {data_path} {target_edb_path} {target_log_path}" \
              f" {database_guid} {target_database_guid} {log_prefix}"
        ret, stdout, stderr = execute_cmd(cmd)
        ip = RestoreParamParser.get_current_node_endpoint(self._json_param)
        # Fix: execute_cmd returns the exit code as a string ("0" on success, see
        # do_restore_to_rdb_with_vss); the old truthiness test (`if not ret`)
        # could never detect a failure because "1" etc. are truthy.
        if ret != "0":
            log.error("Call vss fail err is %s", stderr)
            self._report_failed_progress(ExchangeReportDBLabel.DATABASE_RESTORE_VSS_FAILED,
                                         [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
            return False
        self._report_running_progress(ExchangeReportDBLabel.DATABASE_RESTORE_VSS_SUCCESS,
                                      [ip, target_database_name], ProgressConst.MIN_PROGRESS)
        return True

    def copy_restore_log_file_to_data(self, agent_id, database_guid, target_name):
        """
        Stage log-restore files from the log repository into the data repository.

        Reads the log-file -> timestamp map, selects every file recorded before
        the requested restore time, and copies those files into the data repo.

        :param agent_id: agent that produced the backup copy
        :param database_guid: GUID of the backed-up database
        :param target_name: name of the target database (for reporting)
        """
        # Mount points of the data and log repositories.
        data_mount_path = RestoreParamParser.get_data_restore_repositories_data_path(self._json_param)
        log_mount_path = RestoreParamParser.get_log_restore_repositories_log_path(self._json_param)
        log.info(f"Get data path {data_mount_path}，log path {log_mount_path}.")

        # Load the mapping of log files to their timestamps.
        mailbox_database_guid = PathSymbol.MAILBOX_PREFIX + database_guid
        file_timestamp_map_path = os.path.join(log_mount_path, mailbox_database_guid, "log_to_time")
        file_timestamp_map = read_file(file_timestamp_map_path)
        log.info(f"Read file timestamp map, path: {file_timestamp_map_path} "
                    f"file_timestamp_map : {file_timestamp_map}.")

        # Requested point-in-time for the restore.
        target_timestamp = int(RestoreParamParser.get_restore_time_stamp(self._json_param))
        log.info(f"Get restore timestamp: {target_timestamp}.")

        # Select every log file recorded before the target timestamp.
        restore_log_flies = self.get_restore_log_to_time(file_timestamp_map, target_name, target_timestamp)

        destination_dir = os.path.join(data_mount_path, agent_id, mailbox_database_guid)
        source_dir = os.path.join(log_mount_path, mailbox_database_guid)
        # Copy each selected file from the log repository to the data repository.
        for log_file in restore_log_flies:
            self.copy_file(destination_dir, source_dir, log_file)

    def copy_file(self, mailbox_data_dir, mailbox_log_dir, restore_log_file):
        """
        Copy one restore log file from the log repository into the data repository.

        Missing source files are skipped silently; copy errors are logged and
        swallowed (best effort).
        """
        source = os.path.join(mailbox_log_dir, restore_log_file)
        if not os.path.isfile(source):
            return
        destination = os.path.join(mailbox_data_dir, restore_log_file)
        try:
            # copy2 preserves file metadata (timestamps) alongside the content.
            shutil.copy2(source, destination)
        except Exception as exception_info:
            log.error(f"Copy file err: {exception_info}, job id: {self._job_id}")

    def get_restore_log_to_time(self, file_timestamp_map, target_name, target_timestamp):
        """
        Collect log files recorded strictly before the target timestamp and
        report the actually recoverable time range to the job details.

        Relies on file_timestamp_map iterating in ascending timestamp order.

        :param file_timestamp_map: mapping of log file path -> timestamp
        :param target_name: target database name (for the report)
        :param target_timestamp: requested restore point
        :return: list of log file paths to stage for the restore.
        """
        restore_log_flies = []
        begin_time = ParamConstant.EMPTY_DATA
        end_time = ParamConstant.EMPTY_DATA
        last_file = ParamConstant.EMPTY_DATA
        for file_path, timestamp in file_timestamp_map.items():
            if timestamp < target_timestamp:
                begin_time = timestamp
                last_file = file_path
                restore_log_flies.append(file_path)
            elif end_time == ParamConstant.EMPTY_DATA:
                # First timestamp at/after the target bounds the range on the right.
                log.info(f"Get timestamp {timestamp}, bigger than target_timestamp {target_timestamp}.")
                end_time = timestamp
        if end_time == ParamConstant.EMPTY_DATA:
            log.info("Target restore time use last copy file")
            end_time = target_timestamp
        log.info(f"Get restore_log_flies {restore_log_flies}.")
        if begin_time == ParamConstant.EMPTY_DATA:
            # Nothing precedes the target time: no range to report.
            return restore_log_flies
        job_info = ExchangeJobInfo("")
        job_info.restore_begin_time = ExchangeParamParse.convert_timestamp_to_datetime(begin_time)
        job_info.restore_end_time = ExchangeParamParse.convert_timestamp_to_datetime(end_time)
        job_info.restore_file = last_file
        job_info.restore_target_time = ExchangeParamParse.convert_timestamp_to_datetime(target_timestamp)
        job_info.restore_db_name = target_name
        self.report_restore_log_to_time(job_info)
        return restore_log_flies

    def report_restore_log_to_time(self, job_info: ExchangeJobInfo):
        """
        Report which log file the point-in-time restore actually used and the
        real time range it covers, via the RPC job-details channel.
        """
        # Label params: database, chosen restore point, used file, actual begin/end.
        detail_params = [job_info.restore_db_name, job_info.restore_target_time,
                         job_info.restore_file, job_info.restore_begin_time,
                         job_info.restore_end_time]
        log_detail = LogDetail(logInfo=ExchangeReportDBLabel.DATABASE_BACKUP_RESTORE_TIME_INFO_SUCCESS,
                               logInfoParam=detail_params,
                               logLevel=DBLogLevel.INFO.value)
        log.info(f"Get restore log to time detail {log_detail}")
        details = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, progress=self._progress,
                                logDetail=[log_detail], taskStatus=SubJobStatusEnum.RUNNING.value,
                                dataSize=0).dict(by_alias=True)
        report_job_details_by_rpc(self._job_id, self._p_id, self._sub_job_id, details)

    def do_restore_to_rdb_with_vss(self, target_edb_path, target_log_path):
        """
        Restore copy data into the recovery database (named after the job id)
        using the VSS tool.

        :param target_edb_path: edb file path of the recovery database
        :param target_log_path: log directory of the recovery database
        :return: True on success, False when the VSS tool reports an error.
        :raises ExchangeInternalException: when the copy carries no agent id.
        """
        target_database_name = self._job_id
        agent_id = RestoreParamParser.get_backup_copy_agent_id(self._json_param)
        if agent_id is None:
            log.error("Do restore with vss fail case:copy agent id empty in fine grained restore")
            raise ExchangeInternalException("Do restore with vss fail case:copy agent id empty")

        data_path, database_guid, meta_path = self.get_data_and_meta_path(agent_id)
        # GUID of the recovery database, looked up by name.
        target_database_guid = self._get_exchange_database_guid(database_name=target_database_name)
        # Log file prefix (derived from the .chk file name in the data repository).
        log_prefix = self._get_exchange_database_log_file_prefix(database_name=target_database_name)
        # Quote both paths so spaces survive the command line.
        quoted_edb = "\"" + target_edb_path + "\""
        quoted_log = "\"" + target_log_path + "\""
        cmd = (f"{ParamConstant.VSS_TOOL_PATH} Restore "
               f"{meta_path} {data_path} {quoted_edb} {quoted_log}"
               f" {database_guid} {target_database_guid} {log_prefix}")
        ret, _, stderr = execute_cmd(cmd)
        # execute_cmd returns the exit code as a string; "0" means success.
        if ret != "0":
            log.error(f"Call vss fail err is {stderr}")
            return False
        return True

    def get_data_and_meta_path(self, agent_id):
        """
        Build the quoted data- and meta-repository paths for the VSS tool.

        :param agent_id: agent that produced the backup copy
        :return: (data_path, database_guid, meta_path); both paths come already
                 wrapped in quotes, or "" when the repository type is absent.
        """
        database_guid = RestoreParamParser.get_exchange_database_guid(self._json_param)
        repositories = RestoreParamParser.get_exchange_database_copy_repo(self._json_param)
        meta_path = ""
        data_path = ""
        for repo in repositories:
            repo_type = repo.get("repositoryType")
            if repo_type == RepositoryDataTypeEnum.META_REPOSITORY.value:
                mount = repo.get("path", [""])[0]
                meta_path = PathSymbol.QUOTATION + mount + PathSymbol.SLASHES + agent_id + PathSymbol.QUOTATION
            elif repo_type == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                mount = repo.get("path", [""])[0]
                data_path = (PathSymbol.QUOTATION + mount + PathSymbol.SLASHES + agent_id + PathSymbol.SLASHES
                             + PathSymbol.MAILBOX_PREFIX + database_guid + PathSymbol.QUOTATION)
        return data_path, database_guid, meta_path

    def do_restore_clean_up(self):
        """
        Clean up after the VSS restore: optionally remount the database and,
        for DAG targets, re-seed the passive copies.
        :return: True on success, False otherwise.
        """
        log.info("Enter ExchangeRestoreDatabase.do_restore_clean_up")
        database_name = RestoreParamParser.get_copy_database_name(self._json_param)
        new_database_name = RestoreParamParser.get_restore_new_database_name(self._json_param)
        # NOTE(review): both branches of this conditional yield new_database_name;
        # presumably intended to distinguish rename vs. same-name restores -- confirm.
        target_database_name = database_name if database_name == new_database_name else new_database_name
        # Remount the database when the user requested automatic mount.
        auto_mount = RestoreParamParser.get_auto_mount(self._json_param)
        ip = RestoreParamParser.get_current_node_endpoint(self._json_param)
        if "true" == auto_mount:
            try:
                self._mount_exchange_database(target_database_name)
                self._report_running_progress(ExchangeReportDBLabel.DATABASE_RESTORE_MOUNT_SUCCESS,
                                              [ip, target_database_name], ProgressConst.MIN_PROGRESS)
            except ExchangeInternalException:
                self._report_failed_progress(ExchangeReportDBLabel.DATABASE_RESTORE_MOUNT_FAILED,
                                             [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
                return False
            # Resume DAG passive-copy replication by re-seeding the copies.
            try:
                target_type = RestoreParamParser.get_restore_target_type(self._json_param)
                if ExchangeRestoreTargetType.DAG == target_type:
                    self._update_passive_copy_seed(target_database_name)
                    self._report_running_progress(ExchangeReportDBLabel.DATABASE_SET_RESTORE_SUCCESS,
                                                  [ip, target_database_name], ProgressConst.MIN_PROGRESS)
            except ExchangeInternalException:
                self._report_failed_progress(ExchangeReportDBLabel.DATABASE_SET_RESTORE_FAILED,
                                             [ip, target_database_name], ProgressConst.PROGRESS_ONE_HUNDRED)
                return False
        return True

    def do_mailbox_restore(self):
        """Run the fine-grained mailbox restore sub job.

        Creates a temporary RDB database named after the job id, restores the
        copy data into it via the VSS tool, mounts it, issues one mailbox
        restore request per selected mailbox and waits for all of them to
        finish.

        :return: True on success; False (or the falsy staging-path result)
                 on failure — failure progress is reported before returning.
        """
        log.info("Enter ExchangeRestoreDatabase.do_mailbox_restore")
        # Mark the sub job as started.
        self._report_running_progress(ExchangeReportDBLabel.START_RESTORE_LABEL, [self._sub_job_id],
                                      ProgressConst.MIN_PROGRESS)
        # Background thread keeps the job alive by reporting progress.
        self.create_report_restore_progress_thread()
        # Pre-restore preparation.
        if not self.do_restore_preparation():
            self._report_failed_progress(ReportDBLabel.SUB_JOB_FALIED, [self._sub_job_id], ProgressConst.MIN_PROGRESS)
            return False
        # Staging directory on the host; falsy means the free-space check
        # already failed and reported.
        log_folder_path = self.get_database_path()
        if not log_folder_path:
            return log_folder_path
        edb_path = f"{log_folder_path}\\{self._job_id}.edb"
        request_ids = []
        try:
            # 0. Create the RDB database, named after the job id.
            self._create_rdb_database(self._job_id, edb_path, log_folder_path)
            log.info(f"0. create database: {self._job_id} successfully.")
            # 1. Restore copy data into the RDB database with VSS.
            self.do_restore_to_rdb_with_vss(edb_path, log_folder_path)
            log.info(f"1. restore database: {self._job_id} using vss tool successfully.")
            # 2. Mount the RDB database.
            self._mount_exchange_database(self._job_id)
            log.info(f"2. mount database: {self._job_id} successfully.")
            # 3. Create one restore request per selected mailbox.
            for idx, restore_obj in enumerate(RestoreParamParser.get_fine_grained_restore_objects(self._json_param)):
                mailbox = json.loads(restore_obj.get("name"))
                request_ids.append(
                    self._restore_mailbox(f"{self._job_id}-{idx}", self._job_id, mailbox["uuid"],
                                          mailbox["name"].split("@")[0]))
            log.info(f"3. create restore mailbox requests successfully, size: {len(request_ids)}.")
            self.save_request_ids(request_ids)
            # 4. Block until every request reaches a terminal state.
            self.get_restore_request_status(request_ids)
            self._report_completed_progress(ExchangeReportDBLabel.SUB_JOB_SUCCESS, [self._sub_job_id],
                                            ProgressConst.PROGRESS_ONE_HUNDRED)
            return True
        except Exception as e:
            log.error(f"mailbox fine grained restore failed, err: {e}!", exc_info=True)
            self._report_failed_progress(ExchangeReportDBLabel.SUB_JOB_FALIED, [self._sub_job_id],
                                         ProgressConst.PROGRESS_ONE_HUNDRED)
            return False

    def get_restore_request_status(self, request_ids):
        """Poll all mailbox restore requests every 30 seconds until each one
        reports "Completed", reporting per-mailbox progress along the way.

        :param request_ids: ids returned when the restore requests were created.
        :raises Exception: as soon as any request reports "Failed".
        """
        reported_mailboxes = {}
        prev_completed_count = 0
        while True:
            status = self.check_exchange_restore_request_status(request_ids)
            log.info(f"restore_mailbox status: {status}")
            # One failed request fails the whole sub job immediately.
            if any(item["Status"]["Value"] == "Failed" for item in status.values()):
                err_msg = f"Request has failed for job: {self._job_id}"
                log.error(err_msg)
                raise Exception(err_msg)
            current_status = [item["Status"]["Value"] == "Completed" for item in status.values()]
            log.info(f"restore_mailbox current_status: {current_status}")
            log.info(f"wait for all requests finished..., current status: {current_status}")
            # Report mailboxes newly completed since the previous poll.
            self.check_mail_restore_progress(request_ids, status, reported_mailboxes, prev_completed_count)
            prev_completed_count = current_status.count(True)
            if all(current_status):
                log.info(f"4. all requests have finished for job: {self._job_id}")
                break
            time.sleep(30)

    def check_mail_restore_progress(self, request_ids, status, already_finished_mail_name, last_already_finished):
        """Report one progress entry per mailbox newly seen as "Completed".

        :param request_ids: all restore request ids (used for the total count).
        :param status: per-request status dicts keyed by request id.
        :param already_finished_mail_name: mutated in place — remembers the
               mailboxes reported in earlier polling rounds.
        :param last_already_finished: completed count at the previous poll,
               used to number the newly finished mailboxes sequentially.
        """
        newly_done = 0
        # Any mailbox that finished during the last 30s window gets its own
        # progress report, numbered one after another.
        for item in status.values():
            if item["Status"]["Value"] != "Completed":
                continue
            mail_name = str(item["TargetMailbox"]["Name"])
            if already_finished_mail_name.get(mail_name, None) is not None:
                continue
            newly_done += 1
            already_finished_mail_name[mail_name] = True
            self._report_running_progress(
                ExchangeReportDBLabel.EXCHANGE_RESTORE_MAILBOX_PROGRESS,
                [self._sub_job_id, mail_name,
                 str(last_already_finished + newly_done),
                 str(len(request_ids))], ProgressConst.MIN_PROGRESS)

    def check_exchange_restore_request_status(self, request_ids):
        """Query the status of every restore request, keyed by request id.

        Request names follow the "<job_id>-<index>" convention used when the
        requests were created in do_mailbox_restore.
        """
        return {
            request_id: self._get_mailbox_restore_request_status(f"{self._job_id}-{idx}")
            for idx, request_id in enumerate(request_ids)
        }

    def get_database_path(self):
        """Return a clean staging directory path for the temporary edb, or
        False when the target disk lacks sufficient free space (the space
        check reports the failure itself)."""
        staging_path = self.get_edb_path()
        if not self.check_free_space(staging_path):
            return False
        # Remove leftovers from a previous run of the same job, if any.
        if os.path.exists(staging_path):
            shutil.rmtree(staging_path)
        return staging_path

    def get_edb_path(self):
        """Resolve the drive used to stage the temporary edb and return
        "<drive>\\<job_id>".

        The "edbStorageDisk" config value overrides the Exchange install path;
        a blank value falls back to the install path as well.

        :raises Exception: when the resolved drive root does not exist.
        """
        install_path = get_exchange_install_path()
        configured = get_exchange_config().get("edbStorageDisk", install_path).strip()
        base_path = configured if configured else install_path
        drive = os.path.splitdrive(base_path)[0]
        if not os.path.exists(drive):
            log.error(f"Edb storage disk {base_path} is invalid")
            raise Exception(f"Edb storage disk {base_path} is invalid")
        # Join with a leading backslash so the result is rooted at the drive.
        return os.path.join(drive, f"\\{self._job_id}")

    # Verify the edb storage disk has enough free space for the backup copy.
    def check_free_space(self, target_path):
        """Check the target drive against the copy size (KB scaled to bytes
        with a 20% safety margin); report and return False when insufficient.

        :param target_path: staging path whose first character is the drive letter.
        :return: True when there is enough room, False otherwise.
        """
        required_bytes = RestoreParamParser.get_copy_size(self._json_param) * 1024 * 1.2
        free_bytes = get_free_space(target_path)
        drive_letter = target_path[0]
        free_bytes_str = str(free_bytes) + " Byte"
        required_bytes_str = str(required_bytes) + " Byte"
        exchange_conf_path = get_exchange_conf_path()
        if required_bytes <= free_bytes:
            return True
        log.error(
            f"The storage space on the {drive_letter} drive is insufficient, free: {free_bytes},"
            f" need: {required_bytes}.")
        self._report_failed_progress(ReportDBLabel.RESTORE_MAILBOX_STORAGE_SPACE_FAILED,
                                     [drive_letter, free_bytes_str, required_bytes_str, exchange_conf_path],
                                     ProgressConst.PROGRESS_ONE_HUNDRED)
        return False

    def generate_sub_job_list(self, target_node_id):
        """Build the business sub-job list pinned to the given execution node.

        :param target_node_id: agent node id that must execute the restore.
        :return: single-element list of SubJobModel dicts (camelCase keys,
                 via dict(by_alias=True)).
        """
        job_list = [
            # NOTE(review): "sub_job_id" is passed as a snake_case keyword while
            # every other field uses its camelCase alias (jobId, jobType, ...);
            # confirm SubJobModel allows population by field name, otherwise
            # this keyword may be silently ignored.
            SubJobModel(jobId=self._job_id, sub_job_id=self._sub_job_id,
                        jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                        jobName=RestoreSubJobName.ACTUALL_RESTORE,
                        jobPriority=SubJobPriorityEnum.JOB_PRIORITY_1.value,
                        policy=SubJobPolicyEnum.FIXED_NODE.value, execNodeId=target_node_id).dict(by_alias=True)
        ]
        log.info("final target node id is %s", target_node_id)
        return job_list

    def get_target_edb_and_log_path(self, target_database_name):
        """Return (edb_path, log_path) for the restore target: the paths from
        the job params when both are present, otherwise the paths resolved
        from the existing target database."""
        edb_path = RestoreParamParser.get_target_database_edb_path(self._json_param)
        log_path = RestoreParamParser.get_target_database_log_path(self._json_param)
        if edb_path and log_path:
            return edb_path, log_path
        return self._get_edb_and_log_path(database_name=target_database_name)

    def check_same_edb_and_log_path(self, target_database_name):
        """Return True when the edb/log paths from the job params match the
        target database's actual paths, or when the params supply no paths
        (nothing to compare)."""
        param_edb = RestoreParamParser.get_target_database_edb_path(self._json_param)
        param_log = RestoreParamParser.get_target_database_log_path(self._json_param)
        if not (param_edb and param_log):
            return True
        actual_edb, actual_log = self._get_edb_and_log_path(database_name=target_database_name)
        return param_edb == actual_edb and param_log == actual_log

    def generate_dag_restore_sub_job(self, nodes_list, target_database_name):
        """Emit the DAG restore sub job pinned to the node hosting the target
        database; writes nothing when no node matches (unchanged behavior).

        :param nodes_list: agent node dicts; expected keys "name" and "id".
        :param target_database_name: database whose hosting server selects the node.

        Fix: a node without a "name" entry no longer raises AttributeError on
        None.lower(); such nodes are skipped.
        """
        server_name = self._get_exchange_database_server_name(target_database_name)
        for node in nodes_list:
            # Tolerate nodes with a missing/None "name" field.
            node_name = node.get("name") or ""
            if server_name.lower() == node_name.lower():
                log.info("find node do DAG database restore")
                job_list = self.generate_sub_job_list(node.get("id"))
                output_result_file(self._p_id, job_list)
                break

    def save_request_ids(self, request_ids):
        """Persist the mailbox restore request ids to the cache repository so
        later stages (query/cleanup) can find them.

        :param request_ids: list of restore request ids for this job.

        Fixes: open with O_TRUNC — without it, rewriting a shorter id list
        would leave stale trailing bytes of a previous (longer) JSON document
        and corrupt the later json.load; create the directory with
        exist_ok=True instead of an exists()/makedirs() pair (TOCTOU race).
        """
        cache_path = self.get_request_id_file_path()
        os.makedirs(cache_path, exist_ok=True)
        # O_TRUNC guarantees the file holds exactly this JSON document.
        flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        modes = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR
        with os.fdopen(os.open(os.path.join(cache_path, "mailbox_request_ids"), flags, modes), 'w') as f:
            json.dump({self._job_id: request_ids}, f)

    def get_request_ids(self):
        """Load and return the restore request ids saved by save_request_ids,
        removing the cache directory afterwards.

        :return: the id list saved for this job id, or [] when absent.

        Fix: the cache directory is only removed when it exists; previously
        the unconditional shutil.rmtree raised FileNotFoundError whenever the
        cache path was missing (e.g. save_request_ids was never reached).
        """
        cache_path = self.get_request_id_file_path()
        file_name = os.path.join(cache_path, "mailbox_request_ids")
        result = {}
        if os.path.exists(cache_path) and os.path.exists(file_name):
            with open(file_name, 'r', encoding="UTF-8") as f:
                result = json.load(f)
        # Clean up the cache data, tolerating an absent directory.
        if cache_path and os.path.exists(cache_path):
            shutil.rmtree(cache_path)
        return result.get(self._job_id, [])

    def get_request_id_file_path(self):
        """Return "<cache_repo_mount>/<job_id>", or "" when the copy's
        repository list contains no cache repository. If several cache
        repositories are listed, the last one wins."""
        cache_path = ""
        for repo in RestoreParamParser.get_exchange_database_copy_repo(self._json_param):
            if repo.get("repositoryType") != RepositoryDataTypeEnum.CACHE_REPOSITORY.value:
                continue
            cache_path = os.path.join(repo.get("path", [""])[0], self._job_id)
        return cache_path

    def create_report_restore_progress_thread(self):
        """Start a background daemon thread that periodically reports restore
        progress (report_restore_progress) until the job reaches 100%.

        Fix: pass daemon=True to the Thread constructor instead of calling
        Thread.setDaemon(), which is deprecated since Python 3.10.
        """
        progress_thread = Thread(target=self.report_restore_progress, daemon=True)
        progress_thread.start()

    def report_restore_progress(self):
        """Report RUNNING status every 30 seconds until self._progress reaches
        100; intended to run in the background daemon thread started by
        create_report_restore_progress_thread."""
        while True:
            if self._progress >= ProgressConst.PROGRESS_ONE_HUNDRED:
                break
            detail = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                   taskStatus=SubJobStatusEnum.RUNNING, progress=self._progress).dict(by_alias=True)
            report_job_details_by_rpc(self._job_id, self._p_id, self._sub_job_id, detail)
            time.sleep(30)
