#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import os
import time
from random import sample

from common.common import output_execution_result_ex, execute_cmd
from common.common_models import SubJobModel
from common.const import SubJobStatusEnum, SubJobTypeEnum, SubJobPriorityEnum, SubJobPolicyEnum, \
    CopyDataTypeEnum
from sqlserver import log
from sqlserver.commons.common import output_action_result, get_env_variable, check_is_sub_job, \
    get_restore_channel, is_new_restore_path
from sqlserver.commons.const import Progress as ProgressConst
from sqlserver.commons.const import RestoreTypeEnum, VDIActionType, ParamConstant, ExecCmdResult, RestoreSubJobName, \
    SQLServerProgressFileType, BodyErr, SQLServerCode, SqlSeverErrorKeyword, \
    SqlServerReportLabel, SubJobExecResult
from sqlserver.commons.sqlserver_utils import get_format_restore_time
from sqlserver.sqlserver_restore_base import SQLServerRestoreBase

MAX_RETRY = 3
TIME_INTERNAL = 5


class SQLServerRestoreAlwaysOn(SQLServerRestoreBase):
    def __init__(self, p_id, job_id, sub_job_id, json_param):
        super().__init__(p_id, job_id, sub_job_id, json_param)
        self._job_id = job_id
        self.group_name = ""  # 待恢复可用性组名称
        self.sub_job_name = ""  # 子任务名称
        self.database_list = list()  # 待恢复数据库列表
        self.instance_name = ""
        self.node_ip = ""
        self.database_num = 0
        self.auth_mode = ""
        self.user_name = ""
        self.pass_prefix = ""

    @staticmethod
    def prepare_file_info(database, meta_info):
        # 新位置恢复返回数据库文件路径信息
        restore_info = ""
        if not meta_info:
            return restore_info
        for info in meta_info.get(database):
            restore_info = "{},{}".format(restore_info, f"move '{info.get('name')}' to '{info.get('physical_name')}'")
        return restore_info

    def exec_restore_pre_job(self):
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value,
                                    100, "", SQLServerProgressFileType.COMMON)
        return True

    def parse_group_name(self):
        # 解析待恢复可用性组名称
        copies = self._json_param.get("job", {}).get("copies")
        if not copies:
            raise Exception("Failed to get copies")
        for copy in copies:
            if copy.get("type") in [CopyDataTypeEnum.FULL_COPY.value,
                                    CopyDataTypeEnum.S3_ARCHIVE.value, CopyDataTypeEnum.TAP_ARCHIVE.value]:
                self.group_name = copy.get("protectObject", {}).get("name", None)
                break

    def get_env_info(self, num, node_info):
        self.instance_name = node_info.get("extendInfo", {}).get("instance")
        self.node_ip = node_info.get("endpoint")
        self.auth_mode = get_env_variable(f"job_targetEnv_nodes_{num}_auth_authType_{self._p_id}")
        self.user_name = get_env_variable(f"job_targetEnv_nodes_{num}_auth_authKey_{self._p_id}")
        self.pass_prefix = f'job_targetEnv_nodes_{num}_auth_authPwd_{self._p_id}'
        self.check_and_prepare_user_info(self.instance_name)

    def get_hostname(self):
        cmd = 'hostname'
        ret, std_out, std_err = execute_cmd(cmd)
        if int(ret) != 0:
            return ""
        return std_out.strip()

    def prepare_user_info(self):
        self.parse_group_name()
        if not self.group_name:
            raise Exception("Failed to get group name")
        host_name = self.get_hostname()
        # 可用性组认证信息从targetEnv中获取
        nodes = self._json_param.get("job", {}).get("targetEnv", {}).get("nodes", [])
        if not nodes:
            raise Exception("Failed to get nodes info")
        for idx, node in enumerate(nodes):
            if node.get("name") == host_name:
                self.get_env_info(idx, node)
                break

    def check_new_restore_in_local_node(self, copy_info):
        # 新位置检查可用性组是否存在
        ret = self.sql_utils.check_group_is_exist(self.group_name)
        if ret:
            output_action_result(self._p_id, SQLServerCode.FAILED.value,
                                 BodyErr.RESTORE_FAIL_FOR_ALWAYSON_EXIST.value, "")
            log.error(f"Target group: {self.group_name} exist. can not restore")
            return False
        # 新位置检查集群节点数量是否满足恢复
        current_node = self._json_param.get("job", {}).get("targetEnv", {}).get("nodes", [])
        src_replica_info = copy_info.get("replicaInfo", [])
        if len(src_replica_info) <= len(current_node):
            log.info("Succeed to exec check job for new restore")
            return True
        output_action_result(self._p_id, SQLServerCode.FAILED.value,
                             BodyErr.RESTORE_FAIL_FOR_NODE_NOT_MEET.value, "")
        log.error("Current cluster can not restore for nodes is less than replica")
        return False

    def check_original_restore_in_local_node(self):
        """
        检查原位置恢复是否支持：主副本节点
        :return:
        """
        ret = self.sql_utils.check_is_primary_replica(self.group_name)
        if not ret:
            output_action_result(self._p_id, SQLServerCode.FAILED.value, BodyErr.ERR_PLUGIN_CANNOT_BACKUP.value, "")
            log.error("Local replica is not primary replica")
            return False
        return True

    def allow_restore_in_local_node(self):
        log.info(f"Start to exec allow in local. pid: {self._p_id} jobId: {self._job_id}")
        if check_is_sub_job(self._json_param):
            log.info(f"Allow sub job. pid: {self._p_id} jobId: {self._job_id}")
            return True
        self.prepare_user_info()
        # 检查实例是否运行
        if not self.sql_utils.check_service_is_running(self.instance_name):
            output_action_result(self._p_id, SQLServerCode.FAILED.value,
                                 BodyErr.INSTANCE_STATE_ABNORMAL.value, "", [self.node_ip, self.instance_name])
            log.error(f"The instance: {self.instance_name} is not running")
            return False
        # 检查版本是否一致
        copy_info = self.get_extend_info()
        if not self.sql_utils.check_version_is_matched(copy_info.get("version", "")):
            output_action_result(self._p_id, SQLServerCode.FAILED.value,
                                 BodyErr.RESTORE_FAIL_FOR_VERSION_INCONSISTENCY.value, "")
            log.error(f"Version is different and can not restore")
            return False
        is_new_restore = is_new_restore_path(self._json_param)
        if is_new_restore:
            # 新位置恢复检查
            return self.check_new_restore_in_local_node(copy_info)
        # 原位置恢复检查
        return self.check_original_restore_in_local_node()

    def build_sub_job(self, job_name, priority, policy, node_id: str = None):
        return SubJobModel(
            jobId=self._job_id, jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
            jobName=job_name, jobPriority=priority, policy=policy, execNodeId=node_id).dict(by_alias=True)

    def gen_sub_job(self):
        """
        拆分子任务
        :return:
        """
        extend_info = self.get_extend_info()
        src_node_info = [node.get("node_name") for node in extend_info.get("nodeInfo", [{}])]
        current_nodes = self._json_param.get("job", {}).get("targetEnv", {}).get("nodes", [])
        if not current_nodes or not src_node_info:
            log.error("Failed for node info is empty")
            return False
        host_name = self.get_hostname()
        # 拆分任务节点执行指定任务，从current_nodes移除
        for node in current_nodes:
            if node.get("name") == host_name:
                current_nodes.remove(node)
                break
        # 根据副本个数随机选择节点执行恢复
        node_list = sample(current_nodes, len(src_node_info) - 1)
        log.info("Succeed to get node info and start to gen sub job")
        job_list = list()
        # 生成子任务时保存实例信息，创建可用性组时使用
        instance_info = [host_name]
        # 主节点子任务，拆分任务节点执行
        job_list.append(self.build_sub_job(RestoreSubJobName.CHECK_JOB, SubJobPriorityEnum.JOB_PRIORITY_1.value,
                                           SubJobPolicyEnum.LOCAL_NODE.value))
        job_list.append(self.build_sub_job(RestoreSubJobName.PRIMARY_JOB, SubJobPriorityEnum.JOB_PRIORITY_2.value,
                                           SubJobPolicyEnum.LOCAL_NODE.value))
        is_new_restore = is_new_restore_path(self._json_param)
        # 辅助节点子任务，指定节点执行
        for node in node_list:
            job_list.append(self.build_sub_job(
                RestoreSubJobName.CHECK_JOB, SubJobPriorityEnum.JOB_PRIORITY_1.value,
                SubJobPolicyEnum.FIXED_NODE.value, node.get("id")))
            job_list.append(self.build_sub_job(
                RestoreSubJobName.SECONDARY_JOB, SubJobPriorityEnum.JOB_PRIORITY_3.value,
                SubJobPolicyEnum.FIXED_NODE.value, node.get("id")))
            instance_info.append(node.get("name"))
        # 新位置恢复保存hostname信息
        if is_new_restore:
            self.get_cache_path()
            file_path = os.path.join(self.cache_path, "instance_info")
            log.info(f"Save instance info, path: {file_path}, json: {instance_info}")
            output_execution_result_ex(file_path, instance_info)
        file_path = os.path.join(ParamConstant.RESULT_PATH, f"result{self._p_id}")
        output_execution_result_ex(file_path, job_list)
        return True

    def exec_gen_sub_job(self):
        log.info(f"Start to generate sub tasks. pid: {self._p_id}, jobId: {self._job_id}")
        try:
            result = self.gen_sub_job()
        except Exception as exception_str:
            log.exception(
                f"Exception when generate sub tasks. pid: {self._p_id}, jobId: {self._job_id}, error:{exception_str}")
            return False
        log.info(f"Generate sub tasks result: {result}. pid: {self._p_id}, jobId: {self._job_id}")
        return result

    def create_availability_group(self):
        # 创建可用性组
        copy_info = self.get_extend_info()
        replica_info = copy_info.get("replicaInfo")
        replica_list = list()
        for replica in replica_info:
            replica_list.append(f"AVAILABILITY_MODE={replica.get('availability_mode_desc')},"
                                f"FAILOVER_MODE={replica.get('failover_mode_desc')}")
        # 拆分子任务时已保存实例名称
        file_path = os.path.join(self.cache_path, "instance_info")
        instance_info = self.read_temp_file(file_path)
        replica_num = len(replica_list)
        instance_num = len(instance_info)
        if replica_num != instance_num:
            log.error(f"Inconsistent recovery information, replica num: {replica_num}, instance num: {instance_num}")
            raise Exception("Inconsistent recovery information")
        port = self.sql_utils.get_availability_endpoint()
        cmd = f"CREATE AVAILABILITY GROUP [{self.group_name}] for REPLICA ON"
        for i in range(replica_num):
            info = f"'{instance_info[i]}' WITH (ENDPOINT_URL='TCP://{instance_info[i]}:{port}',{replica_list[i]})"
            if i == replica_num - 1:
                cmd = "{} {};".format(cmd, info)
            else:
                cmd = "{} {},".format(cmd, info)
        _, std_out, _ = self.sql_utils.get_command_result(f"sqlcmd {self.user_info} -Q \"{cmd}\"")
        if "Failed to create" in std_out:
            log.error(f"Failed to create availability group: {self.group_name} as out: {std_out}")
            raise Exception("Failed to create availability group")
        log.info(f"Succeed to create availability group: {self.group_name}")

    def delete_availability_group(self):
        cmd = f"DROP AVAILABILITY GROUP [{self.group_name}];"
        code, std_out, std_err = self.sql_utils.get_command_result(f"sqlcmd {self.user_info} -Q \"{cmd}\"")
        if code == ExecCmdResult.SUCCESS:
            log.info(f"Succeed to delete availability group: {self.group_name}")
            return
        log.error(f"Failed to delete availability group: {self.group_name}, std_out: {std_out}, std_err: {std_err}")

    def get_restore_databases(self):
        # 移除不满足恢复条件的数据库
        restore_sub_object = self._json_param.get("job", {}).get("restoreSubObjects", [])
        if not restore_sub_object:
            copy_info = self.get_extend_info()
            self.database_list = copy_info.get("databaseList", [])
        else:
            # 支持细粒度恢复
            for sub_object in restore_sub_object:
                self.database_list.append(sub_object.get("name", "/").strip('/'))

    def calculate_interval_progress(self):
        try:
            ProgressConst.INTERVAL_PROGRESS = int(ProgressConst.MAX_PROGRESS / self.database_num)
        except ZeroDivisionError as err:
            raise Exception("The divisor is 0.") from err

    def do_restore(self, database, vdi_type, data_path, meta_path, recovery_time: str = ""):
        log.info(f"Start to restore database: {database}")
        path = os.path.join(data_path, f"{database}.bak")
        if not os.path.exists(path):
            log.error(f"Copy info: {path} not exist")
            raise Exception(f"Copy path: {path} not exist")
        log.info(f"new_path:{self.new_restore_path}")
        restore_info = self.get_restore_info(database, self.new_restore_path, meta_path)
        channel = get_restore_channel(data_path, database)
        cmd = f"{ParamConstant.VDI_TOOL_PATH} {vdi_type} \"{database}\" \"{path}\" \"{self.vdi_info}\" " \
              f"\"{recovery_time}\" \"{restore_info}\" {channel}"
        code, std_out, std_err = self.sql_utils.get_command_result(cmd)
        log.info(f"cmd:{cmd},code:{code},std_out:{std_out},std_err:{std_err}")
        if SqlSeverErrorKeyword.INUSE in std_out:
            log.error(f"Failed to restore database: {database}, out: {std_out}, err: {std_err}")
            raise Exception(f"{database} is in use")
        if code == ExecCmdResult.SUCCESS:
            log.info(f"End to restore database: {database}")
            return
        log.error(f"Failed to restore database: {database}, out: {std_out}, err: {std_err}")
        raise Exception(f"Failed to restore database")

    def report_running_progress(self):
        SubJobExecResult.SUCCESS_NUM += 1
        ProgressConst.PROGRESS += ProgressConst.INTERVAL_PROGRESS
        self.running_tail.log_info_param = [
            self._sub_job_id, f"{self.database_num - SubJobExecResult.SUCCESS_NUM}",
            f"{SubJobExecResult.SUCCESS_NUM}"
        ]
        self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, ProgressConst.PROGRESS,
                                    self.running_tail, self._sub_job_id)

    def full_restore(self):
        log.info(f"Start to full restore databases.")
        if not self.full_copy:
            log.error("Full copy not exit")
            raise Exception("Full copy not exist")
        if self.sub_job_name == RestoreSubJobName.PRIMARY_JOB and \
                self.restore_type == RestoreTypeEnum.FULL_RESTORE.value:
            vdi_type = VDIActionType.FULL_RESTORE_WITH_REPLACE.value
        else:
            vdi_type = VDIActionType.DIFF_RESTORE_WITH_NO_RECOVERY_REPLACE.value
        for database in self.database_list:
            log.info(f"Start to restore database: {database}")
            self.do_restore(database, vdi_type, self.full_copy.get("dataPath"), self.full_copy.get("metaPath"))
            self.report_running_progress()
            log.info(f"Succeed to restore database: {database}")
        log.info("End to full restore")

    def diff_restore(self):
        log.info(f"Start to diff restore databases")
        if not self.full_copy:
            log.error("Full copy not exist")
            raise Exception("Full copy not exist")
        for database in self.database_list:
            log.info(f"Start to restore database: {database}")
            # 先进行全量恢复
            vdi_type = VDIActionType.DIFF_RESTORE_WITH_NO_RECOVERY_REPLACE.value
            self.do_restore(database, vdi_type, self.full_copy.get("dataPath"), self.full_copy.get("metaPath"))
            # 进行差异恢复
            if self.sub_job_name == RestoreSubJobName.PRIMARY_JOB:
                vdi_type = VDIActionType.DIFF_RESTORE_WITH_RECOVERY.value
            self.do_restore(database, vdi_type, self.diff_copy.get("dataPath"), self.diff_copy.get("metaPath"))
            self.report_running_progress()
            log.info(f"Succeed to restore database: {database}")
        log.info("End to diff restore")

    def log_restore(self):
        log.info(f"Start to log restore databases")
        if not self.full_copy:
            log.error("Full copy not exit")
            raise Exception("Full copy not exist")
        restore_time = self._json_param.get("job", {}).get("extendInfo", {}).get("restoreTimestamp", "")
        if restore_time:
            copies = self._json_param.get("job", {}).get("copies", [])
            restore_time = get_format_restore_time(restore_time, copies)
        last_copy_num = len(self.log_copy.get("dataPath")) - 1
        for database in self.database_list:
            log.info(f"Start to restore database: {database}")
            # 先进行全量恢复
            vdi_type = VDIActionType.DIFF_RESTORE_WITH_NO_RECOVERY_REPLACE.value
            self.do_restore(database, vdi_type, self.full_copy.get("dataPath"), self.full_copy.get("metaPath"))
            # 若存在差异副本，进行差异恢复
            if self.diff_copy:
                vdi_type = VDIActionType.LOG_RESTORE_WITH_NO_RECOVERY.value
                self.do_restore(database, vdi_type, self.diff_copy.get("dataPath"), self.diff_copy.get("metaPath"))
            # 进行日志恢复
            vdi_type = VDIActionType.POINT_IN_TIME_RESTORE_WITH_NO_RECOVERY.value
            for k, copy in enumerate(self.log_copy.get("dataPath")):
                self.do_restore(database, vdi_type, copy, copy, restore_time)
                if k == last_copy_num and self.sub_job_name == RestoreSubJobName.PRIMARY_JOB:
                    vdi_type = VDIActionType.POINT_IN_TIME_RESTORE_WITH_RECOVERY.value
                    self.do_restore(database, vdi_type, copy, copy, restore_time)
            self.report_running_progress()
            log.info(f"Succeed to restore database: {database}")
        log.info("End to log restore")
        return True

    def database_restore(self):
        if self.restore_type == RestoreTypeEnum.FULL_RESTORE.value:
            self.full_restore()
        elif self.restore_type == RestoreTypeEnum.POINT_IN_TIME_RESTORE.value:
            self.diff_restore()
        else:
            self.log_restore()

    def exec_check_job(self):
        # 检查实例是否运行
        if not self.sql_utils.check_service_is_running(self.instance_name):
            self.restore_fail_tail.log_detail = BodyErr.INSTANCE_STATE_ABNORMAL.value
            self.restore_fail_tail.log_detail_param = [self.node_ip, self.instance_name]
            log.error(f"The instance: {self.instance_name} is not running")
            raise Exception("The instance is not running")
        # 检查用户权限
        if not self.sql_utils.check_user_permission(self.auth_mode, self.user_name):
            self.restore_fail_tail.log_detail = BodyErr.SQLSERVER_PERMISSIONS_ERROR.value
            log.error("Check user permission failed")
            raise Exception("Check user permission failed")

    def exec_original_restore(self):
        log.info(f"Start to exec original restore sub job name: {self.sub_job_name}, job_id:{self._job_id}")
        if self.sub_job_name == RestoreSubJobName.CHECK_JOB:
            self.restore_success_tail.log_info = SqlServerReportLabel.SUB_JOB_SUCCESS_LABEL
            self.exec_check_job()
            # 恢复前检查是否有处于使用中的数据库
            dt_list = self.sql_utils.check_database_in_use(self.database_list)
            if dt_list:
                self.restore_fail_tail.log_detail = BodyErr.SQLSERVER_DATABASE_IN_USE_ERROR.value
                self.restore_fail_tail.log_detail_param = [self.sql_utils.list_to_str(dt_list)]
                log.error(f"The databases: {dt_list} is in use")
                raise Exception("The databases is in use")
            return True
        if self.sub_job_name == RestoreSubJobName.PRIMARY_JOB:
            for database in self.database_list:
                self.sql_utils.drop_database_from_always_on(self.group_name, database)
            log.info("Success to drop database form always on group")
        self.new_restore_path = ""
        for database in self.database_list:
            self.sql_utils.set_database_offline(database)
        self.database_restore()
        log.info("End to restore database")
        if self.sub_job_name == RestoreSubJobName.PRIMARY_JOB:
            self.sql_utils.alter_database_readable_secondary(self.group_name)
            # 将数据库加入可用性组 -- 主节点
            for database in self.database_list:
                self.sql_utils.add_database_to_always_on(self.group_name, database)
            log.info("Succeed to add databases")
        else:
            # 辅助节点数据库联接可用性组
            for database in self.database_list:
                self.sql_utils.secondary_database_join_always_on(self.group_name, database)
            log.info("Succeed to join secondary database")
        self.restore_success_tail.log_info_param = [self._sub_job_id, f"{self.database_num}"]
        return True

    def exec_new_restore(self):
        self.get_restore_new_path()
        log.info(f"Start to exec new restore sub job name: {self.sub_job_name}")
        if self.sub_job_name == RestoreSubJobName.CHECK_JOB:
            self.restore_success_tail.log_info = SqlServerReportLabel.SUB_JOB_SUCCESS_LABEL
            self.exec_check_job()
            # 是否存在同名数据库
            ret = self.sql_utils.check_db_is_exist(self.database_list)
            if ret:
                self.restore_fail_tail.log_detail = BodyErr.RESTORE_FAIL_FOR_DATABASE_EXIST.value
                log.error("Can not restore for database exist")
                raise Exception("Can not restore for database exist")
            # 新位置是否存在同名文件
            return self.check_same_filename(self.new_restore_path)
        elif self.sub_job_name == RestoreSubJobName.PRIMARY_JOB:
            # 主副本节点创建可用性组
            self.create_availability_group()
            # 恢复数据
            self.database_restore()
            # 主数据库加入可用性组
            for database in self.database_list:
                self.sql_utils.add_database_to_always_on(self.group_name, database)
            log.info("Succeed to add databases")
        else:
            # 恢复数据
            self.database_restore()
            # 辅助副本加入可用性组
            self.sql_utils.secondary_replica_add_always_on(self.group_name)
            # 辅助数据库加入可用性组
            for database in self.database_list:
                self.sql_utils.secondary_database_join_always_on(self.group_name, database)
            log.info("Succeed to join secondary database")
        self.restore_success_tail.log_info_param = [self._sub_job_id, f"{self.database_num}"]
        return True

    def do_sub_job(self):
        self.get_copy_info()
        self.sub_job_name = self._json_param.get("subJob", {}).get("jobName", None)
        is_new_restore = is_new_restore_path(self._json_param)
        # 查询可进行恢复的数据库
        self.get_restore_databases()
        if not self.database_list:
            log.error("No database can restore")
            raise Exception("No database can restore")
        self.database_num = len(self.database_list)
        self.prepare_user_info()
        self.calculate_interval_progress()
        self.running_tail.log_info_param = [self._sub_job_id, f"{self.database_num}", "0"]
        self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, ProgressConst.PROGRESS,
                                    self.running_tail, self._sub_job_id)
        if not is_new_restore:
            result = self.exec_original_restore()
        else:
            result = self.exec_new_restore()
        return result

    def exec_sub_job(self):
        log.info(f"Start to exec sub job, pid: {self._p_id}, jobId: {self._job_id}, subJobId: {self._sub_job_id}")
        try:
            result = self.do_sub_job()
        except Exception as err:
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED,
                                        self.restore_fail_tail, self._sub_job_id)
            log.error(f"Exception when do sub job as err: {err}, pid: {self._p_id}, jobId: {self._job_id}, "
                      f"subJobId: {self._sub_job_id}")
            return False
        if not result:
            log.error(f"Failed to restore. pid: {self._p_id}, jobId: {self._job_id}, subJobId: {self._sub_job_id}")
            return False
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED,
                                    self.restore_success_tail, self._sub_job_id)
        log.info(f"Succeed to exec sub job. pid: {self._p_id}, jobId: {self._job_id}, subJobId: {self._sub_job_id}")
        return True

    def report_restore_progress(self):
        return self.report_progress_comm(self._sub_job_id)

    def clear_job(self):
        # 检查可用性组是否存在，存在则删除
        if self.sql_utils.check_group_is_exist(self.group_name):
            log.info(f"Availability group: {self.group_name} exist, start to delete")
            self.delete_availability_group()
        instance_databases = self.sql_utils.get_instance_databases()
        for database in self.database_list:
            log.info(f"Start to check and delete database: {database}")
            if database not in instance_databases:
                log.info(f"Database: {database} not exist")
                continue
            self.sql_utils.drop_database(database)

    def post_job(self):
        is_new_restore = is_new_restore_path(self._json_param)
        self.get_restore_databases()
        self.prepare_user_info()
        # 非新位置恢复直接返回
        if not is_new_restore:
            log.info("Not new restore")
            return
        # 恢复成功直接返回
        restore_result = self._json_param.get("restoreJobResult", 1)
        if restore_result == 0:
            log.info("Restore succeed, no need to clear resource")
            return
        log.info("Restore failed, start to clear resource")
        # 恢复失败清理资源
        self.get_cache_path()
        file_path = os.path.join(self.cache_path, "instance_info")
        instance_info = self.read_temp_file(file_path)
        # 新位置恢复下发整个集群节点，只在选中的节点进行清理
        host_name = self.get_hostname()
        if host_name not in instance_info:
            log.info(f"Current node: {host_name} not exec restore, no need clear")
            return
        self.clear_job()

    def exec_restore_post(self):
        self.write_progress_to_file(SubJobStatusEnum.RUNNING.value, ProgressConst.PROGRESS,
                                    "", self._sub_job_id)
        try:
            self.post_job()
        except Exception as err:
            log.exception(f"Exception when post job, {err}")
            self.write_progress_to_file(SubJobStatusEnum.FAILED.value, ProgressConst.PROGRESS_ONE_HUNDRED,
                                        "", self._sub_job_id)
            return False
        self.write_progress_to_file(SubJobStatusEnum.COMPLETED.value, ProgressConst.PROGRESS_ONE_HUNDRED,
                                    "", self._sub_job_id)
        return True

    def get_restore_info(self, database, new_path, meta_path):
        restore_info = ""
        if not new_path:
            return restore_info
        if not os.path.exists(new_path):
            os.makedirs(new_path)
        # 新位置恢复，准备数据库文件信息
        meta_info = self.get_meta_info(meta_path)
        info_json = meta_info.get(database)
        for info in info_json:
            physical_name = os.path.join(new_path, info.get('physical_name'))
            restore_info = "{},{}".format(restore_info, f"move '{info.get('name')}' to '{physical_name}'")
        log.info(f"restore_info:{restore_info}")
        return restore_info
