#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import time
import datetime
import shutil
from abc import ABC, abstractmethod
from threading import Thread
from pathlib import Path

from common.common_models import SubJobModel, SubJobDetails, LogDetail
from common.parse_parafile import get_env_variable, add_env_param
from common.common import check_path_legal, convert_timestamp_to_time, check_sql_cmd_param, execute_cmd, CMDResult
from common.cleaner import clear
from common.const import SubJobStatusEnum, SubJobTypeEnum, SubJobPolicyEnum, DBLogLevel, \
    ParamConstant
from common.exception.common_exception import ErrCodeException
from generaldb.saphana.restore.saphana_parse_restore_params import SaphanaParseRestoreParam
from generaldb.saphana.comm.saphana_file_operate import log
from generaldb.saphana.resources.saphana_cluster_manager import SaphanaClusterManage
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from generaldb.saphana.comm.saphana_resource_param import SaphanaResourceParam
from generaldb.saphana.comm.saphana_const import SaphanaSubjobName, SaphanaJsonConstant, SaphanaReportInterval, \
    SaphanaProgressPhase, SaphanaRentryCount, SaphanaJobType, SaphanaConst, SaphanaPath, SaphanaTaskLabel, \
    SaphanaErrorCode, SaphanaMetadataKey
from generaldb.saphana.resources.saphana_resource_service import AUTH_CUSTOM_PARAMS
from saphanay.backup.log_backup_saphana import LogBackupSAPHANA
from common.parse_parafile import get_env_variable
from saphanay.comm.error_code_exception import ErrorCodeException
from saphanay.restore.restore_saphana import RestoreSAPHANA


class SaphanaRestoreParent(ABC):
    def __init__(self, pid, job_id, subjob_id, parse_params_obj: SaphanaParseRestoreParam):
        """Base class for SAP HANA restore tasks.

        pid: request/parameter id; used to scope env-variable lookups (see
            init_system_db_param).
        job_id / subjob_id: ids of the main job and the current sub job.
        parse_params_obj: pre-parsed restore parameters; must not be None.

        Raises Exception when params are missing, the self-define params are
        empty, or the SID does not map to a valid OS user.
        """
        if not parse_params_obj:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._pid = pid
        self._job_id = job_id
        self._subjob_id = subjob_id
        self._parse_param = parse_params_obj
        # Fetched only to validate that self-define params exist at all.
        self_define_params = self._parse_param.get_self_define_param()
        if not self_define_params:
            raise Exception("Fail to get self define params.")
        self._sid = self._parse_param.get_specified_self_define_params(SaphanaJsonConstant.SYSTEM_ID).lower()
        # The SID must correspond to an existing HANA OS user.
        if not CommonFuction.saphana_check_os_user(self._sid):
            raise Exception("Sid error.")
        self._system_db_port = self._parse_param.get_specified_self_define_params(SaphanaJsonConstant.SYSTEM_DB_PORT)
        self._system_db_user = self._parse_param.get_system_db_user()
        # May override the system DB user and stash the password in env vars.
        self.init_system_db_param()
        self._db_name = self._parse_param.get_resotre_db_name()
        self._saphana_cluster = SaphanaClusterManage({SaphanaJsonConstant.SYSTEM_ID: self._sid,
                                                      SaphanaJsonConstant.SYSTEM_DB_PORT: self._system_db_port,
                                                      SaphanaJsonConstant.PROTECT_DATABASE: self._db_name,
                                                      SaphanaJsonConstant.SYSTEM_DB_USER: self._system_db_user},
                                                     self._pid)
        # Error code / params reported upstream when a check fails.
        self._error_code = 0
        self._error_param_list = []
        self._report_progress_thread_start = False
        # Builds self._func_map (job type -> [exec callback, progress callback]).
        self.init_dict()
        self._get_progress_func = None
        self._ignore_delta = True

    def init_system_db_param(self):
        """Load the system DB user/password from the job's sensitive custom params.

        The password is moved into an env variable keyed by pid, and all
        in-memory plaintext copies are cleared afterwards.
        """
        # Read the target env's auth custom params (user and password of the
        # backed-up database) from the env variables.
        sensitive_custom_params = get_env_variable(f"{SaphanaJsonConstant.JOB}_"
                                                   f"{SaphanaJsonConstant.TARGET_ENV}_"
                                                   f"{SaphanaJsonConstant.AUTH}_"
                                                   f"{SaphanaJsonConstant.EXTEND_INFO}_"
                                                   f"{SaphanaJsonConstant.AUTH_CUSTOM_PARAMS}_{self._pid}")
        SaphanaResourceParam.check_custom_params(sensitive_custom_params, AUTH_CUSTOM_PARAMS)
        custom_param_dict = SaphanaResourceParam.init_custom_params(sensitive_custom_params)
        if custom_param_dict.get(SaphanaJsonConstant.SYSTEM_DB_USER):
            self._system_db_user = custom_param_dict.get(SaphanaJsonConstant.SYSTEM_DB_USER)
        if custom_param_dict.get(SaphanaJsonConstant.SYSTEM_DB_PASSWORD):
            system_db_password = custom_param_dict.get(SaphanaJsonConstant.SYSTEM_DB_PASSWORD)
            # Stash the system DB password in an env variable scoped by pid.
            add_env_param(SaphanaJsonConstant.SYSTEM_DB_PASSWORD + self._pid, system_db_password)
            # Wipe the plaintext password buffer.
            clear(system_db_password)
        clear(sensitive_custom_params)

    def init_dict(self):
        """Build the job-type -> [exec callback, progress callback] dispatch table."""
        job_handlers = (
            (SaphanaJobType.RESTORE_PREDECESSOR, self.restore_prerequisite),
            (SaphanaJobType.RESTORE_RESTORE, self.exec_restore_sub_job),
            (SaphanaJobType.RESTORE_POST, self.restore_post),
        )
        # Every job type shares the same generic progress callback.
        self._func_map = {
            job_type: [exec_cb, self.get_progress_comm]
            for job_type, exec_cb in job_handlers
        }

    def gen_sub_job(self):
        """Split sub jobs: one shared restore sub job plus one prepare sub job per node.

        Returns the list of sub-job dicts, or [] when the node list is missing.
        """
        nodes = self._parse_param.get_nodes()
        if not nodes:
            log.error(f"Get nodes failed. {self._parse_param.get_log_common()}")
            return []
        # One restore sub job, runnable on any node, at the lower priority.
        restore_job = SubJobModel(jobId=self._job_id, subJobId="", jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                                  jobName=SaphanaSubjobName.RESTORE, jobPriority=2,
                                  policy=SubJobPolicyEnum.ANY_NODE.value,
                                  ignoreFailed=False, jobInfo="").dict(by_alias=True)
        # One prepare sub job pinned to each node, run first (priority 1).
        prepare_jobs = [
            SubJobModel(jobId=self._job_id, subJobId="", jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                        jobName=SaphanaSubjobName.PREPARE, jobPriority=1,
                        policy=SubJobPolicyEnum.FIXED_NODE.value,
                        ignoreFailed=False, execNodeId=self._parse_param.get_hostid_by_node(node),
                        jobInfo="").dict(by_alias=True)
            for node in nodes
        ]
        log.info(f"Gen sub job success. {self._parse_param.get_log_common()}")
        return [restore_job] + prepare_jobs

    def create_symlink(self, src, dest, dir_instance):
        """Create (or replace) a symlink *dest* -> *src* as the HANA OS user.

        Returns True on success, False on validation or command failure.
        NOTE(review): only *dest* is validated against *dir_instance*; *src*
        is interpolated into the shell command unchecked — confirm callers
        pass trusted paths only.
        """
        if not check_path_legal(dest, dir_instance):
            log.error(f"Path is Unlawful. {self._parse_param.get_log_common()}")
            return False
        os_user = f"{self._sid}adm"
        # Remove a pre-existing destination first so "ln -s" does not fail.
        if os.path.exists(dest):
            success, output = CommonFuction.exec_shell_cmd(f"unlink {dest}", os_user)
            if not success:
                log.error(f"Rm symlink path failed. {output}. {self._parse_param.get_log_common()}")
                return False
        success, output = CommonFuction.exec_shell_cmd(f"ln -s {src} {dest}", os_user)
        if not success:
            log.error(f"Ln symlink path failed. {output}. {self._parse_param.get_log_common()}")
            return False
        return True

    def chown_copy_dir(self):
        """Chown every copy directory (and, for log restore, every log repo) to the job's uid:gid.

        Returns True when every path was validated and chowned successfully.
        """
        uid, gid = self._parse_param.get_query_permission_restore()
        if not uid or not gid:
            log.error(f"Fail to get job permission.{uid}, {gid}. {self._parse_param.get_log_common()}")
            return False
        data_path = self._parse_param.get_data_path()
        if not data_path:
            log.error(f"Get data path failed. {self._parse_param.get_log_common()}")
            return False
        data_copy_id_array, _ = self._parse_param.get_all_copy_id()
        if not data_copy_id_array:
            log.error(f"Get data copy id failed. {self._parse_param.get_log_common()}")
            return False
        target_paths = [os.path.join(data_path, copy_id) for copy_id in data_copy_id_array]
        # A log restore also needs the log repositories chowned.
        if self._parse_param.get_log_restore_timestamp():
            log_path_array = self._parse_param.get_all_log_path()
            if not log_path_array:
                log.error(f"Get log path array failed. {self._parse_param.get_log_common()}")
                return False
            target_paths += log_path_array

        for target in target_paths:
            if not check_path_legal(target, SaphanaPath.HANA_LIVE_MOUNT_PATH):
                log.error(f"Path is invaild. {self._parse_param.get_log_common()}")
                return False
            success, output = CommonFuction.exec_shell_cmd(f"chown -hR {uid}:{gid} {target}")
            if not success:
                log.error(f"Chown path failed. {output}. {self._parse_param.get_log_common()}")
                return False
        log.info(f"Chown success. {self._parse_param.get_log_common()}")
        return True

    def exec_prepare_job(self):
        """Prepare sub job: link the copy repositories into the HANA instance dir.

        Creates data_<job_id> (and, for a log restore, log_<job_id>) symlinks
        under the instance directory, rebuilds the backup-time directory
        layout, and for a log restore clears leftover log backup files.
        Returns True on success.
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False

        # Handle the data repository first.
        data_path = self._parse_param.get_data_path()
        if not data_path:
            log.error(f"Get data path failed. {self._parse_param.get_log_common()}")
            return False
        dest_data_path = os.path.join(dir_instance, f"data_{self._job_id}")
        ret = self.create_symlink(data_path, dest_data_path, dir_instance)
        if not ret:
            log.error(f"Ln data path failed. {self._parse_param.get_log_common()}")
            return False
        # Recreate the backup-time directory structure for every data copy.
        ret = self.deal_copy_data_dir()
        if not ret:
            log.error(f"Deal copy data failed. {self._parse_param.get_log_common()}")
            return False
        log.info(f"Create data path link success. {self._parse_param.get_log_common()}")
        # A log restore also needs the log repository handled.
        if not self._parse_param.get_log_restore_timestamp():
            # Not a log restore: done.
            return True
        log_path = self._parse_param.get_log_restore_log_path()
        if not log_path:
            log.error(f"Get log path failed. {self._parse_param.get_log_common()}")
            return False
        dest_log_path = os.path.join(dir_instance, f"log_{self._job_id}")
        ret = self.create_symlink(os.path.dirname(log_path), dest_log_path, dir_instance)
        if not ret:
            log.error(f"Ln log path failed. {self._parse_param.get_log_common()}")
            return False
        log.info(f"Create log path link success. {self._parse_param.get_log_common()}")
        log.warning(f"Will clean log backup path.")
        # Remove stale log backup pieces so they cannot confuse the recovery.
        self.clean_log_backup_path()
        return True

    def clean_log_backup_path(self) -> None:
        """Delete leftover numeric-suffix log backup files from the HANA log backup path.

        Best effort: if the path cannot be determined or does not exist, the
        method returns silently.
        """
        saphana = LogBackupSAPHANA(self._sid, self._db_name, self._system_db_port, self._system_db_user, "")
        try:
            log_backup_path = saphana.get_log_backup_path()
        except Exception:
            log.info(f"No log backup path.")
            return
        if not os.path.exists(log_backup_path):
            log.info(f"No log backup path.")
            return
        # HANA log backup pieces carry an all-digit suffix.
        files = [str(f.absolute()) for f in Path(log_backup_path).iterdir() if f.is_file() and f.suffix[1:].isdigit()]
        log.warning(f"Clean log backup path. {files}")
        # Plain loop instead of a side-effect list comprehension, which built
        # and discarded a throwaway list of None.
        for file_path in files:
            os.remove(file_path)

    def deal_copy_data_dir(self):
        """Recreate the directory layout recorded at backup time.

        For every copy id, link the mounted copy's "data" directory back to
        the backup-time path. Returns True on success.
        """
        data_copy_backup_path_array = self._parse_param.get_all_data_copy_backup_path()
        if not data_copy_backup_path_array:
            # Was log.info; a failed lookup is an error like the other guards here.
            log.error(f"Get data copy backup path failed. {self._parse_param.get_log_common()}")
            return False

        copy_id_array, _ = self._parse_param.get_all_copy_id()
        if not copy_id_array:
            log.error(f"Get all data copy id error. {self._parse_param.get_log_common()}")
            return False

        # The two arrays must pair up one-to-one.
        if len(copy_id_array) != len(data_copy_backup_path_array):
            log.error(f"Copy count dismatch. {self._parse_param.get_log_common()}")
            return False
        try:
            dir_instance = self._saphana_cluster.get_dir_instance()
        except Exception:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        if not dir_instance:
            log.error(f"Get dir instance is null. {self._parse_param.get_log_common()}")
            return False
        # zip() pairs ids with their backup paths directly; the previous
        # list.index() lookup was O(n^2) and wrong for duplicate copy ids.
        for copy_id, backup_path in zip(copy_id_array, data_copy_backup_path_array):
            src_path = os.path.join(dir_instance, f"data_{self._job_id}", f"{copy_id}", "data")
            if not os.path.exists(src_path):
                log.error(f"Src path not exist. {self._parse_param.get_log_common()}")
                return False
            if not self.create_symlink_and_chown(src_path, backup_path):
                log.error(f"Create symlink failed. {self._parse_param.get_log_common()}")
                return False
        return True

    def allow_restore(self):
        """Hook for subclasses: decide whether the restore sub job may run here.

        This base implementation returns None, which callers treat as falsy
        ("not allowed").
        """
        # Implemented by subclasses.
        pass

    def allow_restore_in_local_node(self):
        """Check whether this sub job may run on the local node.

        Prepare sub jobs run on any node; only restore sub jobs delegate to
        the subclass-specific allow_restore() check.
        """
        if self._parse_param.get_subjob_name() == SaphanaSubjobName.PREPARE:
            return True
        return self.allow_restore()

    def get_errcode(self):
        """Return the last error code recorded by a failed check (0 when none)."""
        return self._error_code

    def check_restore_version(self, copy_version):
        """Check that the copy's HANA major version is not newer than the local one.

        Version strings look like 2.00.020.00.1500920972; only the major
        (first) field is compared. Problems fetching or parsing a version are
        treated as "check passed" so a degraded environment does not block
        the restore. Sets self._error_param_list on mismatch.
        """
        try:
            local_version = self._saphana_cluster.get_version()
        except Exception:
            # Could not read the local version: let the restore proceed.
            log.warning(f"Get version failed. {self._parse_param.get_log_common()}")
            return True
        if not local_version:
            log.warning(f"Get version failed. {self._parse_param.get_log_common()}")
            return True

        copy_version_array = copy_version.split(".")
        local_version_array = local_version.split(".")
        if not copy_version_array or not local_version_array:
            log.error(f"Version is invaild. {self._parse_param.get_log_common()}")
            return False
        try:
            # Compare the major versions numerically: the previous string
            # comparison was lexicographic, so e.g. "10" sorted before "2".
            copy_major = int(copy_version_array[0])
            local_major = int(local_version_array[0])
        except ValueError:
            # Unparsable major version: same pass-through policy as above.
            log.warning(f"Get version failed. {self._parse_param.get_log_common()}")
            return True
        if copy_major <= local_major:
            log.info(f"Version check success. {self._parse_param.get_log_common()}")
            return True
        log.error(f"Version check failed. {self._parse_param.get_log_common()}")
        self._error_param_list = [copy_version, local_version]
        return False

    def check_multi_system(self, copy_multi_tenant_system):
        """Check that the copy and the local target agree on single/multi tenant mode.

        On mismatch, records human-readable mode names in _error_param_list.
        """
        local_is_multi = self._saphana_cluster.is_multi_system()
        if local_is_multi == copy_multi_tenant_system:
            log.info(f"Tenant system match. {self._parse_param.get_log_common()}")
            return True
        log.error(f"Tenant system not match. {self._parse_param.get_log_common()}")

        def describe(is_multi_flag):
            # Render the tenant mode for the error label.
            return "multi" if is_multi_flag else "single"

        self._error_param_list = [describe(copy_multi_tenant_system), describe(local_is_multi)]
        return False

    def check_restore_db_status(self):
        """Check whether the target database's state allows a restore.

        A system DB restore skips the check; a tenant restore requires the
        database to be stopped. Sets DB_NOT_CLOSE on failure.
        """
        # NOTE(review): self.is_system_db() is not defined in the visible part
        # of this base class — presumably provided elsewhere; confirm it is
        # not meant to be self._saphana_cluster.is_system_db() as used below.
        if self.is_system_db():
            return True
        # Env-variable key holding the system DB password for this request.
        system_db_pwd = SaphanaJsonConstant.SYSTEM_DB_PASSWORD + self._pid
        ret = self._saphana_cluster.get_database_running_status(system_db_pwd)
        if not ret:
            # Database is stopped -> restore is allowed.
            return True
        log.error(f"Database({self._db_name}) is running. {self._parse_param.get_log_common()}")
        self._error_code = SaphanaErrorCode.DB_NOT_CLOSE.value
        return False

    def check_restore_topology(self, copy_extend_info):
        """Check that the copy's node topology matches the restore target's.

        Compares only the node counts. Missing copy info or node list is
        treated as "check passed" (best effort). Sets TOPOLOGY_DISMATCH on
        mismatch.
        """
        copy_db_info = copy_extend_info.get(SaphanaMetadataKey.DB_INFO.value, "")
        if not copy_db_info:
            log.warning(f"Get copy db info failed. {self._parse_param.get_log_common()}")
            return True
        # Removed: unused local `system_db_pwd` (the password is never needed
        # for this purely structural comparison).
        nodes = self._parse_param.get_nodes()
        if not nodes:
            log.warning(f"Get nodes failed. {self._parse_param.get_log_common()}")
            return True
        if len(copy_db_info) != len(nodes):
            log.error(f"Copy topology dismatch. {self._parse_param.get_log_common()}")
            self._error_code = SaphanaErrorCode.TOPOLOGY_DISMATCH.value
            return False
        return True

    def is_backup_db_systemdb(self):
        """Return True when the backed-up database was a system database.

        Raises whatever get_backup_multi_tenant_system raises (re-raised
        after logging).
        """
        backup_db_name = self._parse_param.get_backup_database_name()
        backup_system_id = self._parse_param.get_backup_system_id()
        try:
            multi_tenant = self._parse_param.get_backup_multi_tenant_system()
        except Exception as err:
            log.error("Fail to get multi attr.")
            raise err
        db_name_upper = backup_db_name.upper()
        # Multi-tenant systems have a fixed system DB name.
        if multi_tenant and db_name_upper == SaphanaConst.SYSTEM_DB:
            return True
        # Single-container systems name the system DB after the SID.
        if not multi_tenant and db_name_upper == backup_system_id.upper():
            return True
        log.error(f"Can not recover from {backup_db_name} to {self._db_name}.")
        return False

    def check_system_teant_not_recover_each_other(self):
        """Forbid restoring a system DB copy into a tenant DB or vice versa.

        Returns True when the copy and the target are the same kind of
        database; otherwise records the error code/params and returns False.
        """
        target_is_system = bool(self._saphana_cluster.is_system_db())
        backup_is_system = bool(self.is_backup_db_systemdb())
        # Mixed kinds (exactly one side is a system DB) are rejected.
        if target_is_system != backup_is_system:
            # Use .value like every other error-code assignment in this class;
            # the raw enum member was previously propagated upstream.
            self._error_code = SaphanaErrorCode.SYSTEM_TENANT_NOT_RECOVER_EACH_OTHER.value
            self._error_param_list = [self._parse_param.get_backup_database_name(), self._db_name]
            return False
        return True

    def check_systemid(self, first_copy_extend_info):
        """For a system DB restore, the copy's system id must equal the local SID.

        Tenant restores always pass. Sets SYSTEM_ID_NOT_EQUAL on mismatch.
        """
        # Use the enum's string value as the dict key, matching the DB_INFO
        # lookup in check_restore_topology; the raw enum member would never
        # match a string-keyed extend-info dict.
        backup_system_id = first_copy_extend_info.get(SaphanaMetadataKey.SYSTEM_ID.value, "")
        if self._saphana_cluster.is_system_db() and backup_system_id != self._sid:
            log.error(f"Copy system id {backup_system_id} is not equal to target {self._sid}")
            self._error_code = SaphanaErrorCode.SYSTEM_ID_NOT_EQUAL.value
            self._error_param_list = [backup_system_id, self._sid]
            return False
        return True

    def check_restore_systemdb(self):
        """When restoring a system DB, fail if any tenant DB restore is in flight.

        The in-flight state is detected via the restore flag file. Sets
        HANA_RESTORE_BUSY with the conflicting db/job on failure.
        """
        if not self._saphana_cluster.is_system_db():
            return True
        busy, db_name, job_id = self.check_flag_file()
        if not busy:
            return True
        log.error(f"Check flag file failed. {self._parse_param.get_log_common()}")
        self._error_code = SaphanaErrorCode.HANA_RESTORE_BUSY.value
        self._error_param_list = [db_name, job_id]
        return False

    def restore_prerequisite(self):
        """Prerequisite checks before restore.

        Verifies in order: system/tenant kinds match, copy topology equals
        the current environment, copy version vs local version, multi-tenant
        mode, database stopped, system id, no concurrent system DB restore,
        and (for a tenant restore) that the tenant DB exists — then creates
        the restore flag file for tenant restores. Returns True when all
        checks pass.
        """
        # A system DB and a tenant DB must not be restored into each other.
        if not self.check_system_teant_not_recover_each_other():
            log.error(f"Check system teant not recover each other failed. {self._parse_param.get_log_common()}")
            return False
        first_copy_extend_info = self._parse_param.get_first_copy_extend_info()
        if not first_copy_extend_info:
            log.error(f"Get copy extendInfo failed. {self._parse_param.get_log_common()}")
            return False
        copy_hana_version = first_copy_extend_info.get(SaphanaJsonConstant.SAPHANA_VERSION, "")
        if copy_hana_version and not self.check_restore_version(copy_hana_version):
            self._error_code = SaphanaErrorCode.VERSION_DISMATCH.value
            return False
        is_multi = first_copy_extend_info.get(SaphanaJsonConstant.MULTI_TENANT_SYSTEM, "error")
        if is_multi != "error" and not self.check_multi_system(is_multi):
            log.error(f"Multi system error. {self._parse_param.get_log_common()}")
            self._error_code = SaphanaErrorCode.MULTI_MODE_DISMATCH.value
            return False
        if not self.check_restore_db_status():
            return False
        if not self.check_restore_topology(first_copy_extend_info):
            return False
        # System ids must match for a system DB restore.
        if not self.check_systemid(first_copy_extend_info):
            return False
        # A system DB restore must not run while a tenant DB restore is active.
        if not self.check_restore_systemdb():
            return False
        # A tenant restore requires the target tenant database to exist.
        system_db_pwd = SaphanaJsonConstant.SYSTEM_DB_PASSWORD + self._pid
        if not self._saphana_cluster.is_system_db() and not self._saphana_cluster.is_tenant_db_exist(system_db_pwd):
            # .value added for consistency with every other error-code
            # assignment (the raw enum member was propagated before).
            self._error_code = SaphanaErrorCode.DB_NOT_EXIST.value
            return False
        if not self._saphana_cluster.is_system_db() and not self.create_flag_file():
            log.error(f"Create flag file failed. {self._parse_param.get_log_common()}")
            return False
        return True

    @abstractmethod
    def progress_func(self):
        """Hook: produce sub-job progress; must be implemented by subclasses."""
        pass

    def exec_restore_sub_job(self):
        """Dispatch the restore-phase sub job (mount preparation or actual restore).

        Reports a "sub job started" label, then runs the matching callback.
        Returns False for an unknown sub-job name.
        """
        job_name = self._parse_param.get_subjob_name()
        if job_name == SaphanaSubjobName.RESTORE:
            exec_func = self.restore
            start_label = SaphanaTaskLabel.RESTORE_START_COPY_SUBJOB.value
        elif job_name == SaphanaSubjobName.PREPARE:
            exec_func = self.exec_prepare_job
            start_label = SaphanaTaskLabel.RESTORE_START_MOUNT_SUBJOB.value
        else:
            log.error(f"Subjob name({job_name}) not support. {self._parse_param.get_log_common()}")
            return False
        # Both branches share the same LogDetail shape; only the label differs.
        log_detail = LogDetail(logInfo=start_label,
                               logInfoParam=[self._parse_param.get_local_ip_by_self_nodes(), self._subjob_id],
                               logLevel=DBLogLevel.INFO.value)
        self.report_label(log_detail)
        return exec_func()

    def restore_post(self):
        """Post sub job: remove the mount symlinks and the job's flag files.

        Cleanup is best effort — individual link/file failures are logged and
        skipped, and the job still returns True. Only returns False when the
        instance directory cannot be determined.
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance()
        except Exception:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        if not dir_instance:
            log.error(f"Get dir instance is null. {self._parse_param.get_log_common()}")
            return False
        # Removed: unused local `user` — links are removed with os.unlink
        # directly, not via a shell command run as the HANA user.
        path_list = [
            os.path.join(dir_instance, f"data_{self._job_id}"),
            os.path.join(dir_instance, f"log_{self._job_id}")
        ]
        data_copy_backup_path_array = self._parse_param.get_all_data_copy_backup_path()
        if data_copy_backup_path_array:
            path_list.extend(data_copy_backup_path_array)
        for path in path_list:
            if not (os.path.exists(path) or os.path.islink(path)):
                continue
            # Only remove symlinks created by the prepare job; never delete
            # a real directory that happens to live at the same path.
            if not os.path.islink(path):
                log.error(f"Path exist and not link. {self._parse_param.get_log_common()}")
                continue
            try:
                os.unlink(path)
            except Exception:
                log.error(f"Rm symlink path failed. {self._parse_param.get_log_common()}")
                continue
        # Remove the per-job systemdb marker file, if present and legal.
        file_name = os.path.join(ParamConstant.PARAM_FILE_PATH, f"hana_systemdb_{self._job_id}")
        if os.path.exists(file_name) and check_path_legal(file_name, ParamConstant.PARAM_FILE_PATH):
            try:
                os.remove(file_name)
            except Exception:
                log.warning(f"Rm systemdb flag file failed. {self._parse_param.get_log_common()}")
        self.delete_flag_file()
        return True

    def report_progress(self, subjob_details, rentry_count=1):
        """Report progress once, retrying up to *rentry_count* times on failure.

        subjob_details: dict payload for report_job_details.
        rentry_count: number of attempts before giving up.

        Bug fix: the old post-loop check was ``if rentry_count < 0`` — the
        loop can only leave the counter at 0, so the failure log was dead
        code and exhausted retries were never reported.
        """
        progress = subjob_details.get(SaphanaJsonConstant.PROGRESS, 0)
        for _ in range(rentry_count):
            if CommonFuction.report_job_details(self._job_id, subjob_details):
                log.info(f"Report job details success.preogress:{progress} {self._parse_param.get_log_common()}")
                return
            time.sleep(SaphanaReportInterval.REPORT_PROGRESS_INTERVAL)
        log.error(f"Report job details failed.preogress:{progress} {self._parse_param.get_log_common()}")

    def report_progress_thread(self):
        """Progress-reporting loop; runs until _report_progress_thread_start is cleared.

        self._get_progress_func is the externally supplied callback that
        produces each progress payload.
        """
        log.info(f"Report thread begin. {self._parse_param.get_log_common()}")
        while self._report_progress_thread_start:
            self.report_progress(self._get_progress_func())
            time.sleep(SaphanaReportInterval.REPORT_PROGRESS_INTERVAL)
        log.info(f"Report thread end. {self._parse_param.get_log_common()}")

    def get_progress_comm(self):
        """Return a generic "still running" payload for sub jobs without measurable progress."""
        details = SubJobDetails(taskId=self._job_id, subTaskId=self._subjob_id,
                                progress=SaphanaProgressPhase.RUNNING,
                                taskStatus=SubJobStatusEnum.RUNNING.value)
        return details.dict(by_alias=True)

    def create_log_detail(self, key, is_success):
        """Build the LogDetail for a finished sub job, or None when no label applies.

        key: SaphanaJobType of the sub job.
        is_success: whether the sub job succeeded. On failure the recorded
        error code/params are attached.
        """
        label_dict = {
            SaphanaJobType.RESTORE_PREDECESSOR: {
                False: SaphanaTaskLabel.PREREQUISITE_FAIL_LABEL.value
            },
            SaphanaJobType.RESTORE_RESTORE: {
                # .value added: without it these entries were enum members
                # while PREREQUISITE_FAIL_LABEL above and the comparison
                # below use plain string values.
                False: SaphanaTaskLabel.RESTORE_SUBJOB_FAIL_LABEL.value,
                True: SaphanaTaskLabel.RESTORE_SUBJOB_SUCCESS_LABEL.value
            }
        }

        log_detail = None
        label = label_dict.get(key, {}).get(is_success, "")
        if label:
            log_level = DBLogLevel.INFO.value
            log_param = []
            log_detail_param = []
            err_code = 0
            if not is_success:
                log_level = DBLogLevel.ERROR.value
                err_code = self._error_code
                log_detail_param = self._error_param_list
            if label != SaphanaTaskLabel.PREREQUISITE_FAIL_LABEL.value:
                # NOTE(review): for the prerequisite-fail label this leaves
                # logInfoParam as [[]] — confirm downstream expects that.
                log_param = self._subjob_id
            log_detail = LogDetail(logInfo=label, logDetailParam=log_detail_param,
                                   logInfoParam=[log_param], logLevel=log_level, logDetail=err_code)
        return log_detail

    def report_success_or_failed(self, is_success, key):
        """Report the final sub-job status (completed or failed) with its label."""
        task_status = (SubJobStatusEnum.COMPLETED.value if is_success
                       else SubJobStatusEnum.FAILED.value)
        self.report_label(log_detail=self.create_log_detail(key, is_success),
                          task_status=task_status,
                          progress_param=SaphanaProgressPhase.END)

    def report_label(self, log_detail=None, task_status=SubJobStatusEnum.RUNNING.value, progress_param=0):
        """Report one label; retried up to the configured count if reporting fails.

        log_detail: optional LogDetail to attach to the payload.
        """
        # Attach logDetail only when one was supplied.
        detail_kwargs = {"logDetail": [log_detail]} if log_detail else {}
        sub_job_details = SubJobDetails(taskId=self._job_id, subTaskId=self._subjob_id,
                                        progress=progress_param, taskStatus=task_status,
                                        **detail_kwargs).dict(by_alias=True)
        self.report_progress(sub_job_details, SaphanaRentryCount.REPORT_PROGRESS_RENTRY_COUNT)

    def exec_sub_job(self, key):
        """Run the sub job registered for *key* while a daemon thread reports progress.

        Looks up the [exec callback, progress callback] pair in
        self._func_map, starts the progress-reporting thread, runs the exec
        callback, then stops and joins the thread. Returns the callback's
        result, or False for an unregistered key.
        """
        # Single lookup instead of two independent, separately-guarded
        # .get(key) calls on the same map.
        callbacks = self._func_map.get(key)
        if not callbacks or len(callbacks) < 2:
            log.error(f"Get func exception. {self._parse_param.get_log_common()}")
            return False
        exec_func, progress_func = callbacks[0], callbacks[1]

        self._get_progress_func = progress_func
        self._report_progress_thread_start = True
        progress_thread = Thread(target=self.report_progress_thread)
        progress_thread.daemon = True
        progress_thread.start()
        try:
            ret = exec_func()
        finally:
            # Always stop the reporter, even when the sub job raises.
            self._report_progress_thread_start = False
            progress_thread.join()
        return ret

    def create_symlink_copy_file(self, src_file_path, dest_file_path, instance_dir_path):
        """Validate the destination path, then link src_file_path to dest_file_path."""
        if check_path_legal(dest_file_path, SaphanaPath.HANA_LIVE_MOUNT_PATH):
            return self.create_symlink_copy_file_ex(src_file_path, dest_file_path)
        log.error(f"Path is Unlawful. {self._parse_param.get_log_common()}")
        return False

    def create_symlink_copy_file_ex(self, src_file_path, dest_file_path):
        """Replace dest_file_path with a symlink to src_file_path, created as the HANA user.

        Refuses to touch an existing destination that is not itself a symlink.
        """
        if os.path.exists(dest_file_path):
            # Never replace a real file — only a previously created link.
            if not os.path.islink(dest_file_path):
                log.error(f"Dest file path({dest_file_path}) exist and not link. {self._parse_param.get_log_common()}")
                return False
            try:
                os.unlink(dest_file_path)
            except Exception:
                log.error(f"Unlink failed. {self._parse_param.get_log_common()}")
                return False
        # Create the link as the <sid>adm user.
        link_cmd = f"ln -s {src_file_path} {dest_file_path}"
        success, output = CommonFuction.exec_shell_cmd(link_cmd, f"{self._sid}adm")
        if not success:
            log.error(f"Ln symlink path failed. {output}. {self._parse_param.get_log_common()}")
            return False
        return True

    def create_symlink_and_chown(self, src_file_path, dest_file_path):
        """Create a symlink dest -> src and chown it to the job's uid:gid.

        An existing destination is replaced only when it is itself a symlink
        (broken links included). A chown failure is logged but not fatal.
        NOTE(review): calls restore_get_query_permission(), while
        chown_copy_dir calls get_query_permission_restore() — confirm both
        accessors exist and agree.
        """
        # islink() also catches dangling links that exists() misses.
        if os.path.exists(dest_file_path) or os.path.islink(dest_file_path):
            if not os.path.islink(dest_file_path):
                log.error(f"Dest file path({dest_file_path}) exist and not link. {self._parse_param.get_log_common()}")
                return False
            try:
                os.unlink(dest_file_path)
            except Exception:
                log.error(f"Dest file path({dest_file_path}) exist and is link. {self._parse_param.get_log_common()}")
                return False
        try:
            os.symlink(src_file_path, dest_file_path)
        except Exception:
            log.error(f"Ln symlink path failed. {self._parse_param.get_log_common()}")
            return False
        uid, gid = self._parse_param.restore_get_query_permission()
        if uid and gid:
            try:
                os.chown(dest_file_path, uid, gid)
            except Exception:
                log.error(f"Chown exception. {self._parse_param.get_log_common()}")
        else:
            log.error(f"Fail to get job permission.{uid}, {gid}")
        return True

    def create_symlink_copy_dir(self, src_path, dest_path, instance_dir_path):
        """
        Needed by HANA restore: all copy files must end up in one directory.
        Links every entry of src_path into dest_path via create_symlink_copy_file.

        src_path: source directory
        dest_path: target directory
        Returns True when every entry was linked, False on the first failure.
        """
        for entry in os.listdir(src_path):
            linked = self.create_symlink_copy_file(
                os.path.join(src_path, entry),
                os.path.join(dest_path, entry),
                instance_dir_path)
            if not linked:
                log.error(f"Create symlink file failed. {self._parse_param.get_log_common()}")
                return False
        return True

    def deal_inc_data_copy(self, dir_instance):
        """
        A log restore may depend on incremental copies: link the files of every
        incremental/differential copy into the full copy's directory.

        Returns (True, merged_copy_dir) on success, (False, "") on failure.
        """
        copy_id_array, _ = self._parse_param.get_all_copy_id()
        if not copy_id_array:
            log.error(f"Get all data copy id error. {self._parse_param.get_log_common()}")
            return False, ""
        data_root = os.path.join(dir_instance, f"data_{self._job_id}")
        # The first copy delivered is always the full copy; it is the merge target.
        dest_path = os.path.join(data_root, f"{copy_id_array[0]}", "data")
        if not os.path.exists(dest_path):
            log.error(f"Dest path not exist. {self._parse_param.get_log_common()}")
            return False, ""
        for copy_id in copy_id_array[1:]:
            src_path = os.path.join(data_root, f"{copy_id}", "data")
            if not os.path.exists(src_path):
                log.error(f"Src path not exist. {self._parse_param.get_log_common()}")
                return False, ""
            if not self.create_symlink_copy_dir(src_path, dest_path, dir_instance):
                log.error(f"Deal data copy failed. {self._parse_param.get_log_common()}")
                return False, ""
        log.info(f"Deal data copy success. {self._parse_param.get_log_common()}")
        return True, dest_path

    def create_log_link_path(self, dest_path, instance_dir_path):
        """
        Recreate dest_path as a fresh, empty directory owned by the <sid>adm user.
        Returns True on success, False on failure.
        """
        # NOTE(review): instance_dir_path is unused; legality is checked against
        # HANA_LIVE_MOUNT_PATH — confirm this is intended.
        if not check_path_legal(dest_path, SaphanaPath.HANA_LIVE_MOUNT_PATH):
            log.error(f"Path is Unlawful. {self._parse_param.get_log_common()}")
            return False
        # Drop any stale directory left behind by an earlier run.
        if os.path.exists(dest_path):
            try:
                shutil.rmtree(dest_path)
            except Exception as exception_str:
                log.error(f"Shutil rmtree failed. {self._parse_param.get_log_common()}")
                return False

        ret, out = CommonFuction.exec_shell_cmd(f"mkdir {dest_path}", f"{self._sid}adm")
        if ret:
            return True
        log.error(f"Mkdir path failed. {out}. {self._parse_param.get_log_common()}")
        return False

    def deal_log_copy(self, dir_instance):
        """
        Link the files of several log copies into a single directory.
        A restore does not clone the log volume, so all log files are linked
        into the data volume; only the catalog of the last log copy is needed.

        Returns (True, merged_log_dir) on success, (False, "") on failure.
        """
        log_path_array = self._parse_param.get_all_log_path()
        if not log_path_array:
            log.error(f"Get all log path error. {self._parse_param.get_log_common()}")
            return False, ""

        dest_path = os.path.join(dir_instance, f"data_{self._job_id}", f"log_{self._job_id}")
        if not self.create_log_link_path(dest_path, dir_instance):
            log.error(f"Create log link path failed. {self._parse_param.get_log_common()}")
            return False, ""

        # Walk every log directory; remember the one belonging to the last copy id,
        # whose catalog is the only one required.
        _, log_copy_id_array = self._parse_param.get_all_copy_id()
        if not log_copy_id_array:
            log.error("Log copy id is empty.")
            return False, ""
        last_log_copy_id_path = None
        for log_path in log_path_array:
            src_path = os.path.join(log_path, "data")
            if not os.path.exists(src_path):
                log.error(f"Src path not exist. {self._parse_param.get_log_common()}")
                return False, ""
            if not self.create_symlink_copy_dir(src_path, dest_path, dir_instance):
                log.error(f"Deal log copy failed. {self._parse_param.get_log_common()}")
                return False, ""
            if log_copy_id_array[-1] in log_path.split('/')[-1]:
                last_log_copy_id_path = log_path
        # Bug fix: if no log path matched the last copy id, last_log_copy_id_path
        # stayed None and os.path.join(None, "catalog") below raised TypeError.
        if last_log_copy_id_path is None:
            log.error(f"No log path matches last copy id. {self._parse_param.get_log_common()}")
            return False, ""
        dest_catalog_path = os.path.join(dest_path, "catalog")
        if not os.path.exists(dest_catalog_path):
            cmd_str = f"mkdir {dest_catalog_path}"
            ret, out = CommonFuction.exec_shell_cmd(cmd_str, f"{self._sid}adm")
            if not ret:
                log.error(f"Mkdir path failed. {out}. {self._parse_param.get_log_common()}")
                return False, ""
        # Only the catalog of the last log copy is required for recovery.
        if not self.create_symlink_copy_dir(os.path.join(last_log_copy_id_path, "catalog"),
                                            dest_catalog_path, dir_instance):
            log.error(f"Deal catalog failed. {self._parse_param.get_log_common()}")
            return False, ""
        log.info(f"Deal log copy success. {self._parse_param.get_log_common()}")
        return True, dest_path

    def get_log_restore_time(self):
        """
        Return the point-in-time restore target formatted like 2022-11-10 09:04:13.
        An empty string is returned when the timestamp is missing or cannot be
        converted.
        """
        raw_timestamp = self._parse_param.get_log_restore_timestamp()
        if not raw_timestamp:
            log.error(f"Get timestamp failed. {self._parse_param.get_log_common()}")
            return ""
        try:
            converted = RestoreSAPHANA.timestamp_to_utc_time(int(raw_timestamp))
        except Exception as err:
            log.error(f"Convert time failed:{err}. {self._parse_param.get_log_common()}")
            return ""
        log.info(f"The restore Convert time:{converted}.")
        return converted

    def is_system_db(self):
        """Tell whether the restore target is the system database (delegated to the cluster manager)."""
        result = self._saphana_cluster.is_system_db()
        return result

    def check_restore_dir(self, path_list):
        """Return True when every path in path_list exists on disk, else log and return False."""
        missing = next((p for p in path_list if not os.path.exists(p)), None)
        if missing is not None:
            log.info(f"Path({missing}) not exist. {self._parse_param.get_log_common()}")
            return False
        return True

    """
    SAP HANA租户数据库和系统数据库的恢复需要互斥
    PM无法限制，需要在插件侧通过在共享目录中记录标记文件的方式来处理,以下三个函数完成对应功能
    """

    def create_flag_file(self):
        """
        Tenant database restore: create the flag file hana_<job_id>_<db_name> in
        the instance directory when the restore starts, so that system database
        restores can detect a tenant restore in progress.

        Returns True on success (or if the file already exists), False on error.
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        file_path = os.path.join(dir_instance, f"hana_{self._job_id}_{self._db_name}")
        if not check_path_legal(file_path, dir_instance):
            # Single-line message: the old backslash continuation inside the f-string
            # embedded the source-code indentation into the emitted log text.
            log.error(f"Path(hana_{self._job_id}_{self._db_name}) is not legal. "
                      f"{self._parse_param.get_log_common()}")
            return False
        if not os.path.exists(file_path):
            # Create the flag file.
            try:
                os.mknod(file_path)
            except Exception as exception_str:
                log.error(f"Mknod file(hana_{self._job_id}_{self._db_name}) failed. "
                          f"{self._parse_param.get_log_common()}")
                log.exception(exception_str)
                return False
            log.info(f"Create file(hana_{self._job_id}_{self._db_name}) success. "
                     f"{self._parse_param.get_log_common()}")
        return True

    def delete_flag_file(self):
        """
        Remove the hana_<job_id>_<db_name> mutual-exclusion flag file once the
        task has finished.

        Returns True when the file is gone (or never existed), False on error.
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        file_path = os.path.join(dir_instance, f"hana_{self._job_id}_{self._db_name}")
        if not check_path_legal(file_path, dir_instance):
            # Single-line message: the old backslash continuation inside the f-string
            # embedded the source-code indentation into the emitted log text.
            log.error(f"Path(hana_{self._job_id}_{self._db_name}) is not legal. "
                      f"{self._parse_param.get_log_common()}")
            return False
        if os.path.exists(file_path):
            # Clean up the flag file.
            try:
                os.remove(file_path)
            except Exception as exception_str:
                log.error(f"Remove file(hana_{self._job_id}_{self._db_name}) failed. "
                          f"{self._parse_param.get_log_common()}")
                log.exception(exception_str)
                return False
            log.info(f"Remove file(hana_{self._job_id}_{self._db_name}) success. "
                     f"{self._parse_param.get_log_common()}")
        return True

    def check_flag_file(self):
        """
        System database restore: scan the instance directory for tenant-restore
        flag files to decide whether a tenant restore is still running.

        Returns (found, db_name, job_id); db_name and job_id are "" when no
        flag file is found (or on error).
        """
        db_name, job_id = "", ""
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False, db_name, job_id

        for entry in os.listdir(dir_instance):
            # A flag file is a regular file named hana_<job_id>_<db_name>; the
            # job id contains exactly four dashes, hence the count check.
            is_flag = (os.path.isfile(os.path.join(dir_instance, entry))
                       and entry.startswith("hana_")
                       and entry.count("-") == 4)
            if not is_flag:
                continue
            log.debug(f"File({entry}) exist. {self._parse_param.get_log_common()}")
            parts = entry.split("_")
            if len(parts) != 3:
                log.error(f"File({entry}) split failed. {self._parse_param.get_log_common()}")
                return False, db_name, job_id
            return True, parts[2], parts[1]
        return False, db_name, job_id

    def exec_restore_data_cmd(self, copy_path):
        """Run the plain (data) restore command for copy_path.

        No-op hook here: the system database and a tenant database execute the
        restore differently, so concrete implementations are expected to be
        provided elsewhere (e.g. by subclasses) — TODO confirm.
        """
        # Executes the plain restore command; system DB and tenant DB differ.
        pass

    def exec_restore_log_cmd(self, restore_time, cata_log_path, log_data_path, copy_data_path, backup_id):
        """Run the point-in-time (log) restore command.

        No-op hook here: the system database and a tenant database execute the
        restore differently, so concrete implementations are expected to be
        provided elsewhere (e.g. by subclasses) — TODO confirm.
        """
        # Executes the point-in-time restore command; system DB and tenant DB differ.
        pass

    def restore_data_copy(self, dir_instance):
        """
        Restore from a data copy: locate the full copy directory, verify the
        target database is not running, then execute the restore command.
        Returns True on success, False on failure.
        """
        full_copy_id = self._parse_param.get_full_data_copy_id()
        if not full_copy_id:
            log.error(f"Get full copy id failed. {self._parse_param.get_log_common()}")
            return False
        # The copy id ends up inside a SQL command, so validate it first.
        if not check_sql_cmd_param(full_copy_id):
            log.error(f"Invalid copy id:{full_copy_id}. {self._parse_param.get_log_common()}")
            return False

        copy_path = os.path.join(
            dir_instance, f"data_{self._job_id}", f"{full_copy_id}", "data")
        if not os.path.exists(copy_path):
            log.error(f"Copy path not exist. {self._parse_param.get_log_common()}")
            return False
        # Re-check the target database state right before restoring.
        if not self.check_restore_db_status():
            log.error(f"Database({self._db_name}) is running. {self._parse_param.get_log_common()}")
            return False
        if not self.exec_restore_data_cmd(copy_path):
            return False
        log.info(f"Restore success. {self._parse_param.get_log_common()}")
        return True

    def restore_log_copy(self, dir_instance):
        """
        Point-in-time (log) restore: merge the data copies and log copies into
        single directories, validate all inputs, then execute the log restore
        command. Returns True on success, False on failure.
        """
        ret, copy_data_path = self.deal_inc_data_copy(dir_instance)
        if not ret:
            log.error(f"Deal inc data copy failed. {self._parse_param.get_log_common()}")
            return False
        ret, log_data_path = self.deal_log_copy(dir_instance)
        if not ret:
            log.error(f"Deal log copy failed. {self._parse_param.get_log_common()}")
            return False

        cata_log_path = os.path.join(dir_instance, f"data_{self._job_id}", f"log_{self._job_id}", "catalog")
        path_list = [copy_data_path, log_data_path, cata_log_path]
        if not self.check_restore_dir(path_list):
            return False

        restore_time = self.get_log_restore_time()
        if not restore_time:
            log.error(f"Get log restore time failed. {self._parse_param.get_log_common()}")
            return False
        # The database name is interpolated into SQL, so validate it first.
        if not check_sql_cmd_param(self._db_name):
            log.error(f"Invalid dbname:{self._db_name}. {self._parse_param.get_log_common()}")
            return False

        backup_id = self._parse_param.get_backup_id()
        if not backup_id:
            # Fixed copy-paste log text: this failure is about fetching the
            # backup id, not the restore time.
            log.error(f"Get backup id failed. {self._parse_param.get_log_common()}")
            return False
        if not check_sql_cmd_param(str(backup_id)):
            log.error(f"Invalid backupid:{backup_id}. {self._parse_param.get_log_common()}")
            return False

        log.info(f"restore_time:{restore_time}, backup_id:{backup_id}")
        if not self.exec_restore_log_cmd(restore_time, cata_log_path, log_data_path, copy_data_path, backup_id):
            return False
        log.info(f"Restore success. {self._parse_param.get_log_common()}")
        return True

    def restore(self):
        """
        Restore entry point: run a point-in-time (log) restore when a restore
        timestamp is supplied, otherwise a plain data restore. The mutual
        exclusion flag file is removed afterwards in either case.
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        if self._parse_param.get_log_restore_timestamp():
            result = self.restore_log_copy(dir_instance)
        else:
            result = self.restore_data_copy(dir_instance)
        self.delete_flag_file()
        return result

    def _check_recover(self, data_dir: str, log_dir: str, catalog_dir: str, target_time: str) -> bool:
        """
        Check that every backup needed to reach target_time is present in
        data_dir/log_dir. First a strict pass (delta backups counted); if files
        are missing, a relaxed pass ignoring deltas. self._ignore_delta records
        which mode succeeded. Returns True when either pass is satisfied.
        """
        files = os.listdir(data_dir) + os.listdir(log_dir)
        saphana = RestoreSAPHANA(self._sid, self._db_name, self._system_db_port, self._system_db_user, "")
        # Strict pass: delta backups count as required.
        backups = saphana.list_required_backups(catalog_dir, target_time, ignore_delta=False)
        if not (set(backups) - set(files)):
            log.info("Check recover successfully without delta ignore.")
            self._ignore_delta = False
            return True
        log.warning(f"Missing backups without delta ignore. {backups} {files}")
        # Relaxed pass: retry with delta backups ignored.
        backups = saphana.list_required_backups(catalog_dir, target_time, ignore_delta=True)
        if set(backups) - set(files):
            log.error(f"Missing backups with delta ignore. {backups} {files}")
            return False
        log.info("Check recover successfully with delta ignore.")
        self._ignore_delta = True
        return True

