#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import re
import time
import datetime
import shutil
from abc import ABC, abstractmethod
from threading import Thread
import pwd
from pathlib import Path

from common.common_models import SubJobModel, SubJobDetails, LogDetail
from common.parse_parafile import add_env_param
from common.common import check_path_legal, convert_timestamp_to_time, check_sql_cmd_param, \
    output_execution_result_ex, execute_cmd, read_tmp_json_file, CMDResult
from common.cleaner import clear
from common.const import SubJobStatusEnum, SubJobTypeEnum, SubJobPolicyEnum, DBLogLevel, \
    ParamConstant, AuthType
from generaldb.saphana.comm.saphana_file_operate import log
from saphana.resource.saphana_cluster_manager import SaphanaClusterManage
from generaldb.saphana.comm.saphana_common_function import CommonFuction
from generaldb.saphana.comm.saphana_const import SaphanaSubjobName, SaphanaJsonConstant, SaphanaReportInterval, \
    SaphanaProgressPhase, SaphanaRentryCount, SaphanaJobType, SaphanaConst, SaphanaPath, SaphanaTaskLabel, \
    SaphanaErrorCode, SaphanaMetadataKey
from saphana.common.saphana_const import SapConst
from saphana.resource.common_util import HanaCommonUtil
from saphana.restore.saphana_parse_restore_params import SaphanaParseRestoreParam
from saphanay.restore.restore_saphana import RestoreSAPHANA


class SaphanaRestoreBackintParent(ABC):
    def __init__(self, pid, job_id, subjob_id, parse_params_obj: SaphanaParseRestoreParam):
        """Base class for backint-based SAP HANA restore jobs.

        :param pid: request pid, used for env-variable lookups and cluster operations
        :param job_id: main job id
        :param subjob_id: current sub-job id
        :param parse_params_obj: parsed restore parameters; must not be None
        :raises Exception: when parse_params_obj is falsy
        """
        if not parse_params_obj:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._pid = pid
        self._job_id = job_id
        self._subjob_id = subjob_id
        self._parse_param = parse_params_obj
        self._sid = self._parse_param.get_sid()
        self._system_db_port = self._parse_param.get_system_db_port()
        self._system_db_user = self._parse_param.get_system_db_user()
        self._systemdb_auth_type = int(self._parse_param.get_systemdb_auth_type())
        self._backupdb_auth_type = int(self._parse_param.get_protectdb_auth_type())
        # Env-variable name holding the SYSTEMDB password; filled in by
        # init_system_db_param() below (must run before the cluster manager is built).
        self._system_db_pwd_env = ""
        self.init_system_db_param()
        self._db_name = self._parse_param.get_restore_db_name()
        self._saphana_cluster = SaphanaClusterManage({SaphanaJsonConstant.SYSTEM_ID: self._sid,
                                                      SaphanaJsonConstant.SYSTEM_DB_PORT: self._system_db_port,
                                                      SaphanaJsonConstant.PROTECT_DATABASE: self._db_name,
                                                      SaphanaJsonConstant.SYSTEM_DB_USER: self._system_db_user,
                                                      SaphanaJsonConstant.SYSTEMDB_AUTH_TYPE: self._systemdb_auth_type,
                                                      SaphanaJsonConstant.PROTECTDB_AUTH_TYPE:
                                                          self._backupdb_auth_type},
                                                     self._pid)
        # Last error code / label parameters recorded by the check steps.
        self._error_code = 0
        self._error_param_list = []
        self._report_progress_thread_start = False
        self.init_dict()
        self._get_progress_func = None
        # Whether the job is currently inside the restore sub-task.
        self.is_restore_sub_task = False
        # Watchdog flag for the restore sub-task: the database log file is
        # expected to keep updating while the restore is healthy.
        self.log_file_is_update = False
        self._ignore_delta = True

    def init_system_db_param(self):
        """Prepare the env-variable name/value used to access the SYSTEMDB password.

        For password authentication the plain password is registered through
        add_env_param and then wiped from memory with clear(); for any other
        auth type only the env-variable name is composed here.
        """
        # Obtain the user/password of the backed-up database.
        # Fetch custom parameters.
        if self._systemdb_auth_type == AuthType.APP_PASSWORD.value:
            system_db_password = self._parse_param.get_system_db_pwd()
            # NOTE(review): self._system_db_pwd_env is still "" at this point
            # (set in __init__ just before this call), so the password is
            # registered under an empty env name — confirm this is intentional;
            # the else-branch composes a real name.
            add_env_param(self._system_db_pwd_env, system_db_password)
            clear(system_db_password)
        else:
            self._system_db_pwd_env = f"{SaphanaJsonConstant.JOB}_{SaphanaJsonConstant.TARGET_ENV}" \
                                      f"_{SaphanaJsonConstant.AUTH}_" \
                                      f"{SaphanaJsonConstant.EXTEND_INFO}_{SaphanaJsonConstant.KEY_INFO}_{self._pid}"

    def init_dict(self):
        """Build the sub-job dispatch table.

        Maps each restore job type to a [exec_func, progress_func] pair as
        consumed by exec_sub_job(): index 0 runs the sub-job, index 1 produces
        the periodic progress report.
        """
        self._func_map = {
            SaphanaJobType.RESTORE_PREDECESSOR: [
                self.restore_prerequisite,
                self.get_progress_comm
            ],
            SaphanaJobType.RESTORE_RESTORE: [
                self.exec_restore_sub_job,
                self.get_progress_comm
            ],
            SaphanaJobType.RESTORE_POST: [
                self.restore_post,
                self.get_progress_comm
            ]
        }

    def gen_sub_job(self):
        """Split the restore job into sub-jobs.

        Produces one shared restore sub-job (any node, priority 2) plus one
        prepare sub-job pinned to every cluster node (priority 1).

        :return: list of serialized SubJobModel dicts; [] when nodes are unknown
        """
        hana_nodes = self._parse_param.get_nodes()
        if not hana_nodes:
            log.error(f"Get nodes failed. {self._parse_param.get_log_common()}")
            return []
        # Shared restore sub-job that may run on any node.
        restore_job = SubJobModel(jobId=self._job_id, subJobId="",
                                  jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                                  jobName=SaphanaSubjobName.RESTORE, jobPriority=2,
                                  policy=SubJobPolicyEnum.ANY_NODE.value,
                                  ignoreFailed=False, jobInfo="").dict(by_alias=True)
        # One prepare (whitelist) sub-job fixed to each node.
        prepare_jobs = [
            SubJobModel(jobId=self._job_id, subJobId="",
                        jobType=SubJobTypeEnum.BUSINESS_SUB_JOB.value,
                        jobName=SaphanaSubjobName.PREPARE, jobPriority=1,
                        policy=SubJobPolicyEnum.FIXED_NODE.value,
                        ignoreFailed=False,
                        execNodeId=self._parse_param.get_hostid_by_node(node),
                        jobInfo="").dict(by_alias=True)
            for node in hana_nodes
        ]
        log.info(f"Gen sub job success. {self._parse_param.get_log_common()}")
        return [restore_job] + prepare_jobs

    def create_symlink(self, src, dest):
        """Create a symlink dest -> src as the <sid>adm user.

        :return: True on success, False (with error log) otherwise
        """
        adm_user = f"{self._sid}adm"
        # Create the soft link via shell as the HANA admin user.
        cmd_str = f"ln -s {src} {dest}"
        ret, out = CommonFuction.exec_shell_cmd(cmd_str, adm_user)
        if ret:
            log.debug("Ln from %s to %s success", src, dest)
            return True
        log.error(f"Ln from {src} to {dest} symlink path failed. {out}. {self._parse_param.get_log_common()}")
        return False

    def exec_prepare_job(self):
        """Per-node prepare sub-job: write backint config/parameters, then purge
        stale log backups.

        :return: always True — the results of the prepare helpers are not
            checked here
        """
        self.prepare_backint_config()
        self.prepare_param_for_backint(self._saphana_cluster.get_dir_instance_ex())
        log.warning(f"Will clean log backup path.")
        self.clean_log_backup_path()
        return True

    def clean_log_backup_path(self) -> None:
        """Remove stale numbered log-backup entries under the log backup path.

        Deletes regular files whose extension (without the dot) is all digits
        and sub-directories whose name is all digits; anything else is kept.
        Missing/unset paths are a no-op.
        """
        log_backup_path = self._parse_param.get_log_backup_path()
        if not log_backup_path:
            log.info(f"No log backup path.")
            return
        # SYSTEMDB keeps its logs under "SYSTEMDB"; tenants under "DB_<NAME>".
        log_backup_path = os.path.join(
            log_backup_path, f"DB_{self._db_name.upper()}" if self._db_name.upper() != "SYSTEMDB" else "SYSTEMDB")
        if not os.path.exists(log_backup_path):
            log.info(f"No log backup path.")
            return
        log.info(f"Will clean log backup path. {log_backup_path}")
        files = [str(f.absolute()) for f in Path(log_backup_path).iterdir() if f.is_file() and f.suffix[1:].isdigit()]
        log.warning(f"Clean log backup path. {files}")
        # Plain loops instead of side-effect list comprehensions (which built
        # throwaway lists of None).
        for file_path in files:
            os.remove(file_path)
        _dirs = [str(d.absolute()) for d in Path(log_backup_path).iterdir() if d.is_dir() and d.name.isdigit()]
        log.warning(f"Clean log backup path. {_dirs}")
        for dir_path in _dirs:
            shutil.rmtree(dir_path, ignore_errors=True)

    def allow_restore(self):
        """Hook deciding whether the restore sub-job may run on this node.

        Implemented by subclasses; this base implementation returns None,
        which allow_restore_in_local_node() treats as "not allowed" (falsy).
        """
        # Implemented by subclasses.
        pass

    def allow_restore_in_local_node(self):
        """Decide whether the current sub-job may execute on this node.

        Prepare sub-jobs are allowed everywhere; only restore sub-jobs are
        gated by the subclass-provided allow_restore() check.
        """
        if self._parse_param.get_subjob_name() == SaphanaSubjobName.PREPARE:
            return True
        return self.allow_restore()

    def get_errcode(self):
        """Return the last error code recorded by the check/restore steps (0 = none)."""
        return self._error_code

    def check_restore_version(self, copy_version):
        """Check that the copy's HANA major version is not newer than the local one.

        Best-effort: when the local version cannot be obtained, the check
        passes. Version example: 2.00.020.00.1500920972 — only the major
        component is compared.

        :param copy_version: version string recorded in the copy
        :return: True when restore is allowed (or undeterminable), else False
        """
        try:
            local_version = self._saphana_cluster.get_version()
        except Exception:
            # Getting the local version failed; let the check pass.
            log.warning(f"Get version failed. {self._parse_param.get_log_common()}")
            return True
        if not local_version:
            log.warning(f"Get version failed. {self._parse_param.get_log_common()}")
            return True

        copy_version_array = copy_version.split(".")
        local_version_array = local_version.split(".")
        if not copy_version_array or not local_version_array:
            log.error(f"Version is invaild. {self._parse_param.get_log_common()}")
            return False
        try:
            # Fix: compare the majors numerically — the former string
            # comparison would order e.g. "10" < "2" and pass/fail wrongly.
            copy_major = int(copy_version_array[0])
            local_major = int(local_version_array[0])
        except ValueError:
            log.error(f"Version is invaild. {self._parse_param.get_log_common()}")
            return False
        if copy_major <= local_major:
            log.info(f"Version check success. {self._parse_param.get_log_common()}")
            return True
        log.error(f"Version check failed. {self._parse_param.get_log_common()}")
        self._error_param_list = [copy_version, local_version]
        return False

    def check_multi_system(self, copy_multi_tenant_system):
        """Verify the copy's tenant mode (multi/single) matches the target system.

        On mismatch, records ["multi"/"single", "multi"/"single"] as the label
        parameters (copy mode first, local mode second).
        """
        local_is_multi = self._saphana_cluster.is_multi_system()
        if local_is_multi == copy_multi_tenant_system:
            log.info(f"Tenant system match. {self._parse_param.get_log_common()}")
            return True
        log.error(f"Tenant system not match. {self._parse_param.get_log_common()}")

        def to_mode_name(flag):
            return "multi" if flag else "single"

        self._error_param_list = [to_mode_name(copy_multi_tenant_system), to_mode_name(local_is_multi)]
        return False

    def check_restore_db_status(self):
        """Check that the target database is in a restorable state.

        SYSTEMDB is always considered restorable; a tenant database must be
        stopped, otherwise DB_NOT_CLOSE is recorded.
        """
        if self.is_system_db():
            return True
        if not self._saphana_cluster.get_database_running_status(self._system_db_pwd_env):
            # Database is stopped — restore may proceed.
            return True
        log.error(f"Database({self._db_name}) is running. {self._parse_param.get_log_common()}")
        self._error_code = SaphanaErrorCode.DB_NOT_CLOSE.value
        return False

    def check_restore_topology(self, copy_extend_info):
        """Check that the copy's topology matches the restore target.

        Compares the number of entries in the copy's DB info against the
        target's node count. Best-effort: missing information on either side
        lets the check pass.
        """
        copy_db_info = copy_extend_info.get(SaphanaMetadataKey.DB_INFO.value, "")
        if not copy_db_info:
            log.warning(f"Get copy db info failed. {self._parse_param.get_log_common()}")
            return True
        nodes = self._parse_param.get_nodes()
        if not nodes:
            log.warning(f"Get nodes failed. {self._parse_param.get_log_common()}")
            return True
        if len(copy_db_info) == len(nodes):
            return True
        log.error(f"Copy topology dismatch. {self._parse_param.get_log_common()}")
        self._error_code = SaphanaErrorCode.TOPOLOGY_DISMATCH.value
        return False

    def is_backup_db_systemdb(self):
        """Return True when the backup copy's database is the system database.

        In a multi-tenant copy the DB must be named SYSTEMDB; in a
        single-container copy the DB name equals the system id.

        :raises Exception: when the multi-tenant attribute cannot be read
        """
        backup_db_name = self._parse_param.get_backup_database_name()
        backup_system_id = self._parse_param.get_backup_system_id()
        try:
            is_multi_tenant_system = self._parse_param.get_backup_multi_tenant_system()
        except Exception as err:
            log.error("Fail to get multi attr.")
            raise err
        name_upper = backup_db_name.upper()
        if is_multi_tenant_system:
            if name_upper == SaphanaConst.SYSTEM_DB:
                return True
        elif name_upper == backup_system_id.upper():
            return True
        log.error(f"Can not recover from {backup_db_name} to {self._db_name}.")
        return False

    def check_system_teant_not_recover_each_other(self):
        """Forbid restoring a SYSTEMDB copy into a tenant DB and vice versa.

        :return: True when source and target are both system DBs or both
            tenant DBs; otherwise False with the error code/params recorded
        """
        target_is_system = self._saphana_cluster.is_system_db()
        copy_is_system = self.is_backup_db_systemdb()
        # Mixed exactly when one side is SYSTEMDB and the other is not.
        if target_is_system != copy_is_system:
            # Fix: use the enum's .value, consistent with every other
            # error-code assignment in this class (the raw enum member was
            # stored before).
            self._error_code = SaphanaErrorCode.SYSTEM_TENANT_NOT_RECOVER_EACH_OTHER.value
            self._error_param_list = [self._parse_param.get_backup_database_name(), self._db_name]
            return False
        return True

    def check_systemid(self, first_copy_extend_info):
        """When restoring SYSTEMDB, require the copy's system id to equal the target sid.

        Tenant-database restores skip this check.
        """
        backup_system_id = first_copy_extend_info.get(SaphanaMetadataKey.SYSTEM_ID, "")
        if not self._saphana_cluster.is_system_db():
            return True
        if backup_system_id == self._sid:
            return True
        log.error(f"Copy system id {backup_system_id} is not equal to target {self._sid}")
        self._error_code = SaphanaErrorCode.SYSTEM_ID_NOT_EQUAL.value
        self._error_param_list = [backup_system_id, self._sid]
        return False

    def check_running_restore_job(self):
        """Ensure SYSTEMDB and tenant restore jobs are mutually exclusive.

        Single-container systems only have SYSTEMDB, so nothing is checked
        there. In multi-tenant mode a conflicting flag file fails the check
        and records the conflicting db/job in the error parameters.
        """
        if not self._saphana_cluster.is_multi_system():
            return True
        is_system_db = self._saphana_cluster.is_system_db()
        conflict, db_name, job_id = self.check_flag_file(is_system_db)
        if not conflict:
            return True
        log.error(f"Check flag file failed. {self._parse_param.get_log_common()}")
        if is_system_db:
            self._error_code = SaphanaErrorCode.HANA_RESTORE_BUSY.value
        else:
            self._error_code = SaphanaErrorCode.HANA_RESTORE_SYSTEMDB_BLOCK_TENANTDB.value
        self._error_param_list = [db_name, job_id]
        return False

    def restore_prerequisite(self):
        """Pre-task checks before the restore starts.

        Order: backint tool present; SYSTEMDB/tenant copies not cross-restored;
        copy version vs local version; tenant mode match; target DB stopped;
        topology match; system id match; no conflicting restore running;
        target tenant DB exists; finally create the mutual-exclusion flag file.

        :return: True when all checks pass, else False (error code recorded)
        """
        # Check the backint tool.
        if not HanaCommonUtil.check_backint_tool(self._sid, self._saphana_cluster.get_dir_instance()):
            return False
        # System DB and tenant DB must not be restored into each other.
        if not self.check_system_teant_not_recover_each_other():
            log.error(f"Check system teant not recover each other failed. {self._parse_param.get_log_common()}")
            return False
        first_copy_extend_info = self._parse_param.get_first_copy_extend_info()
        if not first_copy_extend_info:
            log.error(f"Get copy extendInfo failed. {self._parse_param.get_log_common()}")
            return False
        copy_hana_version = first_copy_extend_info.get(SaphanaJsonConstant.SAPHANA_VERSION, "")
        if copy_hana_version and not self.check_restore_version(copy_hana_version):
            self._error_code = SaphanaErrorCode.VERSION_DISMATCH.value
            return False
        is_multi = first_copy_extend_info.get(SaphanaJsonConstant.MULTI_TENANT_SYSTEM, "error")
        if is_multi != "error" and not self.check_multi_system(is_multi):
            log.error(f"Multi system error. {self._parse_param.get_log_common()}")
            self._error_code = SaphanaErrorCode.MULTI_MODE_DISMATCH.value
            return False
        if not self.check_restore_db_status():
            return False
        if not self.check_restore_topology(first_copy_extend_info):
            return False
        # Verify the system id matches.
        if not self.check_systemid(first_copy_extend_info):
            return False
        # System-DB and tenant-DB restore jobs are mutually exclusive.
        if not self.check_running_restore_job():
            return False
        # When restoring a tenant DB, it must already exist on the target.
        if not self._saphana_cluster.is_system_db() and not \
                self._saphana_cluster.is_tenant_db_exist_for_restore(self._system_db_pwd_env):
            # Fix: use the enum's .value, consistent with the other error-code
            # assignments in this method/class.
            self._error_code = SaphanaErrorCode.DB_NOT_EXIST.value
            return False
        if not self.create_flag_file():
            log.error(f"Create flag file failed. {self._parse_param.get_log_common()}")
            return False
        return True

    @abstractmethod
    def progress_func(self):
        """Abstract hook: query the progress of the running sub-job (subclass-specific)."""
        pass

    def exec_restore_sub_job(self):
        """Dispatch the current business sub-job (restore or prepare) by name.

        Reports a "sub-job started" label before executing, then returns the
        executed function's result. Unknown sub-job names fail.
        """
        job_name = self._parse_param.get_subjob_name()
        dispatch = {
            SaphanaSubjobName.RESTORE: (self.restore, SaphanaTaskLabel.RESTORE_START_COPY_SUBJOB.value),
            SaphanaSubjobName.PREPARE: (self.exec_prepare_job, SaphanaTaskLabel.RESTORE_START_MOUNT_SUBJOB.value),
        }
        if job_name not in dispatch:
            log.error(f"Subjob name({job_name}) not support. {self._parse_param.get_log_common()}")
            return False
        exec_func, start_label = dispatch[job_name]
        log_detail = LogDetail(logInfo=start_label,
                               logInfoParam=[self._parse_param.get_local_ip_by_self_nodes(), self._subjob_id],
                               logLevel=DBLogLevel.INFO.value)
        self.report_label(log_detail)
        return exec_func()

    def clean_file(self, file_list):
        """Best-effort removal of each existing file in file_list.

        Failures are logged as warnings and do not stop the loop.
        """
        log.info(f"Prepare delete {file_list}.")
        for file_name in file_list:
            if not os.path.exists(file_name):
                continue
            try:
                os.remove(file_name)
            except Exception:
                log.warning(f"Rm {file_name} file failed. {self._parse_param.get_log_common()}")
                continue
            log.info(f"Rm {file_name} success. {self._parse_param.get_log_common()}")

    def restore_post(self):
        """Post sub-job: clean up the restore working directory and flag files.

        Removes the per-job temp directory under the instance dir, the
        mutual-exclusion flag file, the recovery param files for both the
        backup-time and target DB names, and the per-job systemdb param file.

        :return: True on success (best-effort cleanup), False when the
            instance directory cannot be determined
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        if not dir_instance:
            log.error(f"Get dir instance is null.")
            return False
        self.delete_flag_file()
        restore_dir = os.path.join(dir_instance, f"{self._job_id}")
        log.info(f"Rm retore tmp restore_dir:{restore_dir}.")
        # Guard on self._job_id so an empty job id cannot delete dir_instance itself.
        if os.path.exists(restore_dir) and self._job_id:
            try:
                shutil.rmtree(restore_dir)
            except Exception as err:
                log.error(f"rmtree path failed:{err}.")
            else:
                log.info(f"Rm retore tmp restore_dir:{restore_dir}.")

        def gen_db_name_flag_file(db_name):
            # Path of the recovery param file for the given DB name.
            file_name = SapConst.RECOVERY_PARAM_FILE.format(self._saphana_cluster.db_name_prefix() + db_name)
            file_path = os.path.join(SapConst.SAPHANA_PARAM_PATH, file_name)
            return file_path

        rm_file_list = []
        backup_db_name = self._parse_param.get_backup_database_name()
        if backup_db_name != self._db_name:
            rm_file_list.append(gen_db_name_flag_file(backup_db_name))
        rm_file_list.append(gen_db_name_flag_file(self._db_name))
        rm_file_list.append(os.path.join(ParamConstant.PARAM_FILE_PATH, f"hana_systemdb_{self._job_id}"))
        self.clean_file(rm_file_list)
        return True

    def report_progress(self, subjob_details, rentry_count=1):
        """Report sub-job details once, retrying on failure.

        :param subjob_details: serialized SubJobDetails dict to report
        :param rentry_count: number of attempts when reporting fails
        """
        progress = subjob_details.get(SaphanaJsonConstant.PROGRESS, 0)
        reported = False
        while rentry_count >= 1:
            rentry_count -= 1
            reported = CommonFuction.report_job_details(self._job_id, subjob_details)
            if reported:
                log.info(f"Report job details success.progress:{progress} {self._parse_param.get_log_common()}")
                break
            time.sleep(SaphanaReportInterval.REPORT_PROGRESS_INTERVAL)
        # Fix: the previous `if rentry_count < 0` could never trigger (the loop
        # exits with the counter at 0), so failures were silently dropped.
        if not reported:
            log.error(f"Report job details failed.progress:{progress} {self._parse_param.get_log_common()}")

    def report_progress_thread(self):
        """Progress-reporting loop run in a background daemon thread.

        Repeatedly invokes the callback installed in self._get_progress_func
        and reports its result until self._report_progress_thread_start is
        cleared by exec_sub_job().
        """
        log.info(f"Report thread begin. {self._parse_param.get_log_common()}")
        while True:
            if not self._report_progress_thread_start:
                break
            self.report_progress(self._get_progress_func())
            time.sleep(SaphanaReportInterval.REPORT_PROGRESS_INTERVAL)
        log.info(f"Report thread end. {self._parse_param.get_log_common()}")

    def get_progress_comm(self):
        """Return one RUNNING progress report for sub-jobs with no real progress.

        During the restore sub-job, a database log file that has stopped
        updating indicates the task may be stuck, so a warning label is
        attached to the report.
        """
        if self.is_restore_sub_task and not self.log_file_is_update:
            log_detail = LogDetail(
                logInfo=SaphanaTaskLabel.HANA_RESCOVERY_PROGRESS_TIMEOUT.value,
                logInfoParam=[self._parse_param.get_local_ip_by_self_nodes(), "SAP HANA"],
                # Fix: use the enum's .value, consistent with every other
                # logLevel=DBLogLevel.*.value usage in this file.
                logLevel=DBLogLevel.WARN.value
            )
            return SubJobDetails(taskId=self._job_id, subTaskId=self._subjob_id,
                                 progress=SaphanaProgressPhase.RUNNING,
                                 logDetail=[log_detail],
                                 taskStatus=SubJobStatusEnum.RUNNING.value).dict(by_alias=True)
        return SubJobDetails(taskId=self._job_id, subTaskId=self._subjob_id,
                             progress=SaphanaProgressPhase.RUNNING,
                             taskStatus=SubJobStatusEnum.RUNNING.value).dict(by_alias=True)

    def create_log_detail(self, key, is_success):
        """Build the LogDetail reported at sub-job completion.

        Only prerequisite failure and restore success/failure carry labels;
        any other (key, is_success) combination yields None.

        :param key: SaphanaJobType value of the finished sub-job
        :param is_success: whether the sub-job succeeded
        :return: LogDetail or None
        """
        label_dict = {
            SaphanaJobType.RESTORE_PREDECESSOR: {
                False: SaphanaTaskLabel.PREREQUISITE_FAIL_LABEL.value
            },
            SaphanaJobType.RESTORE_RESTORE: {
                # Fix: use .value like the prerequisite label above — the raw
                # enum members were passed through before.
                False: SaphanaTaskLabel.RESTORE_SUBJOB_FAIL_LABEL.value,
                True: SaphanaTaskLabel.RESTORE_SUBJOB_SUCCESS_LABEL.value
            }
        }

        log_detail = None
        label = label_dict.get(key, {}).get(is_success, "")
        if label:
            log_level = DBLogLevel.INFO.value
            log_param = []
            log_detail_param = []
            err_code = 0
            if not is_success:
                log_level = DBLogLevel.ERROR.value
                err_code = self._error_code
                log_detail_param = self._error_param_list
            if label != SaphanaTaskLabel.PREREQUISITE_FAIL_LABEL.value:
                log_param = self._subjob_id
            # NOTE(review): for the prerequisite-fail label, logInfoParam
            # becomes [[]] (the empty-list default wrapped in a list) —
            # confirm the label template expects no positional parameter there.
            log_detail = LogDetail(logInfo=label, logDetailParam=log_detail_param,
                                   logInfoParam=[log_param], logLevel=log_level, logDetail=err_code)
        return log_detail

    def report_success_or_failed(self, is_success, key):
        """Report the final COMPLETED/FAILED status with the matching label.

        :param is_success: whether the sub-job succeeded
        :param key: SaphanaJobType value used to pick the completion label
        """
        task_status = SubJobStatusEnum.COMPLETED.value if is_success else SubJobStatusEnum.FAILED.value
        self.report_label(log_detail=self.create_log_detail(key, is_success),
                          task_status=task_status,
                          progress_param=SaphanaProgressPhase.END)

    def report_label(self, log_detail=None, task_status=SubJobStatusEnum.RUNNING.value, progress_param=0):
        """Report one label/status, retrying on failure up to the configured count.

        :param log_detail: optional LogDetail attached to the report
        :param task_status: SubJobStatusEnum value to report
        :param progress_param: progress value to report
        """
        detail_kwargs = dict(taskId=self._job_id, subTaskId=self._subjob_id,
                             progress=progress_param, taskStatus=task_status)
        if log_detail:
            detail_kwargs["logDetail"] = [log_detail]
        sub_job_details = SubJobDetails(**detail_kwargs).dict(by_alias=True)
        self.report_progress(sub_job_details, SaphanaRentryCount.REPORT_PROGRESS_RENTRY_COUNT)

    def exec_sub_job(self, key):
        """Run the sub-job mapped to key with background progress reporting.

        Looks up the [exec_func, progress_func] pair in self._func_map, starts
        a daemon thread that periodically reports progress, runs exec_func,
        then stops and joins the reporter.

        :param key: SaphanaJobType value selecting the sub-job
        :return: exec_func's result, or False when key has no valid mapping
        """
        # Single lookup replaces the former two identical try/except blocks.
        funcs = self._func_map.get(key)
        if not funcs or len(funcs) < 2:
            log.error(f"Get func exception. {self._parse_param.get_log_common()}")
            return False
        exec_func, progress_func = funcs[0], funcs[1]

        self._get_progress_func = progress_func
        self._report_progress_thread_start = True
        progress_thread = Thread(target=self.report_progress_thread)
        progress_thread.daemon = True
        progress_thread.start()
        try:
            ret = exec_func()
        finally:
            # Always stop and join the reporter, even when the sub-job raises.
            self._report_progress_thread_start = False
            progress_thread.join()
        return ret

    def create_symlink_copy_file(self, src_file_path, dest_file_path, instance_dir_path):
        """Validate the destination path, then link src_file_path to it.

        :return: True on success, False when the path is illegal or linking fails
        """
        if check_path_legal(dest_file_path, SaphanaPath.HANA_LIVE_MOUNT_PATH):
            return self.create_symlink_copy_file_ex(src_file_path, dest_file_path)
        log.error(f"Path is Unlawful. {self._parse_param.get_log_common()}")
        return False

    def create_symlink_copy_file_ex(self, src_file_path, dest_file_path):
        """Replace dest_file_path with a symlink to src_file_path.

        An existing destination is removed only when it is itself a symlink;
        a real file/directory at the destination is an error.
        """
        # Fix: also handle a dangling symlink at the destination —
        # os.path.exists() follows links and returns False for broken ones,
        # which made the subsequent `ln -s` fail. Matches the check in
        # create_symlink_and_chown.
        if os.path.exists(dest_file_path) or os.path.islink(dest_file_path):
            if not os.path.islink(dest_file_path):
                log.error(f"Dest file path({dest_file_path}) exist and not link. {self._parse_param.get_log_common()}")
                return False
            try:
                os.unlink(dest_file_path)
            except Exception:
                log.error(f"Unlink failed. {self._parse_param.get_log_common()}")
                return False
        # Create the link as the <sid>adm user.
        user = f"{self._sid}adm"
        cmd_str = f"ln -s {src_file_path} {dest_file_path}"
        ret, out = CommonFuction.exec_shell_cmd(cmd_str, user)
        if not ret:
            log.error(f"Ln symlink path failed. {out}. {self._parse_param.get_log_common()}")
            return False
        return True

    def create_symlink_and_chown(self, src_file_path, dest_file_path):
        """Replace dest_file_path with a symlink to src_file_path and chown it
        to the job's restore uid/gid.

        :return: True on success, False on any failure (logged)
        """
        # An existing destination (including a dangling symlink — islink()
        # catches what exists() misses) may only be replaced if it is a link.
        if os.path.exists(dest_file_path) or os.path.islink(dest_file_path):
            if not os.path.islink(dest_file_path):
                log.error(f"Dest file path({dest_file_path}) exist and not link. {self._parse_param.get_log_common()}")
                return False
            try:
                os.unlink(dest_file_path)
            except Exception as exception_str:
                log.error(f"Unlink failed. {self._parse_param.get_log_common()}")
                return False
        # Create the link (runs as the default shell user, unlike create_symlink).
        cmd_str = f"ln -s {src_file_path} {dest_file_path}"
        ret, out = CommonFuction.exec_shell_cmd(cmd_str)
        if not ret:
            log.error(f"Ln symlink path failed. {out}. {self._parse_param.get_log_common()}")
            return False
        uid, gid = self._parse_param.query_restore_permission()
        # NOTE(review): this truthiness check also rejects uid/gid 0 (root);
        # confirm whether `uid is None or gid is None` was intended.
        if not uid or not gid:
            log.error(f"Fail to get job permission.{uid}, {gid}")
            return False
        try:
            os.chown(dest_file_path, uid, gid)
        except Exception as exception_str:
            log.error(f"Chown exception. {self._parse_param.get_log_common()}")
            return False
        return True

    def create_symlink_copy_dir(self, src_path, dest_path, instance_dir_path):
        """Link every entry of src_path into dest_path.

        HANA restore expects all copy files in one directory; this links the
        files from other copy directories into the full-copy directory.

        :param src_path: source directory
        :param dest_path: target directory
        :return: True when every entry was linked, False on the first failure
        """
        for entry_name in os.listdir(src_path):
            linked = self.create_symlink_copy_file(os.path.join(src_path, entry_name),
                                                   os.path.join(dest_path, entry_name),
                                                   instance_dir_path)
            if not linked:
                log.error(f"Create symlink file failed. {self._parse_param.get_log_common()}")
                return False
        return True

    def get_log_restore_time(self):
        """Convert the log-restore Unix timestamp to 'YYYY-MM-DD HH:MM:SS' (UTC).

        :return: formatted time string, or "" when the timestamp is missing
            or cannot be converted
        """
        timestamp = self._parse_param.get_log_restore_timestamp()
        if not timestamp:
            log.error(f"Get timestamp failed. {self._parse_param.get_log_common()}")
            return ""
        try:
            moment = datetime.datetime.fromtimestamp(int(timestamp), datetime.timezone.utc)
        except Exception as err:
            log.error(f"Convert time failed:{err}. {self._parse_param.get_log_common()}")
            return ""
        restore_time = moment.strftime("%Y-%m-%d %H:%M:%S")
        log.info(f"The restore Convert time: {timestamp} -> {restore_time}.")
        return restore_time

    def is_system_db(self):
        """Return whether the restore target is the system database (delegates to the cluster manager)."""
        return self._saphana_cluster.is_system_db()

    def check_restore_dir(self, path_list):
        """Check that every path in path_list exists.

        :return: True when all paths exist, False at the first missing one
        """
        for candidate in path_list:
            if os.path.exists(candidate):
                continue
            log.info(f"Path({candidate}) not exist. {self._parse_param.get_log_common()}")
            return False
        return True

    """
    SAP HANA租户数据库和系统数据库的恢复需要互斥
    PM无法限制，需要在插件侧通过在共享目录中记录标记文件的方式来处理,以下三个函数完成对应功能
    """

    def create_flag_file(self):
        """Create the hana_<job_id>_<DBNAME> flag file when the restore starts.

        The file lives in the shared instance directory and is used by
        check_flag_file() to make SYSTEMDB/tenant restores mutually exclusive.

        :return: True when the file exists or was created, False on failure
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        log.info("Creating restore flag file, job id: %s, database: %s.", self._job_id, self._db_name)
        file_path = os.path.join(dir_instance, f"hana_{self._job_id}_{str(self._db_name).upper()}")
        if not check_path_legal(file_path, dir_instance):
            log.error(f"Path(hana_{self._job_id}_{str(self._db_name).upper()}) is not legal. \
                {self._parse_param.get_log_common()}")
            return False
        if not os.path.exists(file_path):
            # Create the flag file.
            try:
                os.mknod(file_path)
            except Exception as err:
                log.error(f"Mknod file(hana_{self._job_id}_{self._db_name}) failed.")
                log.exception(err)
                return False
            log.info(f"Create file(hana_{self._job_id}_{self._db_name}) success.")
        return True

    def delete_flag_file(self):
        """Remove this job's hana_<job_id>_<DBNAME> flag file after the restore.

        :return: True when the file is gone (or never existed), False on failure
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False
        file_path = os.path.join(dir_instance, f"hana_{self._job_id}_{str(self._db_name).upper()}")
        if not check_path_legal(file_path, dir_instance):
            log.error(f"Path(hana_{self._job_id}_{str(self._db_name).upper()}) is not legal. \
                {self._parse_param.get_log_common()}")
            return False
        log.info(f"Deleting restore flag file:{file_path}")
        if not os.path.exists(file_path):
            log.info("The restore flag file(%s) does not exist.", file_path)
            return True
        # Remove the flag file.
        try:
            os.remove(file_path)
        except Exception as exception_str:
            log.error(f"Remove file:{file_path} failed.")
            return False
        log.info(f"Remove file:{file_path} success.")
        return True

    def check_flag_file(self, is_system_db):
        """Scan the instance directory for conflicting restore flag files.

        Flag file name format: hana_<uuid>_<DBNAME>, e.g.
        hana_fe54d64f-7cbf-4415-a658-9a2bd6bc3bd7_SYSTEMDB.

        :param is_system_db: True when the current restore targets SYSTEMDB
        :return: (conflict, db_name, job_id) — conflict is True when a flag
            for the opposite DB kind exists; db_name/job_id identify the last
            flag examined ("" when the directory is unreadable)
        """
        db_name = ""
        job_id = ""
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as exception_str:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            return False, db_name, job_id
        if not os.path.isdir(dir_instance):
            return False, db_name, job_id
        # Group 1: job uuid, group 2: database name.
        flag_reg = r"^hana_([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})_(.+)$"
        for name in os.listdir(dir_instance):
            tmp_path = os.path.join(dir_instance, name)
            if not os.path.isfile(tmp_path):
                continue
            match_ret = re.match(flag_reg, name)
            if not match_ret:
                continue
            job_id = match_ret.groups()[0]
            db_name = match_ret.groups()[1]
            # SYSTEMDB restore: any tenant flag is a conflict.
            if is_system_db:
                if str(db_name).upper() != SaphanaConst.SYSTEM_DB:
                    return True, db_name, job_id
                continue
            # Tenant restore: a SYSTEMDB flag is a conflict.
            if str(db_name).upper() == SaphanaConst.SYSTEM_DB:
                return True, db_name, job_id
        return False, db_name, job_id

    def exec_restore_data_cmd(self):
        """
        Run the plain restore command.

        Intentionally a no-op here: SYSTEMDB and tenant databases execute the
        restore differently, so subclasses provide the implementation.
        """

    def exec_restore_log_cmd(self, restore_time, cata_log_path, log_data_path, backup_id):
        """
        Run the point-in-time restore command.

        Intentionally a no-op here: SYSTEMDB and tenant databases execute the
        restore differently, so subclasses provide the implementation.

        :param restore_time: target recovery timestamp
        :param cata_log_path: directory holding the backup catalog
        :param log_data_path: directory holding the log backup data
        :param backup_id: id of the full backup to start recovery from
        """

    def get_tmp_param_dbname(self):
        """
        Resolve the DB name to use inside the restore parameter file
        saphana_recovery_DB_{self._db_name}.txt.

        Prefers the name the database had at backup time when it differs from
        the current target name (restore-to-new-name case).
        """
        # The name recorded at backup time wins when it differs from the target.
        backup_db_name = self._parse_param.get_backup_database_name()
        log.debug(f"backup_db_name: {backup_db_name}")
        return backup_db_name if backup_db_name != self._db_name else self._db_name

    def clean_backup_flag_file(self, db_name):
        """
        Delete the backint backup parameter file for db_name if it exists.

        :param db_name: database name used to build the parameter file name
        """
        flag_name = SapConst.BACKUP_PARAM_FILE.format(
            self._saphana_cluster.db_name_prefix() + db_name)
        flag_path = os.path.join(SapConst.SAPHANA_PARAM_PATH, flag_name)
        if os.path.isfile(flag_path):
            os.remove(flag_path)

    def gen_recovery_flag_file(self, db_name, param):
        """
        Write the backint recovery parameter file for db_name and hand its
        ownership to the HANA instance user.

        :param db_name: database name used to build the parameter file name
        :param param: dict serialized into the parameter file
        :return: True on success, False otherwise
        """
        file_name = SapConst.RECOVERY_PARAM_FILE.format(self._saphana_cluster.db_name_prefix() + db_name)
        file_path = os.path.join(SapConst.SAPHANA_PARAM_PATH, file_name)
        if not check_path_legal(file_path, SapConst.SAPHANA_PARAM_PATH):
            log.error("Path is unlawful path")
            return False
        output_execution_result_ex(file_path, param)
        cluster_uid, cluster_gid = self._saphana_cluster.get_instance_user_group()
        try:
            os.chown(file_path, uid=cluster_uid, gid=cluster_gid)
        except Exception as err:
            # Fix: log the exception details (previously discarded) and drop
            # the needless f prefix on a static message.
            log.error("Chown file failed.")
            log.exception(err)
            return False
        return True

    def prepare_param_for_backint(self, dir_instance):
        """
        Write backint recovery parameter files for every database name involved
        in the restore: the target db plus, when different, the name the db had
        at backup time.

        :param dir_instance: HANA instance directory; the staging data path is
                             <dir_instance>/<job_id>
        :return: True on success, False otherwise
        """
        log.info("Start to prepare backint param")
        full_copy_id = self._parse_param.get_full_data_copy_id()
        if not full_copy_id:
            log.error(f"Get full copy id failed. {self._parse_param.get_log_common()}")
            return False

        # Key order is kept stable because it is serialized to the param file.
        param = {
            "pid": self._pid,
            "job_id": self._job_id,
            "sub_job_id": self._subjob_id,
            "db_name": self._db_name,
            "data_path": os.path.join(dir_instance, self._job_id),
            "cache_path": self._parse_param.get_cache_path(),
            "meta_path": self._parse_param.get_meta_path(),
        }
        backup_db_name = self._parse_param.get_backup_database_name()
        log.debug(f"backup_db_name: {backup_db_name}")
        db_name_list = [self._db_name]
        if backup_db_name != self._db_name:
            # Restore-to-new-name: also cover the original backup db name.
            db_name_list.append(backup_db_name)
        for db_name in db_name_list:
            self.clean_backup_flag_file(db_name)
            if not self.gen_recovery_flag_file(db_name, param):
                return False
        log.info("Succeed to prepare backint param")
        return True

    def restore_data_copy(self):
        """
        Run a plain data-copy restore after re-validating that the target
        database is stopped.

        :return: True on success, False otherwise.
        """
        # Re-check the target database status right before restoring.
        if not self.check_restore_db_status():
            log.error(f"Database({self._db_name}) is running. {self._parse_param.get_log_common()}")
            return False
        restored = self.exec_restore_data_cmd()
        if restored:
            log.info(f"Restore success. {self._parse_param.get_log_common()}")
            return True
        return False

    def restore_log_copy(self):
        """
        Perform a point-in-time (log) restore.

        Validates the recovery timestamp, the db name and the first full
        backup id, then delegates to exec_restore_log_cmd.

        :return: True on success, False otherwise.
        """
        restore_time = self.get_log_restore_time()
        if not restore_time:
            log.error(f"Get log restore time failed. {self._parse_param.get_log_common()}")
            return False
        if not check_sql_cmd_param(self._db_name):
            log.error(f"Invalid dbname:{self._db_name}. {self._parse_param.get_log_common()}")
            return False

        backup_id = self._parse_param.get_first_full_backup_id()
        if not backup_id:
            # Fix: this branch previously logged the copy-pasted message
            # "Get log restore time failed".
            log.error(f"Get first full backup id failed. {self._parse_param.get_log_common()}")
            return False
        if not check_sql_cmd_param(str(backup_id)):
            log.error(f"Invalid backupid:{backup_id}. {self._parse_param.get_log_common()}")
            return False
        catalog_data_path = os.path.join(self._saphana_cluster.get_dir_instance_ex(), self._job_id)
        log_data_path = os.path.join(catalog_data_path, "tmp")
        log.info(f"restore_time:{restore_time}, backup_id:{backup_id}")
        if not self.exec_restore_log_cmd(restore_time, catalog_data_path, log_data_path, backup_id):
            return False
        log.info(f"Restore success. {self._parse_param.get_log_common()}")
        return True

    def prepare_log_restore_file(self, dest_path, user_info):
        """
        Link every log-copy backup file — and, for point-in-time restore, the
        catalog files of the newest log copy — into dest_path.

        :param dest_path: staging directory the symlinks are created in
        :param user_info: pwd entry of the <sid>adm user that must own the copies
        :return: True on success, False otherwise
        """
        all_log_path = []
        if self._parse_param.get_log_restore_timestamp():
            all_log_path = self._parse_param.get_all_log_path()
            if not all_log_path:
                # Fix: an empty list previously crashed with IndexError on
                # all_log_path[-1] in the catalog handling below.
                log.error(f"Get all log path failed. {self._parse_param.get_log_common()}")
                return False
        wait_for_link_log = []
        for each_log_path in all_log_path:
            # chown so the <sid>adm user can read the log copy
            cmd = f"chown -R {user_info.pw_uid}:{user_info.pw_gid} {each_log_path}"
            ret, _, err = execute_cmd(cmd)
            if not ret:
                log.error("Change chown failed for %s", err)
                return False
            tmp_log_path = os.path.join(each_log_path, "data")
            for backup_id in os.listdir(tmp_log_path):
                wait_for_link_log.append(os.path.join(tmp_log_path, backup_id))
        for each_backup in wait_for_link_log:
            for each_file in os.listdir(each_backup):
                if not self.create_symlink(os.path.join(each_backup, each_file),
                                           os.path.join(dest_path, each_file)):
                    return False
        # The catalog is only needed for log (point-in-time) restore.
        if self._parse_param.get_log_restore_timestamp():
            catalog_path = os.path.join(all_log_path[-1], "catalog")
            for each_file in os.listdir(catalog_path):
                if not self.create_symlink(os.path.join(catalog_path, each_file), os.path.join(dest_path, each_file)):
                    return False
        return True

    def prepare_restore_file(self):
        """
        Link the data and catalog directories of all copies into one staging
        directory (<instance_dir>/<job_id>) for HANA to recover from.

        :return: True on success, False otherwise.
        """
        try:
            dir_instance = self._saphana_cluster.get_dir_instance_ex()
        except Exception as err:
            log.error(f"Get dir instance failed. {self._parse_param.get_log_common()}")
            # Fix: keep the exception details instead of discarding them.
            log.exception(err)
            return False
        dest_path = os.path.join(dir_instance, self._job_id)
        user_info = pwd.getpwnam(f"{self._sid.lower()}adm")
        if not os.path.exists(dest_path):
            os.mkdir(dest_path)
            os.lchown(dest_path, user_info.pw_uid, user_info.pw_gid)
        data_path = self._parse_param.get_data_path()
        if not data_path:
            log.error(f"Get data path failed. {self._parse_param.get_log_common()}")
            return False
        all_data_copy_id, _ = self._parse_param.get_all_copy_id()
        if not all_data_copy_id:
            # Fix: an empty copy list previously crashed with IndexError on
            # all_data_copy_id[-1] in the catalog handling below.
            log.error(f"Get data copy id failed. {self._parse_param.get_log_common()}")
            return False
        # Handle the data copies first.
        for data_copy_id in all_data_copy_id:
            tmp_data_path = os.path.join(data_path, data_copy_id)
            if not os.path.exists(tmp_data_path):
                log.error(f"tmp_data_path is not exist. {tmp_data_path}")
                return False
            # chown so the <sid>adm user can read the data copy
            cmd = f"chown -R {user_info.pw_uid}:{user_info.pw_gid} {tmp_data_path}"
            ret, _, err = execute_cmd(cmd)
            if not ret:
                log.error("Change chown failed for %s", err)
                return False
            for each_file in os.listdir(tmp_data_path):
                if not self.create_symlink(os.path.join(tmp_data_path, each_file), os.path.join(dest_path, each_file)):
                    return False
        if not self._parse_param.get_log_restore_timestamp():
            # Plain data restore: link the backup catalog from the newest copy.
            catalog_file_path = ""
            last_catalog_path = os.path.join(data_path, all_data_copy_id[-1])
            for each_item in os.listdir(last_catalog_path):
                if os.path.isdir(os.path.join(last_catalog_path, each_item)):
                    catalog_file_path = f"{os.path.join(last_catalog_path, each_item)}/log_backup_0_0_0_0"
                    break
            if not catalog_file_path:
                # Fix: previously an empty path was passed to create_symlink
                # when no catalog subdirectory was found.
                log.error(f"No catalog dir found under {last_catalog_path}.")
                return False
            if not self.create_symlink(catalog_file_path, os.path.join(dest_path, "log_backup_0_0_0_0")):
                return False
        # Then handle the log copies.
        if not self.prepare_log_restore_file(dest_path, user_info):
            return False
        log.info("Prepare restore link file successfully.")
        return True

    def monitor_trace_file(self):
        """
        Watch the HANA backup.log trace file to detect when the restore has
        started and whether it stalls (no update for 30 minutes).

        Runs in a worker thread (see restore()); the main thread signals it to
        exit by setting self.log_file_is_update. Sets self.is_restore_sub_task
        when a stall is detected.

        :return: True when monitoring ends normally, False when no trace file
                 path could be determined.
        """
        ret, host_list = self._saphana_cluster.get_node_list()
        if not ret:
            log.error("Get node list failed.")
            return False
        # home_path = /home/hana/shared/S00/HDB00
        home_path = os.path.realpath(self._saphana_cluster.get_dir_instance_ex())
        log_path = ""
        for host in host_list:
            if self._db_name == SaphanaConst.SYSTEM_DB:
                log_path = f"{home_path}/{host}/trace/backup.log"
            else:
                log_path = f"{home_path}/{host}/trace/DB_{self._db_name}/backup.log"
            if os.path.exists(log_path):
                break
        if not log_path:
            # Fix: with an empty host list, log_path was previously unbound
            # and the log call below raised NameError.
            log.error("No candidate trace file path, stop monitoring.")
            return False
        log.info(f"System log path of saphana restore task is: {log_path}.")

        # The trace file is only updated while a backup/restore runs, so wait
        # until it has been modified recently, which means the restore started.
        while True:
            # Fix: the file may not exist yet (log backup never ran); wait for
            # the restore to create it instead of crashing on getmtime.
            if os.path.exists(log_path) \
                    and int(time.time()) - int(os.path.getmtime(log_path)) < 30:
                log.info("Restore job beginning...")
                break
            time.sleep(SaphanaReportInterval.REPORT_PROGRESS_INTERVAL)

        while True:
            if self.log_file_is_update:
                log.info("The main thread returned, the child thread exits.")
                break
            last_modification_time = os.path.getmtime(log_path)
            # No update for more than 30 minutes means the task is stuck.
            if int(time.time()) - int(last_modification_time) > 60 * 30:
                self.is_restore_sub_task = True
                # Fix: log.warn is a deprecated alias of log.warning.
                log.warning("The log file has not been updated. The recovery task may be stuck.")
                break
            time.sleep(SaphanaReportInterval.REPORT_PROGRESS_INTERVAL)
        return True

    def restore(self):
        """
        Execute the restore: prepare the staging link files, run either a
        point-in-time (log) or a plain data restore while a watchdog thread
        monitors the HANA trace file, then run post-processing.

        :return: True on success, False otherwise.
        """
        if not self.prepare_restore_file():
            return False
        timestamp = self._parse_param.get_log_restore_timestamp()

        # Watchdog thread: watches backup.log and flags a stuck job.
        monitor_thread = Thread(target=self.monitor_trace_file)
        monitor_thread.start()

        # A configured timestamp selects point-in-time (log) recovery.
        restore_ok = self.restore_log_copy() if timestamp else self.restore_data_copy()
        # Tell the watchdog thread to exit.
        self.log_file_is_update = True
        # Drop a flag file so log backup can detect that a restore happened.
        if restore_ok:
            flag_path = os.path.join(
                os.path.dirname(self._parse_param.get_cache_path()),
                f"{self._saphana_cluster.db_name_prefix()}{self._db_name}_RESTORED")
            if not os.path.exists(flag_path):
                os.mknod(flag_path)
            log.info(f"Create flag file after restored: {flag_path}")
        self.restore_post()
        return restore_ok

    def prepare_backint_config(self):
        """
        Ensure the backint hdbconfig file has a logBackupPath: chown the config
        file to <sid>adm, and when logBackupPath is not set yet, create the
        default log-backup directory tree (owned by <sid>adm) and record it.
        """
        log.info("Start to prepare backint config")
        user_info = pwd.getpwnam(f"{self._sid.lower()}adm")
        hdbconfig_file = f"{self._saphana_cluster.get_dir_instance()}/{SapConst.BACKINT_CONFIG_SUFFIX}"
        os.chown(hdbconfig_file, user_info.pw_uid, user_info.pw_gid)
        try:
            if read_tmp_json_file(hdbconfig_file).get("logBackupPath", ""):
                log.info("LogBackupPath exists, will return.")
                return
        except Exception as ex:
            # Best effort: an unreadable config just means we (re)create the
            # path below. Fix: log the exception instead of dropping it.
            log.error(f"Cannot get logBackupPath from {hdbconfig_file}.")
            log.exception(ex)
        log_backup_path = f"{self._saphana_cluster.get_dir_instance_ex()}/{SapConst.DEFAULT_LOGBACKUP_PATH_SUFFIX}"
        log_backup_path = os.path.realpath(log_backup_path)
        os.makedirs(os.path.dirname(log_backup_path), exist_ok=True)
        os.chown(os.path.dirname(log_backup_path), user_info.pw_uid, user_info.pw_gid)
        os.makedirs(log_backup_path, exist_ok=True)
        os.chown(log_backup_path, user_info.pw_uid, user_info.pw_gid)
        # SYSTEMDB backs up directly under its own name; tenants use DB_<NAME>.
        db_name = f"DB_{self._db_name.upper()}"
        if self._saphana_cluster.is_system_db():
            db_name = self._db_name.upper()
        db_log_backup_path = os.path.join(log_backup_path, db_name)
        os.makedirs(db_log_backup_path, exist_ok=True)
        os.chown(db_log_backup_path, user_info.pw_uid, user_info.pw_gid)
        log.info(f"Try make dir and chown, exist is ok: {log_backup_path}, {db_log_backup_path}")
        output_execution_result_ex(hdbconfig_file, {"logBackupPath": log_backup_path})
        log.info(f"Set logBackupPath to {log_backup_path}.")

    def _check_recover(self, data_dir: str, log_dir: str, catalog_dir: str, target_time: str) -> bool:
        """
        Verify that every backup file the catalog requires for recovery up to
        target_time is present in data_dir or log_dir.

        First tries the strict list (delta/incremental backups included); if
        files are missing, retries ignoring deltas and records the chosen mode
        in self._ignore_delta.

        :return: True when a usable backup set is available, False otherwise.
        """
        files = os.listdir(data_dir) + os.listdir(log_dir)
        available = set(files)
        saphana = RestoreSAPHANA(self._sid, self._db_name, self._system_db_port, self._system_db_user, "")
        # Strict attempt: require delta/incremental backups as well.
        backups = saphana.list_required_backups(catalog_dir, target_time, ignore_delta=False)
        if not set(backups) - available:
            log.info("Check recover successfully without delta ignore.")
            self._ignore_delta = False
            return True
        log.warning(f"Missing backups without delta ignore. {backups} {files}")
        # Relaxed attempt: deltas ignored, full backups only.
        backups = saphana.list_required_backups(catalog_dir, target_time, ignore_delta=True)
        if set(backups) - available:
            log.error(f"Missing backups with delta ignore. {backups} {files}")
            return False
        log.info("Check recover successfully with delta ignore.")
        self._ignore_delta = True
        return True
