#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import json
import os
import pathlib
import pwd
import stat
import sys
import time
import re
from shutil import copy

from gaussdbt.commons.const import Env, RoachConstant, NormalErr, RestoreMainEnum, RestoreProgressEnum, ProgressInfo, \
    IS_CLONE_FILE_SYSTEM
from gaussdbt.commons.const import log, RETRY_WAIT_SECONDS, RETRY_TIMES, AREA_PARENT_PATH
from gaussdbt.commons.database_common import gaussdbt_check_user_name_and_injection
from gaussdbt.commons.gaussdbt_common import record_gdb_err_code, query_gdb_err_code, exec_chmod_dir_recursively
from gaussdbt.commons.gaussdbt_param_protection import ParamProtection
from gaussdbt.commons.roach_meta_info import RoachMeta, mount_bind_backup_path, umount_bind_backup_path, \
    read_gauss_progress, write_progress_file
from gaussdbt.resource.gaussdbt_resource import GaussCluster
from gaussdbt.restore.check_restore_type import CheckRestore
from gaussdbt.restore.exec_restore import ExecRestore
from common.common import output_result_file, convert_timestamp_to_time, exter_attack, check_path_legal
from common.common_models import ActionResult, SubJobDetails, LogDetail
from common.const import JobData, ExecuteResultEnum, RepositoryDataTypeEnum, SubJobStatusEnum, DeployType, \
    ReportDBLabel, DBLogLevel, SysData, BackupTypeEnum
from common.file_common import change_path_permission
from common.parse_parafile import get_user_name
from common.util.exec_utils import exec_cp_cmd, exec_cat_cmd, exec_mkdir_cmd, exec_ln_cmd, exec_mv_cmd


class RestoreOutput:
    """Holds restore task outputs (result code, progress report, log detail)
    and static helpers for parsing the restore parameter file and locating
    copy metadata on the mounted repositories.
    """

    def __init__(self):
        # Pessimistic defaults (INTERNAL_ERROR / FAILED) so any unreported
        # code path is treated as a failure rather than a silent success.
        self.output_code = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=0, message='')
        self.progress_info = SubJobDetails(taskId='', subTaskId='', taskStatus=SubJobStatusEnum.FAILED.value,
                                           logDetail=[], progress=0, dataSize=0, speed=0, extendInfo=None)
        self.err_log = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[],
                                 logDetailInfo=[], logLevel=3)

    @staticmethod
    def read_version(meta_path: str, backup_key: str) -> str:
        """Read the "version" field from <meta_path>/<backup_key>/metadata.json.

        :param meta_path: directory that holds the per-copy metadata folders
        :param backup_key: backup copy key (sub directory name)
        :return: version string, or '' when the file is missing, unreadable
            or has no "version" entry
        """
        log.info('Read version from json file')
        json_path = os.path.join(meta_path, backup_key)
        json_file = os.path.join(json_path, 'metadata.json')
        if not os.path.exists(json_file):
            log.error(f'File: {json_file} not exists!')
            return ''
        return_code, data = exec_cat_cmd(json_file)
        if not return_code:
            log.error(f'Get {json_file} info error!')
            return ''
        param_dict = json.loads(data)
        if not param_dict.get("version"):
            log.error("Failed to get version info")
            return ''
        return param_dict.get("version")

    @staticmethod
    def get_copy_dict(job_dict: dict):
        """Select the copy dict to restore from the job parameters.

        When a restore timestamp is present and more than one copy exists,
        the second-to-last copy is returned together with the converted
        timestamp (presumably the data copy preceding a log copy -- TODO
        confirm against the scheduler's copies ordering); otherwise the last
        copy is returned with no timestamp.

        :return: (time_stamp or None, copy dict; {} when extendInfo is empty)
        """
        time_stamp = None
        if not job_dict.get("extendInfo", {}):
            return time_stamp, {}
        if job_dict.get("extendInfo", {}).get("restoreTimestamp") and len(job_dict.get("copies", [{}])) > 1:
            time_stamp = convert_timestamp_to_time(int(job_dict.get("extendInfo", {}).get("restoreTimestamp")))
            find_copy_dict = job_dict.get("copies", [{}])[-2]
        else:
            find_copy_dict = job_dict.get("copies", [{}])[-1]
        return time_stamp, find_copy_dict

    @staticmethod
    def get_context(context_tmp: dict):
        """Return (meta_mount, new_meta_path, meta_path) for a copy stored
        under <meta_area>/roach: both mount paths come from "meta_area" and
        the meta path is the roach default."""
        meta_mount = context_tmp.get("meta_area")
        new_meta_path = context_tmp.get("meta_area")
        meta_path = RoachConstant.ROACH_META_FILE_PATH
        return meta_mount, new_meta_path, meta_path

    @staticmethod
    @exter_attack
    def parse_param():
        """
        Parse the parameter file and collect all information needed for restore.
        :return: dict() -- restore context; {} (or a partial context after
            reporting an error result file) on failure
        """
        log.info("Parsing params from json file and ini file")
        context = dict()
        try:
            param_protection = ParamProtection(JobData.PID)
        except Exception:
            log.error("Failed to parse param file")
            output = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, message="Failed to parse param file")
            output_result_file(JobData.PID, output.dict(by_alias=True))
            return context
        context["time_stamp"] = param_protection.get_restore_time_stamp()
        context["new_cluster"] = param_protection.get_new_cluster()
        context["backup_key"] = param_protection.get_restore_key()
        context["meta_path"] = os.path.join(RoachConstant.ROACH_DATA_FILE_PATH, 'metadata')
        if param_protection.get_subjob():
            sub_job_dict = param_protection.get_subjob()
            context["sub_job_name"] = sub_job_dict.get("jobName")
        copy_dict = param_protection.get_data_copy_dict()
        if not copy_dict:
            log.error("Copy info is none")
            return {}
        context["new_backup_copy"] = RestoreOutput.check_old_or_new_backup_copy(copy_dict, context.get("backup_key"))
        context["copy_type"] = param_protection.get_copy_type()
        RestoreOutput.deal_with_repositories_info(context, copy_dict)
        context["version_path"] = context.get("meta_area")
        copy_file = os.path.join(os.path.join(context.get("meta_area"), context.get("backup_key")), "metadata")
        context["log_path"] = param_protection.get_log_path()
        if context.get("log_path"):
            # Cluster nodes write logs under a per-node sub directory.
            sub_dir = set_sub_dir_for_cluster_nodes(context.get("new_backup_copy"))
            context["log_path"] = os.path.join(context.get("log_path"), sub_dir)
        if not rename_clone_file_system_paths(context, context.get("backup_key")):
            output = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, message="Failed to parse param file")
            output_result_file(JobData.PID, output.dict(by_alias=True))
            return context
        # Copies written under <meta_area>/roach/<key> use the roach layout;
        # copies directly under <meta_area>/<key> need their metadata copied.
        if os.path.exists(os.path.join(context.get("meta_area"), "roach", context.get("backup_key"))):
            context["meta_mount"], context["new_meta_path"], context["meta_path"] = RestoreOutput.get_context(context)
            context["version_path"] = os.path.join(context.get("meta_area"), "roach")
            copy_file = context.get("meta_area")
            copy_ini(os.path.join(context.get("meta_area"), "roach", context["backup_key"]))
        elif os.path.exists(os.path.join(context.get("meta_area"), context.get("backup_key"))):
            if not copy_meta_data(context):
                return {}
        context = get_old_restore_param(context, copy_file)
        if os.path.exists(os.path.join(context.get("meta_area"), "roach", context.get("backup_key"))):
            context["meta_path"] = RoachConstant.ROACH_META_FILE_PATH
        context["version"] = RestoreOutput.read_version(context.get("version_path"), context.get("backup_key"))
        context["parallel_process"] = param_protection.get_parallel_process()
        return context

    @staticmethod
    def deal_with_repositories_info(context, copy_dict):
        """Populate *context* with meta/data/cache repository paths taken from
        the copy's "repositories" list, adding the per-node sub directory for
        new-style cluster copies.

        :param context: restore context dict, updated in place
        :param copy_dict: copy info containing the "repositories" list
        """
        log.info(f"Start deal with repo info")
        repositories_info = copy_dict.get("repositories", [])
        sub_dir = set_sub_dir_for_cluster_nodes(context.get("new_backup_copy"))
        for reps in repositories_info:
            if reps.get("repositoryType") == RepositoryDataTypeEnum.META_REPOSITORY:
                check_sub_dir = ""
                if sub_dir:
                    check_sub_dir = f"{sub_dir}/meta"
                context["check_meta"] = os.path.join(reps.get("path")[0], check_sub_dir)
                context["meta_area"] = os.path.join(reps.get("path")[0], sub_dir, 'meta')
            if reps.get("repositoryType") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                check_sub_dir = ""
                if sub_dir:
                    check_sub_dir = f"{sub_dir}/data"
                context["check_data"] = os.path.join(reps.get("path")[0], check_sub_dir)
                context["new_media_path"] = os.path.join(reps.get("path")[0], sub_dir, 'data', 'mediadata')
                context["new_meta_path"] = os.path.join(reps.get("path")[0], sub_dir, 'data', 'metadata')
                context["meta_mount"] = None
                context["new_data"] = os.path.join(reps.get("path")[0], sub_dir, 'data')
                rep_extend_info = reps.get("extendInfo")
                context[IS_CLONE_FILE_SYSTEM] = rep_extend_info.get("isCloneFileSystem", True)
            if reps.get("repositoryType") == RepositoryDataTypeEnum.CACHE_REPOSITORY:
                context["cache_path"] = reps.get("path")[0]
                # If the primary and standby node uids differ, one side may be
                # unable to write the cache dir; widen permissions to avoid it.
                if GaussCluster.get_deploy_type() == DeployType.CLUSTER_TYPE:
                    user_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
                    change_path_permission(context["cache_path"], user_name, mode=0o777)

    @staticmethod
    def check_old_or_new_backup_copy(copy_dict, backup_key):
        """Detect whether the copy uses the new layout with per-node-ip sub
        directories under the meta repository.

        :param copy_dict: copy info containing the "repositories" list
        :param backup_key: backup copy key
        :return: True for a new-style copy (node-ip sub dirs), False otherwise
        """
        log.info("Start to check old or new backup copy.")
        repositories_info = copy_dict.get("repositories", [])
        for reps in repositories_info:
            if reps.get("repositoryType") != RepositoryDataTypeEnum.META_REPOSITORY:
                continue
            test_meta_path = ""
            for path in os.listdir(reps.get("path")[0]):
                # Sub dirs named like dotted-quad IPs mark a new-style copy.
                pattern = r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$"
                if re.match(pattern, path):
                    test_meta_path = os.path.join(reps.get("path")[0], path, 'meta', 'roach', backup_key)
                    break
            if not test_meta_path:
                log.info("There's no node_ip dirs. This is an old backup copy (without sub dirs)!")
                return False
            if os.path.exists(test_meta_path):
                log.info("This is a new backup copy (with sub dirs)!")
                return True
            meta_info_path = os.path.join(reps.get("path")[0], 'meta', 'roach', backup_key)
            if os.path.exists(meta_info_path):
                log.info("This is an old backup copy (without sub dirs)!")
                return False
            log.error(f"Can not find meta info dir: {meta_info_path} or {test_meta_path}!")
        return False

    @staticmethod
    def copy_into_meta(input_path):
        """Copy the roach ini file from <meta_area>/<backup_key> into the meta
        path and hand it to the database user.

        :param input_path: restore context dict (meta_path/meta_area/backup_key)
        :return: False when the copy did not produce the target file, else True
        """
        log.info('Copy ini file!')
        db_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
        copy_file = os.path.join(input_path.get("meta_path"), RoachConstant.ROACH_META_FILE_NAME)
        source_meta = os.path.join(input_path.get("meta_area"), input_path.get("backup_key"))
        source_file = os.path.join(source_meta, RoachConstant.ROACH_META_FILE_NAME)
        if os.path.exists(source_file):
            exec_cp_cmd(source_file, input_path.get("meta_path"))
            if os.path.exists(copy_file):
                change_path_permission(copy_file, db_name)
            else:
                log.error(f'File {copy_file} not exist!')
                return False
        log.info('Copy ini file succeed!')
        return True

    def set_start_label(self, label: str, sub_task_id: str, input_dict: dict):
        """Append a start label to the progress log detail exactly once per
        sub task; a marker file in the cache path records that the label was
        already reported."""
        label_file = os.path.join(input_dict.get("cache_path"), f'{sub_task_id}{GaussCluster.get_hostname()}lb')
        if os.path.exists(label_file):
            # Label already reported for this sub task on this host.
            return
        else:
            try:
                pathlib.Path(label_file).touch()
            except Exception:
                log.error(f'Error while creating label file {label_file}')
                return
        self.err_log.log_info = label
        self.err_log.log_info_param = [sub_task_id]
        self.err_log.log_level = DBLogLevel.INFO.value
        self.progress_info.log_detail.append(self.err_log)

    def set_progress_succeed(self, sub_task_id: str):
        """Mark the progress report as 100% completed for *sub_task_id*."""
        self.progress_info.sub_task_id = sub_task_id
        self.progress_info.progress = 100
        # NOTE(review): elsewhere task_status is assigned the enum's .value;
        # here the enum member itself is assigned -- confirm the model coerces.
        self.progress_info.task_status = SubJobStatusEnum.COMPLETED


def get_old_restore_param(context, copy_file):
    log.info(f"Get info from dir: {copy_file}")
    nodeinfo = RoachMeta.parse_ini_get_node_info(context.get("backup_key"), copy_file)
    context["node_count"] = nodeinfo.node_count
    context["primary_count"] = nodeinfo.primary_count
    context["standby_count"] = nodeinfo.standby_count
    context["media_dest"] = nodeinfo.mediadest_path
    context["meta_dest"] = nodeinfo.metadest_path
    if context.get("meta_dest") == RoachConstant.ROACH_OLD_META_FILE_PATH:
        RoachConstant.ROACH_DATA_FILE_PATH = RoachConstant.ROACH_OLD_DATA_FILE_PATH
        RoachConstant.ROACH_META_FILE_PATH = RoachConstant.ROACH_OLD_META_FILE_PATH
        RoachConstant.ROACH_LOG_FILE_PATH = RoachConstant.ROACH_OLD_LOG_FILE_PATH
        mode = stat.S_IRUSR | stat.S_IXUSR
        db_name = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
        change_path_permission(RoachConstant.ROACH_OLD, db_name, mode=mode)
    return context


def set_output_code(is_success: bool):
    """Translate a boolean restore result into an ActionResult.

    :param is_success: True when the restore step succeeded
    :return: ActionResult with SUCCESS, or INTERNAL_ERROR plus a hint message
    """
    if is_success:
        return ActionResult(code=ExecuteResultEnum.SUCCESS, bodyErr=0, message='')
    return ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=0,
                        message='Execute Restore failed, please check the log')


def copy_ini(file_path):
    """Copy the roach ini file from *file_path* two directory levels up,
    unless it is already present there.

    :param file_path: copy directory containing the ini file
    :return: True on success or when no copy was needed, False on copy failure
    """
    target_dir = os.path.dirname(os.path.dirname(file_path))
    source_ini = os.path.join(file_path, RoachConstant.ROACH_META_FILE_NAME)
    target_ini = os.path.join(target_dir, RoachConstant.ROACH_META_FILE_NAME)
    if os.path.exists(target_ini):
        return True
    try:
        copy(source_ini, target_dir)
    except Exception as copy_err:
        log.error(copy_err)
        return False
    log.info("Copy ini file to meta path ")
    return True


def write_progress_by_result(result: bool, progress_file: str):
    """Persist SUCCEED or FAILED into *progress_file* according to *result*."""
    status = ProgressInfo.SUCCEED if result else ProgressInfo.FAILED
    write_progress_file(status, progress_file)


def change_path_user(username, path):
    """Recursively hand *path* and everything beneath it to *username*.

    The previous implementation recursed manually with os.listdir and applied
    change_path_permission to every subdirectory twice (once as a child entry
    and again as the next recursion root); os.walk visits each entry exactly
    once. followlinks=True preserves the original behavior of descending into
    symlinked directories (os.path.isdir follows links).

    :param username: system user that should own the paths
    :param path: root directory to process
    :return: True (kept for backward compatibility with existing callers)
    """
    change_path_permission(path, username)
    for root, dirs, files in os.walk(path, followlinks=True):
        for entry in dirs + files:
            change_path_permission(os.path.join(root, entry), username)
    return True


def do_restore_pre(input_dict: dict, task_id: str):
    """Build the SubJobDetails progress report for the restore-prerequisite
    job from the error code previously recorded in the cache repository.

    :param input_dict: restore context (needs "cache_path")
    :param task_id: main task id for the report
    :return: SubJobDetails -- COMPLETED/100, RUNNING/50 or FAILED/0
    """
    progress_dict = SubJobDetails(taskId=task_id, subTaskId='', taskStatus=SubJobStatusEnum.FAILED.value, logDetail=[],
                                  progress=0, dataSize=0, speed=0, extendInfo=None)
    err_dict = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0, logDetail=0, logDetailParam=[], logDetailInfo=[],
                         logLevel=3)
    err_code_path = os.path.join(input_dict.get("cache_path"), f'{JobData.JOB_ID}errcode')
    pre_job_err = query_gdb_err_code(err_code_path)
    if pre_job_err in (NormalErr.NO_ERR, NormalErr.WAITING):
        err_dict.log_detail = None
        finished = pre_job_err == NormalErr.NO_ERR
        progress_dict.progress = 100 if finished else 50
        progress_dict.task_status = (SubJobStatusEnum.COMPLETED.value if finished
                                     else SubJobStatusEnum.RUNNING.value)
        return progress_dict
    # Any other code is a recorded pre-job error: surface it in the report.
    err_dict.log_detail = pre_job_err
    progress_dict.log_detail.append(err_dict)
    progress_dict.progress = 0
    progress_dict.task_status = SubJobStatusEnum.FAILED.value
    return progress_dict


@exter_attack
def do_progress_work(func_type: str):
    """Report progress for the restore phases (prerequisite / restore / post).

    Dispatches on *func_type* and, for cluster sub jobs, on the sub job name
    ("prepare" / "umount"). Writes the progress report to the result file.
    NOTE: relies on sys.argv[4] carrying the sub task id.

    :param func_type: one of the Restore*Progress function names
    :return: ExecuteResultEnum.SUCCESS / INTERNAL_ERROR
    """
    progress_instance = RestoreOutput()
    input_info = progress_instance.parse_param()
    if not input_info:
        log.error('context err')
        return ExecuteResultEnum.INTERNAL_ERROR
    new_data_ = input_info.get("new_data")
    if new_data_ and not check_path_legal(new_data_, AREA_PARENT_PATH):
        log.error("The path is not in the trustlist.")
        return ExecuteResultEnum.INTERNAL_ERROR
    progress_instance.progress_info.task_id = JobData.JOB_ID
    exec_ins = ExecRestore(input_info)
    if input_info.get("sub_job_name"):
        if all(["prepare" in input_info.get("sub_job_name"), func_type == "RestoreProgress"]):
            progress_file = os.path.join(input_info.get("cache_path"), f'{JobData.JOB_ID}{GaussCluster.get_hostname()}')
            result_file = os.path.join(input_info.get("meta_path"), RoachConstant.ROACH_META_FILE_NAME)
            progress_instance.progress_info.sub_task_id = sys.argv[4]
            # Check the mount-bind result: if the ini file exists the prepare
            # step succeeded; otherwise report the step as still in progress.
            if os.path.exists(result_file):
                write_progress_by_result(os.path.exists(result_file), progress_file)
            else:
                write_progress_file(ProgressInfo.START, progress_file)
            progress_instance.set_start_label(ReportDBLabel.RESTORE_SUB_START_PREPARE,
                                              sys.argv[4], input_info)
            progress_instance.progress_info.progress, progress_instance.progress_info.task_status = \
                read_gauss_progress(progress_file, input_info.get("cache_path"))
            output_result_file(JobData.PID, progress_instance.progress_info.dict(by_alias=True))
            return ExecuteResultEnum.SUCCESS
        if all(["umount" in input_info.get("sub_job_name"), func_type == "RestoreProgress"]):
            # The umount sub job has no measurable progress: report success.
            progress_instance.set_progress_succeed(sys.argv[4])
            progress_instance.set_start_label(ReportDBLabel.RESTORE_SUB_START_UMOUNT,
                                              sys.argv[4], input_info)
            output_result_file(JobData.PID, progress_instance.progress_info.dict(by_alias=True))
            return ExecuteResultEnum.SUCCESS
    if func_type == "RestorePrerequisiteProgress":
        progress_instance.progress_info = do_restore_pre(input_info, JobData.JOB_ID)
    elif func_type == "RestoreProgress":
        status_file = os.path.join(input_info.get("cache_path"),
                                   f'{JobData.JOB_ID}{GaussCluster.get_hostname()}{"restore_main"}')
        result, progress_instance.progress_info.progress = exec_ins.query_progress()
        _, progress_instance.progress_info.task_status = read_gauss_progress(status_file, input_info.get("cache_path"))
        progress_instance.progress_info.sub_task_id = sys.argv[4]
        progress_instance.set_start_label(ReportDBLabel.RESTORE_SUB_START_COPY, sys.argv[4], input_info)
    elif func_type == "RestorePostProgress":
        if GaussCluster.get_deploy_type() == DeployType.SINGLE_TYPE:
            umount_bind_backup_path()
        progress_instance.set_progress_succeed(sys.argv[4])
    output_result_file(JobData.PID, progress_instance.progress_info.dict(by_alias=True))
    return ExecuteResultEnum.SUCCESS


def set_sub_dir_for_cluster_nodes(is_new_backup_copy=None):
    """Return the per-node mount sub directory name for this cluster node.

    :param is_new_backup_copy: whether the copy is a new-style copy that was
        written with per-node sub directories
    :return: the node's sub directory name on cluster deployments; '' for
        single-node deployments or old-style copies
    """
    # Single-node deployments never use per-node sub directories.
    if GaussCluster.get_deploy_type() != DeployType.CLUSTER_TYPE:
        return ""
    # Old-style copies were written without per-node sub directories.
    if not is_new_backup_copy:
        log.info("Old type copy, don't need sub dir.")
        return ""
    endpoint = GaussCluster.get_endpoint_by_hostname()
    log.info(f"Sub dir is set to: {endpoint}")
    return endpoint


def cluster_prepare_for_no_clone_file_system(input_info):
    """Stage data/meta under the cache repository for a cluster restore when
    the repository is not a clone file system, then mount-bind the staged
    directories.

    :param input_info: restore context (cache_path, meta_area, log_path, ...)
    :return: mount_bind_backup_path result (truthy on success)
    """
    cache_path = input_info.get("cache_path")
    # Step 1: build the data tree in cache -- the backupset summary must be
    # writable so it is copied, everything else is linked (see cteate_data).
    data_cache_dir = os.path.join(cache_path, 'data')
    if not os.path.exists(data_cache_dir):
        exec_mkdir_cmd(data_cache_dir)
    database_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    change_path_user(database_user, data_cache_dir)
    mediadata_roach_cache_dir = os.path.join(cache_path, 'data', 'mediadata', 'roach')
    if not os.path.exists(mediadata_roach_cache_dir):
        cteate_data(database_user, input_info, mediadata_roach_cache_dir)
    # Step 2: copy the meta repository into cache as-is.
    if not exec_cp_cmd(input_info.get("meta_area"), cache_path):
        log.error("Failed to exec copy.")
    staged_data = os.path.join(cache_path, 'data')
    staged_meta = os.path.join(cache_path, 'meta')
    log_path = input_info.get("log_path")
    if log_path:
        return mount_bind_backup_path(staged_data, staged_meta, log_path,
                                      backup_type=BackupTypeEnum.LOG_BACKUP)
    return mount_bind_backup_path(staged_data, staged_meta)


def do_cluster_restore_sub_job(func_type, input_info, main_instance):
    """Execute the cluster "prepare" or "umount" restore sub job.

    prepare: mount-bind the repositories (clone or non-clone flavour), hand
    them to the database user and copy the roach ini into the meta path.
    umount: undo the bind mounts. Any other combination is rejected.
    Writes an ActionResult file in every path.

    :param func_type: must be "Restore" for either sub job to run
    :param input_info: restore context dict
    :param main_instance: RestoreOutput whose output_code is updated
    :return: ExecuteResultEnum.SUCCESS / INTERNAL_ERROR
    """
    if all(["prepare" in input_info.get("sub_job_name"), func_type == "Restore"]):
        log.info("Start to do cluster restore prepare sub job.")
        progress_file = os.path.join(input_info.get("cache_path"), f'{JobData.JOB_ID}{GaussCluster.get_hostname()}')
        write_progress_file(ProgressInfo.START, progress_file)
        if not input_info.get(IS_CLONE_FILE_SYSTEM, True):
            # Non-clone repositories must be staged into the cache first.
            ret = cluster_prepare_for_no_clone_file_system(input_info)
        else:
            database_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
            data_path = input_info.get("new_data")
            meta_path = input_info.get("meta_mount")
            change_path_user(database_user, data_path)
            change_path_user(database_user, meta_path)
            if input_info.get("log_path"):
                log_path = input_info.get("log_path")
                change_path_user(database_user, log_path)
                ret = mount_bind_backup_path(data_path, meta_path, log_path, backup_type=BackupTypeEnum.LOG_BACKUP)
            else:
                ret = mount_bind_backup_path(data_path, meta_path)
        if not ret:
            output_code = \
                ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=0, message='Mount bind path failed')
            output_result_file(JobData.PID, output_code.dict(by_alias=True))
            return ExecuteResultEnum.INTERNAL_ERROR
        main_instance.output_code = set_output_code(main_instance.copy_into_meta(input_info))
        output_result_file(JobData.PID, main_instance.output_code.dict(by_alias=True))
        return ExecuteResultEnum.SUCCESS
    if all(["umount" in input_info.get("sub_job_name"), func_type == "Restore"]):
        umount_bind_backup_path()
        main_instance.output_code.code = ExecuteResultEnum.SUCCESS
        output_result_file(JobData.PID, main_instance.output_code.dict(by_alias=True))
        return ExecuteResultEnum.SUCCESS
    output_code = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=0, message='Can not execute job')
    output_result_file(JobData.PID, output_code.dict(by_alias=True))
    return ExecuteResultEnum.INTERNAL_ERROR


def cteate_data(database_user, input_info, mediadata_roach_cache_dir):
    """Mirror the data repository's mediadata/roach tree into the cache,
    delegating the per-inner-dir population to create_inner_file.

    NOTE(review): the name looks like a typo of ``create_data``; kept
    unchanged because callers reference it by this name.

    :param database_user: user that should own the created cache dirs
    :param input_info: restore context (cache_path, new_data)
    :param mediadata_roach_cache_dir: cache-side mediadata/roach directory
    """
    exec_mkdir_cmd(mediadata_roach_cache_dir)
    change_path_user(database_user, os.path.join(input_info.get("cache_path"), 'data', 'mediadata'))
    change_path_user(database_user, mediadata_roach_cache_dir)
    data_src_path = os.path.join(input_info.get("new_data"), 'mediadata', 'roach')
    for backup_key_dir in os.listdir(data_src_path):
        backup_key_cache = os.path.join(mediadata_roach_cache_dir, backup_key_dir)
        if not os.path.exists(backup_key_cache):
            exec_mkdir_cmd(backup_key_cache)
        for db_dir in os.listdir(os.path.join(data_src_path, backup_key_dir)):
            db_cache = os.path.join(backup_key_cache, db_dir)
            if not os.path.exists(db_cache):
                exec_mkdir_cmd(db_cache)
            for inner_dir in os.listdir(os.path.join(data_src_path, backup_key_dir, db_dir)):
                dst_inner_dir = os.path.join(db_cache, inner_dir)
                create_inner_file(backup_key_dir, data_src_path, db_dir, dst_inner_dir, inner_dir)


def create_inner_file(backup_key_dir, data_src_path, db_dir, dst_inner_dir, inner_dir):
    """Populate one cache-side inner directory: the 'backupset' summary is
    copied (it needs to be writable), everything else is symlinked.

    :param backup_key_dir: backup key directory name
    :param data_src_path: source mediadata/roach root in the data repository
    :param db_dir: database directory name
    :param dst_inner_dir: cache-side destination inner directory
    :param inner_dir: inner directory name
    """
    if not os.path.exists(dst_inner_dir):
        exec_mkdir_cmd(dst_inner_dir)
    src_inner_dir = os.path.join(data_src_path, backup_key_dir, db_dir, inner_dir)
    for inner_file in os.listdir(src_inner_dir):
        inner_file_path = os.path.join(src_inner_dir, inner_file)
        if inner_file == "backupset":
            if not exec_cp_cmd(inner_file_path, dst_inner_dir):
                log.error("Failed to exec copy.")
        else:
            # Read-only payload: a symlink avoids duplicating the data.
            exec_ln_cmd(inner_file_path, os.path.join(dst_inner_dir, inner_file))


def copy_meta_data(input_path):
    """Copy <meta_area>/<backup_key>/metadata into the data repository.

    :param input_path: restore context (new_data, meta_area, backup_key)
    :return: True when the copy succeeded, False when the data repository is
        missing or the copy failed
    """
    log.info('Copy meta data folder')
    dest_dir = input_path.get("new_data")
    src_metadata = os.path.join(os.path.join(input_path.get("meta_area"), input_path.get("backup_key")), "metadata")
    if not os.path.exists(dest_dir):
        return False
    if not exec_cp_cmd(src_metadata, dest_dir):
        log.error(f"Failed to copy meta")
        return False
    log.info("Succeed to copy backup task")
    return True


def generate_sub_dir_match_table(data_repo_path, all_nodes, backup_key):
    """
    For a cluster new-location restore, build the mapping from each origin
    node's sub directory name (origin node ip) to the new node ip.

    Fixes over the previous version: the IP regex is compiled once instead of
    per entry, and malformed inputs that previously raised (two origin dirs
    claiming the same instance -> KeyError on pop; more unmatched dirs than
    remaining instances -> IndexError) now log an error and return failure,
    matching the function's (False, table) error convention.

    :param data_repo_path: mounted data repository root
    :param all_nodes: local cluster node objects (instance_name / node_ip)
    :param backup_key: backup key
    :return: (success flag, {origin_ip: new_ip})
    """
    match_table = dict()
    ip_pattern = re.compile(r"^(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
    origin_ips = [name for name in os.listdir(data_repo_path) if ip_pattern.match(name)]
    log.info(f"Generating match table for origin ips: {origin_ips}.")
    if len(all_nodes) != len(origin_ips):
        log.error(f"Origin paths num({len(origin_ips)}) does not match nodes num({len(all_nodes)}).")
        return False, match_table
    # Pass 1: find which instance produced each origin sub dir (DB1_* name).
    for origin_ip in origin_ips:
        match_table[origin_ip] = ""
        data_sub_path = os.path.join(data_repo_path, origin_ip, 'data', 'mediadata', 'roach', backup_key)
        for child_dir in os.listdir(data_sub_path):
            if child_dir.startswith("DB1_"):
                match_table[origin_ip] = child_dir
                log.info(f"Node {origin_ip} has data of instance {child_dir}.")
                break
    instance_info = dict()
    for node in all_nodes:
        instance_info[node.instance_name] = node.node_ip
    log.info(f"New location instance info: {instance_info}")
    # Pass 2: translate matched instance names into new node ips.
    for origin_ip, instance_name in match_table.items():
        if not instance_name:
            continue
        if instance_name not in instance_info:
            log.error(f"Instance {instance_name} matched by {origin_ip} is unknown or already taken.")
            return False, match_table
        match_table[origin_ip] = instance_info.pop(instance_name)
    # Pass 3: hand out the leftover instances to still-unmatched origin dirs.
    for origin_ip, new_ip in match_table.items():
        if new_ip:
            continue
        if not instance_info:
            log.error(f"No remaining instance to match origin ip {origin_ip}.")
            return False, match_table
        first_instance = next(iter(instance_info))
        match_table[origin_ip] = instance_info.pop(first_instance)
    log.info(f"Generate match table for origin ips successfully, table: {match_table}")
    return True, match_table


def rename_for_repo_paths(base_repo_path, match_table):
    """Rename each origin-ip sub directory under *base_repo_path* to its
    matched new-ip name.

    :param base_repo_path: repository root containing the per-node sub dirs
    :param match_table: {origin_ip: new_ip}
    :return: False as soon as one rename fails, True otherwise
    """
    origin_repo_paths = [os.path.join(base_repo_path, name) for name in match_table.keys()]
    log.info(f"Renaming for origin paths: {origin_repo_paths}.")
    for origin_ip, new_ip in match_table.items():
        src_path = os.path.join(base_repo_path, origin_ip)
        dst_path = os.path.join(base_repo_path, new_ip)
        if not exec_mv_cmd(src_path, dst_path, check_white_black_list_flag=False):
            log.error(f"Rename {src_path} to {dst_path} failed.")
            return False
        log.info(f"Rename {src_path} to {dst_path} success.")
    return True


def rename_clone_file_system_paths(input_info, backup_key):
    """Rename the per-node repository sub directories (data, meta and, when
    present, log) from origin node ips to new node ips for a cluster
    new-location restore.

    :param input_info: restore context dict
    :param backup_key: backup key
    :return: True when renaming succeeded or was unnecessary, else False
    """
    # Single-node deployments have no per-node sub directories to adapt.
    if GaussCluster.get_deploy_type() != DeployType.CLUSTER_TYPE:
        return True
    # Old-style copies never carried per-node sub directories.
    if not input_info.get("new_backup_copy"):
        return True
    # Origin-location restore, or the rename has already been done.
    if os.path.exists(input_info.get("new_data")):
        log.info("Origin location restore or already renamed, no need to rename.")
        return True
    all_nodes = GaussCluster.get_all_node()
    if not all_nodes:
        log.error("Node information is empty!")
        return False
    log.info("New location restore, start to rename repositories paths.")
    base_data_path = os.path.dirname(os.path.dirname(input_info.get("new_data")))
    base_meta_path = os.path.dirname(os.path.dirname(input_info.get("meta_area")))
    ret, match_table = generate_sub_dir_match_table(base_data_path, all_nodes, backup_key)
    if not ret:
        return False
    rename_roots = [base_data_path, base_meta_path]
    if input_info.get("log_path"):
        rename_roots.append(os.path.dirname(input_info.get("log_path")))
    for base_path in rename_roots:
        if not rename_for_repo_paths(base_path, match_table):
            return False
    log.info("Rename clone file system paths successfully.")
    return True


def do_restore_pre_job(input_info):
    """Run the restore prerequisite job: validate the database user, hand the
    repositories to it (clone file system case), run the pre-restore check,
    record its error code, and for single-node deployments mount-bind the
    backup paths.

    Fix: the deploy-type retry loop now breaks as soon as a valid type is
    obtained; previously it kept calling get_deploy_type() for the full
    RETRY_TIMES iterations even after success.

    :param input_info: restore context dict
    :return: ActionResult -- SUCCESS, or INTERNAL_ERROR with a hint message
    """
    log.info(f"Start to do restore pre job.")
    database_user = get_user_name(f"{Env.USER_NAME}_{JobData.PID}")
    result = ActionResult(code=ExecuteResultEnum.INTERNAL_ERROR, bodyErr=0, message="")
    if gaussdbt_check_user_name_and_injection(database_user):
        log.error("Get user name failed!")
        result.message = "Get user name failed."
        return result
    if input_info.get(IS_CLONE_FILE_SYSTEM, True):
        # Clone file system: give the database user ownership and owner-only
        # access on the data and meta repositories.
        data_path = os.path.dirname(os.path.realpath(input_info.get("new_media_path")))
        change_path_user(database_user, data_path)
        change_path_user(database_user, input_info.get("meta_area"))
        return_code = exec_chmod_dir_recursively(input_info.get("new_data"), stat.S_IRWXU)
        if not return_code:
            log.error(f"Failed to exec cmd chown")
            result.message = "Chmod failed"
            return result
        return_code = exec_chmod_dir_recursively(input_info.get("meta_area"), stat.S_IRWXU)
        if not return_code:
            log.error(f"Failed to exec cmd chown")
            result.message = "Chmod failed"
            return result
    check_instance = CheckRestore(input_info)
    do_pre_job = check_instance.pre_restore_job()
    err_code_file = os.path.join(input_info.get("cache_path"), f'{JobData.JOB_ID}errcode')
    if not record_gdb_err_code(do_pre_job.value, err_code_file):
        result.code = ExecuteResultEnum.INTERNAL_ERROR
        return result
    deploy_type = DeployType.INVALID_TYPE
    for _ in range(RETRY_TIMES):
        deploy_type = GaussCluster.get_deploy_type()
        if deploy_type in (DeployType.SINGLE_TYPE, DeployType.CLUSTER_TYPE):
            break  # valid type obtained -- stop polling
        log.info(f"Failed getting status, wait for {RETRY_WAIT_SECONDS} seconds")
        time.sleep(RETRY_WAIT_SECONDS)
    if deploy_type == DeployType.SINGLE_TYPE:
        if input_info.get("log_path"):
            ret = mount_bind_backup_path(input_info.get("new_data"), input_info.get("meta_mount"),
                                         input_info.get("log_path"), backup_type=BackupTypeEnum.LOG_BACKUP)
        else:
            ret = mount_bind_backup_path(input_info.get("new_data"), input_info.get("meta_mount"))
        if not ret:
            result.message = "Mount bind path failed"
            return result
    result.code = ExecuteResultEnum.SUCCESS
    return result


@exter_attack
def do_main_work(func_type: str):
    """Entry point for the restore execution phases.

    Parses the context, validates the data path against the trustlist, then
    dispatches: cluster prepare/umount sub jobs, restore prerequisite, the
    restore itself (followed by a cluster restart), or post-restore cleanup.

    :param func_type: one of the RestoreMainEnum function names
    :return: the resulting ExecuteResultEnum code
    """
    main_instance = RestoreOutput()
    input_info = main_instance.parse_param()
    if not input_info:
        log.error('Parse file failed')
        return ExecuteResultEnum.INTERNAL_ERROR
    new_data_ = input_info.get("new_data")
    if new_data_ and not check_path_legal(new_data_, AREA_PARENT_PATH):
        log.error("The path is not in the trustlist.")
        return ExecuteResultEnum.INTERNAL_ERROR
    exec_ins = ExecRestore(input_info)
    # NOTE(review): this is an exact-name match while do_cluster_restore_sub_job
    # and do_progress_work use substring checks ("prepare" in name) -- confirm
    # the scheduler emits exactly "prepare"/"umount" as jobName here.
    if input_info.get("sub_job_name") in ("prepare", "umount"):
        return do_cluster_restore_sub_job(func_type, input_info, main_instance)
    if func_type == "RestorePrerequisite":
        main_instance.output_code = do_restore_pre_job(input_info)
    elif func_type == "Restore":
        if exec_ins.do_restore_job():
            main_instance.output_code = set_output_code(exec_ins.restart_cluster())
    elif func_type == "RestorePost":
        main_instance.output_code = set_output_code(exec_ins.do_restore_post())
    output_result_file(JobData.PID, main_instance.output_code.dict(by_alias=True))
    return main_instance.output_code.code


@exter_attack
def restore_prerequisite_progress():
    """Report progress for the restore prerequisite phase.

    Builds a minimal context (copy layout + repository paths) instead of a
    full parse_param(), then reuses do_restore_pre to read the recorded
    error code and writes the progress report to the result file.

    :return: ExecuteResultEnum.SUCCESS / INTERNAL_ERROR
    """
    log.info("Restore prerequisite progress start")
    try:
        param_protection = ParamProtection(JobData.PID)
    except Exception:
        log.error("Failed to parse param file")
        return ExecuteResultEnum.INTERNAL_ERROR
    copy_dict = param_protection.get_data_copy_dict()
    backup_key = param_protection.get_restore_key()
    if not copy_dict:
        log.error("Copy info is none")
        return ExecuteResultEnum.INTERNAL_ERROR
    input_info = dict()
    input_info["new_backup_copy"] = RestoreOutput.check_old_or_new_backup_copy(copy_dict, backup_key)
    RestoreOutput.deal_with_repositories_info(input_info, copy_dict)
    # NOTE(review): input_info already holds "new_backup_copy" at this point,
    # so this guard can never trigger -- presumably meant to check a specific
    # key such as "cache_path"; confirm intent before changing.
    if not input_info:
        log.error('context err')
        return ExecuteResultEnum.INTERNAL_ERROR
    progress_instance = RestoreOutput()
    progress_instance.progress_info.task_id = JobData.JOB_ID
    progress_instance.progress_info = do_restore_pre(input_info, JobData.JOB_ID)
    output_result_file(JobData.PID, progress_instance.progress_info.dict(by_alias=True))
    return ExecuteResultEnum.SUCCESS


if __name__ == '__main__':
    # CLI contract: argv[1] = function type, argv[2] = PID, argv[3] = job id;
    # progress functions additionally read argv[4] (sub task id) themselves.
    # Credentials arrive on stdin, never on the command line.
    log.info("Running restore main...")
    function_type = sys.argv[1]
    JobData.PID = sys.argv[2]
    JobData.JOB_ID = sys.argv[3]
    SysData.SYS_STDIN = sys.stdin.readline()
    log.info(f'Start to exec function: {function_type},PID: {JobData.PID} task ID: {JobData.JOB_ID}')
    # Key under which the target environment auth user name is looked up.
    Env.USER_NAME = 'job_targetEnv_auth_authKey'
    if function_type in RestoreMainEnum:
        sys.exit(do_main_work(function_type))
    elif function_type in RestoreProgressEnum:
        sys.exit(do_progress_work(function_type))
    elif function_type == "RestorePrerequisiteProgress":
        sys.exit(restore_prerequisite_progress())
    else:
        log.error(f'Function {function_type} not exist!')
        sys.exit(ExecuteResultEnum.INTERNAL_ERROR)
