#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os

from dws.commons.common import log, check_path_valid, has_uppercase
from dws.commons.parse_param import parse_param_with_jsonschema
from common.common import exter_attack, check_command_injection_ex, check_command_injection_exclude_quote
from common.const import RepositoryDataTypeEnum, RestoreType, RepoProtocalType
from common.util.check_utils import is_valid_uuid
from dws.commons.const import CopyGenerateType, StorageRole, IntrusiveMode, JsonKey
from dws.commons.function_tool import log_start


class ParseRestoreJobParam:

    @exter_attack
    def __init__(self, pid):
        """
        Parse the restore parameter file for the given request pid and cache
        the job parameters, the copy to restore, and the main task id.
        """
        self._pid = pid
        self._restore_job_param = {}
        self._copy = {}
        self._initialized = False
        self._main_task_id = ""
        # Populates the fields above; _is_init() reports whether parsing succeeded.
        self._get_param_from_file()
        if self._is_init():
            self._main_task_id = self._restore_job_param.get("job").get("jobId")

    def get_pid(self):
        """Return the request pid this instance was built from."""
        return self._pid

    def get_main_job_id(self):
        """Return the main restore task id ("" when parsing failed)."""
        return self._main_task_id

    @exter_attack
    def get_copy_generate_type(self):
        """
        Return the backup tool type that generated the copy (e.g. GDS/roach).

        Returns CopyGenerateType.INVALID_GENERATE_TYPE when the class is not
        initialized or the copy carries no extend info / tool type.
        """
        if not self._is_init():
            return CopyGenerateType.INVALID_GENERATE_TYPE
        # Archived copies nest the original backup's extendInfo one level deeper.
        if self.get_copy_type() in ("s3Archive", "tapeArchive"):
            extend_info = self._copy.get("extendInfo", {}).get("extendInfo", {})
        else:
            extend_info = self._copy.get("extendInfo")
        if not extend_info:
            log.error(f"No extend info in copy, main task: {self._main_task_id}.")
            return CopyGenerateType.INVALID_GENERATE_TYPE
        # Bug fix: a missing "backupToolType" used to leak None to callers;
        # return the documented invalid sentinel instead.
        return extend_info.get("backupToolType", CopyGenerateType.INVALID_GENERATE_TYPE)

    @exter_attack
    def get_copy_type(self):
        """
        Return the copy type string: full/increment/replication/s3Archive etc.
        Returns "" when the class is uninitialized or the type is missing.
        """
        if not self._is_init():
            return ""
        backup_type = self._copy.get("type")
        if backup_type:
            return backup_type
        log.error(f"No backup type info in extendInfo, main task: {self._main_task_id}.")
        return ""

    @exter_attack
    def get_cache_path(self):
        """
        Return the cache repository's path list (e.g. ['', '', '']),
        or "" when the repository cannot be resolved.
        """
        return self._get_single_type_repo_path(RepositoryDataTypeEnum.CACHE_REPOSITORY)

    @exter_attack
    def get_metadata_path(self):
        """
        Return the metadata repository's path list (e.g. ['', '', '']),
        or "" when the repository cannot be resolved.
        """
        return self._get_single_type_repo_path(RepositoryDataTypeEnum.META_REPOSITORY)

    @exter_attack
    def get_copy_id(self):
        """Return the backup copy id, or "" when uninitialized or not a valid UUID."""
        if not self._is_init():
            return ""
        copy_id = self._copy.get("id")
        if is_valid_uuid(copy_id):
            return copy_id
        log.error(f"Copy id is invalid {copy_id}")
        return ""

    @exter_attack
    def get_source_backup_copy_id(self):
        """Return the id of the original backup copy behind this restore copy."""
        copy_type = self.get_copy_type()
        if copy_type == "s3Archive":
            copy_id = self._copy.get("id", "")
        elif copy_type == "tapeArchive":
            # Tape-archive copies keep the original backup copy id in the
            # nested extendInfo.
            copy_id = self._copy.get("extendInfo", {}).get("extendInfo", {}).get(JsonKey.COPY_ID, "")
        else:
            copy_id = self._copy.get("extendInfo", {}).get(JsonKey.COPY_ID, "")
        if is_valid_uuid(copy_id):
            return copy_id
        log.error(f"Copy id is invalid {copy_id}")
        return ""

    @exter_attack
    def is_increment_copy(self):
        """Return True when the job carries more than one copy (incremental chain)."""
        if not self._is_init():
            return False
        return len(self._restore_job_param.get("job").get("copies")) > 1

    @log_start()
    @exter_attack
    def get_repo_info(self):
        """
        Collect data-repository info grouped by storage device.

        Returns a list of {"role", "deviceSN", "filesystems"} dicts.
        """
        res_list = []
        # Archived-to-S3 copies describe their repositories inside extendInfo.
        if self.get_copy_type() == "s3Archive":
            for repo in self._copy.get("extendInfo", {}).get("repositories", []):
                if repo.get("type") != RepositoryDataTypeEnum.DATA_REPOSITORY:
                    continue
                role = repo.get("role", "")
                esn = repo.get("extendInfo", {}).get("esn", "")
                file_systems = self._get_archive_filesystems_from_remote_path(repo)
                res_list.append({"role": role, "deviceSN": esn, "filesystems": file_systems})
            return res_list
        # Other copy types: add one filesystem entry per data repo, grouped by esn.
        # Bug fix: default to [] so a copy without "repositories" does not raise.
        for repo in self._copy.get("repositories", []):
            if repo.get("repositoryType") != RepositoryDataTypeEnum.DATA_REPOSITORY:
                continue
            role = repo.get("role", "")
            esn = repo.get("extendInfo", {}).get("esn", "")
            fs_dict = {
                "id": repo.get("extendInfo", {}).get("fsId", ""),
                "name": repo.get("remotePath", "").strip('/'),
                "sharePath": repo.get("remotePath", "").strip('/'),
                "mountPath": repo.get("path")
            }
            log.debug(f"Fs dict: {fs_dict}")
            is_exist = False
            for res in res_list:
                if res.get("deviceSN", "") == esn:
                    res.get("filesystems", []).append(fs_dict)
                    is_exist = True
                    break  # device serials are unique within res_list
            if not is_exist:
                res_list.append({"role": role, "deviceSN": esn, "filesystems": [fs_dict]})
        return res_list

    @exter_attack
    def get_restore_param(self):
        """Return the raw parsed restore job parameter dict."""
        return self._restore_job_param

    @exter_attack
    @log_start()
    def get_restore_target_sub_type(self):
        """
        Return the restore target sub type: DWS-cluster/DWS-database/DWS-schema/DWS-table.

        Normal restores read it from targetObject; fine-grained restores from
        the first entry of restoreSubObjects. Returns "" otherwise.
        """
        restore_type = self.get_restore_type()
        job = self._restore_job_param.get("job", {})
        if restore_type == RestoreType.NORMAL_RESTORE:
            return job.get("targetObject", {}).get("subType")
        if restore_type == RestoreType.FINE_GRAINED_RESTORE:
            sub_objects = job.get("restoreSubObjects", [])
            if sub_objects:
                return sub_objects[0].get("subType", "")
            log.error(f"Fail to get fine grained restore subtype. main task: {self._main_task_id}.")
            return ""
        return ""

    @exter_attack
    def get_copy_sub_type(self):
        """
        Return the sub type of the protected object the copy belongs to:
        DWS-cluster/DWS-database/DWS-schema/DWS-table. "" when uninitialized.
        """
        if not self._is_init():
            return ""
        protect_object = self._copy.get("protectObject", {})
        return protect_object.get("subType", "")

    @exter_attack
    def get_restore_object_name(self):
        """Return the name of the restore target object, or None when absent."""
        # Bug fix: default targetObject to {} so a missing key does not raise
        # AttributeError (consistent with the other accessors in this class).
        return self._restore_job_param.get("job", {}).get("targetObject", {}).get("name")

    @exter_attack
    def get_fine_grained_restore_tables(self):
        """
        For fine-grained restore: collect the target table list.

        Returns (database_name, new_table_list, old_table_list); on any
        validation failure ("", [], []) is returned. All target tables must
        live in the same database (enforced by _parse_single_table_name,
        which threads target_database through the loop).
        """
        sub_objects = self._restore_job_param.get("job", {}).get("restoreSubObjects", {})
        if not sub_objects or len(sub_objects) == 0:
            log.error(f"Fail to get fine grained restore tables. main task: {self._main_task_id}.")
            return "", [], []
        target_database = ""
        table_list_result = []
        old_table_list = []
        for obj in sub_objects:
            target_temp_table_name = obj.get("name", "")
            if not target_temp_table_name:
                log.error(f"The table name is empty in subObjects({sub_objects}). main task: {self._main_task_id}")
                return "", [], []
            target_database, table_name = self._parse_single_table_name(target_temp_table_name, target_database)
            if not target_database or not table_name:
                return "", [], []
            table_list_result.append(table_name)
            # The table's original (pre-restore) name is kept in extendInfo.oldName.
            old_temp_table_name = obj.get("extendInfo", {}).get("oldName", "")
            if not old_temp_table_name:
                log.error(f"The old table name is empty in subObjects({sub_objects}). main task: {self._main_task_id}")
                return "", [], []
            # Old names are not required to share a database; pass "" to skip the check.
            old_temp_db_name = ""
            _, table_name = self._parse_single_table_name(old_temp_table_name, old_temp_db_name)
            if not table_name:
                return "", [], []
            old_table_list.append(table_name)

        return target_database, table_list_result, old_table_list

    @exter_attack
    def get_normal_restore_tables(self):
        """
        For normal table-level restore: resolve the target database name.

        Despite the historical name, only the database name is returned: it is
        parsed from the first "db/schema/table" entry of the target table list,
        or taken from targetObject.extendInfo.database when restoring to a new
        location. Returns "" on failure.
        """
        target_table_str = self._restore_job_param.get("job").get("targetObject", {}).get("extendInfo", {}).get("table")
        if not target_table_str:
            log.error(f"Fail to get target table info. main task: {self._main_task_id}.")
            return ""
        table_list = target_table_str.split(",")
        if not table_list:
            # Bug fix: corrected log typos ("dabtase" -> "database", "taks" -> "task").
            log.error(f"Fail to get target database info, main task: {self._main_task_id}.")
            return ""
        splited_list = table_list[0].split("/")
        if not splited_list:
            log.error(f"Fail to get target database, main task: {self._main_task_id}.")
            return ""
        target_database = splited_list[0]
        target_location = self._restore_job_param.get("job", {}).get("extendInfo", {}).get("targetLocation")
        if target_location == "new":
            # Restoring to a new location: the target database is given explicitly.
            target_database = self._restore_job_param.get("job", {}).get("targetObject", {}).\
                            get("extendInfo", {}).get("database")
        return target_database

    @exter_attack
    def get_target_cluster_auth_info(self):
        """
        Return (user, env_file) for the cluster being restored to.
        ("", "") is returned when the info is missing or fails injection checks.
        """
        target_env = self._restore_job_param.get("job").get("targetEnv")
        if not target_env:
            log.error(f"Fail to get targetEnv info. main task: {self._main_task_id}.")
            return "", ""
        env_extend = target_env.get("extendInfo")
        if not env_extend:
            log.error(f"Fail to get extendInfo info. main task: {self._main_task_id}.")
            return "", ""
        user = env_extend.get("DwsUser", "")
        env_file = env_extend.get("envFile", "")
        # Reject values that look like shell command injection.
        if any(check_command_injection_ex(value) for value in (user, env_file)):
            log.error(f"User {user} or env {env_file} is illegal.")
            return "", ""
        return user, env_file

    @exter_attack
    def get_data_path(self):
        """Return "<first usable data-repo path>/data", or "" when none is accessible."""
        candidates = self._get_single_type_repo_path(RepositoryDataTypeEnum.DATA_REPOSITORY)
        for candidate in candidates:
            # A usable path must be readable, writable and pass validation.
            if os.access(candidate, os.R_OK) and os.access(candidate, os.W_OK) and check_path_valid(candidate):
                return os.path.join(candidate, "data")
        log.error("Have no can available data path")
        return ""

    @exter_attack
    def get_available_path(self):
        """
        Pick one accessible metadata path and one cache path from the
        repositories. Returns (metadata_path, cache_path); either may be ""
        when nothing usable is found.
        """
        def usable(path):
            # Must be readable, writable and pass the path-validity check.
            return os.access(path, os.R_OK) and os.access(path, os.W_OK) and check_path_valid(path)

        cache_path_list = self.get_cache_path()
        if not cache_path_list:
            log.error("Fail to get cache path")
            return "", ""
        cache_path = next((item for item in cache_path_list if usable(item)), "")
        if cache_path:
            log.info("Get cache path success")
        metadata_path_list = self.get_metadata_path()
        if not metadata_path_list:
            log.warn("Fail to get metadata path")
            return "", cache_path
        metadata_path = next((item for item in metadata_path_list if usable(item)), "")
        return metadata_path, cache_path

    @exter_attack
    def get_copy_total_size(self):
        """Return the total data size of the original backup copies, in MB."""
        if self.get_copy_type() in ("s3Archive", "tapeArchive"):
            # Archived copies carry the size in the nested extendInfo.
            return self._copy.get("extendInfo", {}).get("extendInfo", {}).get("totalDataSize", 0)
        copies = self._restore_job_param.get("job", {}).get("copies", [])
        return sum(single.get("extendInfo", {}).get("totalDataSize", 0) for single in copies)

    @exter_attack
    def get_nodes_info(self):
        """
        Return (ok, intrusive_node_ips, non_intrusive_node_ips) for the target
        cluster. ok is False when the target env or its node list is missing.
        """
        target_env = self._restore_job_param.get("job").get("targetEnv")
        if not target_env:
            log.error(f"Fail to get target env for main task:{self._main_task_id}")
            return False, [], []
        nodes = target_env.get("nodes")
        if not nodes:
            log.error(f"Fail to get nodes info. main task:{self._main_task_id}")
            return False, [], []
        intrusive_nodes = []
        non_intrusive_nodes = []
        for node in nodes:
            # Bug fix: the two nodeType branches duplicated the ip-resolution
            # logic; extendInfo now also defaults to {} instead of raising.
            extend_info = node.get("extendInfo", {})
            # VPC-attached nodes expose their fixed subnet IP; others use endpoint.
            if "vpcId" in extend_info:
                ip = extend_info.get("subNetFixedIp", "")
            else:
                ip = node.get("endpoint")
            node_type = extend_info.get("nodeType")
            if node_type == "0":
                intrusive_nodes.append(ip)
                log.info(f"Insert intrusive_nodes {ip}")
            elif node_type == "1":
                non_intrusive_nodes.append(ip)
                log.info(f"Insert non_intrusive_nodes {ip}")
        return True, intrusive_nodes, non_intrusive_nodes

    @exter_attack
    @log_start()
    def get_subtask_name(self):
        """
        For sub tasks: return the restore sub-job name (whitelist sub task /
        restore business sub task), "" when absent.
        """
        return self._restore_job_param.get("subJob", {}).get("jobName", "")

    @exter_attack
    def get_subtask_id(self):
        """
        For sub tasks: return the restore sub-job id (whitelist sub task /
        restore business sub task); None when absent.
        """
        # Bug fix: default subJob to {} (consistent with get_subtask_name) so a
        # main-task parameter file without "subJob" does not raise AttributeError.
        return self._restore_job_param.get("subJob", {}).get("subJobId")

    @exter_attack
    def get_metadata_destination(self):
        """
        Return the metadata path used for the restore command's
        --metadata-destination option, or "" when it fails the injection check.
        """
        meta_data_path = self._get_info_from_copy_extend_info("metadataPath")
        if check_command_injection_exclude_quote(meta_data_path):
            # Bug fix: corrected "invaild" typo in the log message.
            log.error(f"Meta data path {meta_data_path} is invalid")
            return ""
        return meta_data_path

    @exter_attack
    def get_backup_key(self):
        """
        Return the backup key (dws_backup_id) of the copy,
        or "" when it fails the injection check.
        """
        backup_key = self._get_info_from_copy_extend_info("dws_backup_id")
        if check_command_injection_exclude_quote(backup_key):
            # Bug fix: corrected "invaild" typo in the log message.
            log.error(f"Backup key {backup_key} is invalid")
            return ""
        return backup_key

    @exter_attack
    def get_backup_hostname(self):
        """Return the cluster node hostname the backup copy was produced on."""
        return self._get_info_from_copy_extend_info("hostname")

    @exter_attack
    def get_backup_host_ip(self):
        """Return the cluster node host ip the backup copy was produced on."""
        return self._get_info_from_copy_extend_info("host_ip")

    @exter_attack
    def is_restore_new_cluster(self):
        """
        For restores that may target a different cluster.

        Returns (ok, is_new_cluster): ok is False when either cluster id is
        missing; is_new_cluster is True when target and source ids differ.
        """
        # Bug fix: default the nested objects to {} so a missing targetEnv /
        # protectEnv yields the error path instead of an AttributeError.
        target_cluster_id = self._restore_job_param.get("job", {}).get("targetEnv", {}).get("id")
        source_cluster_id = self._copy.get("protectEnv", {}).get("id")
        log.info(f"Old cluster id: {source_cluster_id}, target_cluster_id:{target_cluster_id}")
        if not target_cluster_id or not source_cluster_id:
            log.error(f"Fail to get target_env id({target_cluster_id}) or source_env id({source_cluster_id}). "
                      f"main task:{self._main_task_id}")
            return False, False
        return True, target_cluster_id != source_cluster_id

    @exter_attack
    def get_all_copy_info(self):
        """Return every copy this restore depends on (incremental chain), or []."""
        if not self._is_init():
            return []
        job_info = self._restore_job_param.get("job")
        return job_info.get("copies")

    @exter_attack
    def get_restore_type(self):
        """
        Return the restore type: normalRestore, instantRestore or
        fineGrainedRestore; INVALID_RESTORE_TYPE when jobParam is missing.
        """
        job_param = self._restore_job_param.get("job", {}).get("jobParam")
        if job_param:
            return job_param.get("restoreType")
        log.error(f"Fail to get jobParam. main task: {self._main_task_id}.")
        return RestoreType.INVALID_RESTORE_TYPE

    @exter_attack
    @log_start()
    def get_restore_schema(self):
        """
        For fine-grained restore: return (database_name, schema_name) parsed
        from the single entry of restoreSubObjects; ("", "") on failure.
        """
        sub_objects = self._restore_job_param.get("job", {}).get("restoreSubObjects", {})
        if not sub_objects:
            log.error(f"Fail to get fine grained restore schema. main task: {self._main_task_id}.")
            return "", ""
        if len(sub_objects) > 1:
            # Only a single schema may be restored at a time.
            log.error(f"Can only select only one schema duplicate. main task: {self._main_task_id}")
            return "", ""
        temp_schema_name = sub_objects[0].get("name", "")
        parts = temp_schema_name.split("/")
        if len(parts) != 2:
            log.error(f"The schema({temp_schema_name}) is illegal. main task: {self._main_task_id}.")
            return "", ""
        database_name, schema_name = parts
        if check_command_injection_exclude_quote(database_name) or \
                check_command_injection_exclude_quote(schema_name):
            log.error(f"Database name {database_name} schema name {schema_name} is invalid")
            return "", ""
        return database_name, schema_name

    @exter_attack
    def get_archive_ip_port(self):
        """
        For direct restore from archive: return the dme_archive service
        endpoints as (ip_list, port). All entries must share the same port;
        any inconsistency or missing field yields ([], "").
        """
        ip_list = []
        port = ""
        archive_info_list = []
        # Find the first non-cache S3 repository that carries service_info.
        for repo in self._copy.get("repositories", []):
            if repo.get("protocol", "") != RepoProtocalType.S3 \
                    or repo.get("repositoryType", "") == RepositoryDataTypeEnum.CACHE_REPOSITORY:
                continue
            archive_info_list = repo.get("extendInfo", {}).get("service_info", [])
            if archive_info_list:
                break
        if not archive_info_list:
            log.error(f"Fail to get archive info. main task: {self._main_task_id}")
            return [], ""
        # Validate every endpoint: ports must all match and ip/port must be set.
        for single_arch in archive_info_list:
            temp_ip = single_arch.get("ip", "")
            temp_port = single_arch.get("port", "")
            if port and port != temp_port:
                log.error(f"All port should be the same. {port}, {temp_port}. main task: {self._main_task_id}")
                return [], ""
            if temp_ip and temp_port:
                ip_list.append(temp_ip)
                port = temp_port
            else:
                log.error(f"The archive ip({temp_ip} or port({temp_port})). main task: {self._main_task_id}")
                return [], ""
        return ip_list, port

    @exter_attack
    def get_archive_ssl_enable(self):
        """
        For direct restore from S3: return (found, enable_ssl) from the first
        non-cache S3 repository; (False, "") when none is present.
        """
        for repo in self._copy.get("repositories", []):
            is_s3 = repo.get("protocol", "") == RepoProtocalType.S3
            is_cache = repo.get("repositoryType", "") == RepositoryDataTypeEnum.CACHE_REPOSITORY
            if is_s3 and not is_cache:
                return True, repo.get("extendInfo", {}).get("enable_ssl", "")
        log.error(f"Fail to get archive ssl enable. main task: {self._main_task_id}")
        return False, ""

    @log_start()
    @exter_attack
    def get_archive_prefix(self):
        """
        For direct restore of an archive-to-cloud copy: return the absolute
        object-path prefix used to fetch object data in the pre-task ("" on
        failure). GDS copies use the data repository (anchor "data"); roach
        copies use the metadata repository with an "objectmeta" suffix
        (anchor "source_policy").
        """
        remote_path = []
        for repo in self._copy.get("extendInfo", {}).get("repositories", []):
            if repo.get("role", "") == StorageRole.MASTER and \
                    repo.get("type", "") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                remote_path = repo.get("remotePath", [])
                break
        if not remote_path:
            log.error(f"Fail to get archive remote path. main task: {self._main_task_id}")
            return ""
        backup_type = self.get_copy_generate_type()
        if backup_type == CopyGenerateType.GENERATE_BY_GDS:
            repository_type = RepositoryDataTypeEnum.DATA_REPOSITORY
            anchor = "data"
        else:
            repository_type = RepositoryDataTypeEnum.META_REPOSITORY
            anchor = "source_policy"
        target_path = ""
        for path in remote_path:
            if path.get("type", "") == repository_type:
                target_path = path.get("path", "")
                break
        try:
            # Keep everything from the anchor directory onwards.
            pos = target_path.index(anchor)
        except ValueError as e:
            # Bug fix: the roach branch used to let this ValueError escape
            # uncaught; both branches now fail gracefully like the GDS one.
            log.error(f"Target path not find data, exception {e}")
            return ""
        target_path = target_path[pos:]
        if backup_type == CopyGenerateType.GENERATE_BY_GDS:
            return target_path
        return os.path.join(target_path, "objectmeta")

    def get_archive_meta_prefix(self):
        """
        For direct restore of an archive-to-cloud copy: return the absolute
        object-path prefix of the archived metadata ("" on failure). Roach
        copies store object metadata under an extra "objectmeta" directory.
        """
        remote_path = []
        for repo in self._copy.get("extendInfo", {}).get("repositories", []):
            if repo.get("role", "") == StorageRole.MASTER and \
                    repo.get("type", "") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                remote_path = repo.get("remotePath", [])
                break
        if not remote_path:
            log.error(f"Fail to get archive remote path. main task: {self._main_task_id}")
            return ""
        target_path = ""
        for path in remote_path:
            if path.get("type", "") == RepositoryDataTypeEnum.META_REPOSITORY:
                target_path = path.get("path", "")
                break
        try:
            pos = target_path.index("source_policy")
        except ValueError as ex:
            # Bug fix: a path without "source_policy" used to raise an
            # uncaught ValueError to the caller.
            log.error(f"Target path not find source_policy, exception {ex}")
            return ""
        target_path = target_path[pos:]
        if self.get_copy_generate_type() == CopyGenerateType.GENERATE_BY_GDS:
            # GDS copies keep the metadata directly at the prefix
            # (the original os.path.join(target_path) was a no-op).
            return target_path
        return os.path.join(target_path, "objectmeta")

    def get_archive_data_prefix(self):
        """Return the absolute object-path prefix of the archived data ("" on failure)."""
        remote_path = []
        for repo in self._copy.get("extendInfo", {}).get("repositories", []):
            is_master = repo.get("role", "") == StorageRole.MASTER
            is_data = repo.get("type", "") == RepositoryDataTypeEnum.DATA_REPOSITORY
            if is_master and is_data:
                remote_path = repo.get("remotePath", [])
                break
        if not remote_path:
            log.error(f"Fail to get archive remote path. main task: {self._main_task_id}")
            return ""
        target_path = ""
        for path in remote_path:
            if path.get("type", "") == RepositoryDataTypeEnum.DATA_REPOSITORY:
                target_path = path.get("path", "")
                break
        try:
            # Keep everything from the "data" directory onwards.
            pos = target_path.index("data")
        except Exception as ex:
            log.error(f"Target path not find data, exception {ex}")
            return ""
        return target_path[pos:]


    def log_format(self):
        """
        Return a standard log suffix carrying pid, main job id and sub job id.
        """
        return f"pid: {self._pid}, job_id: {self._main_task_id}, sub_job_id: {self.get_subtask_id()}"

    @exter_attack
    def get_intrusive_mode(self):
        """
        Derive the intrusive mode from the target cluster's nodes: INVALID
        when there are no usable nodes, INTRUSIVE when only intrusive nodes
        exist, NON_INTRUSIVE otherwise.
        """
        ret, intrusive_nodes, non_intrusive_nodes = self.get_nodes_info()
        if not ret or not (intrusive_nodes or non_intrusive_nodes):
            return IntrusiveMode.INVALID_INTRUSIVE_MODE
        return IntrusiveMode.INTRUSIVE_MODE if not non_intrusive_nodes else IntrusiveMode.NON_INTRUSIVE_MODE

    @exter_attack
    def get_all_data_repo(self):
        """Return every data-type repository of the copy; logs when none is found."""
        data_repo = [
            repo for repo in self._copy.get("repositories", [])
            if repo.get("repositoryType", "") == RepositoryDataTypeEnum.DATA_REPOSITORY
        ]
        if not data_repo:
            log.error("Data repo err.")

        return data_repo

    @exter_attack
    def get_all_data_repo_path(self):
        """
        Return the first mount path of every data repository; [] when any
        repository has no path or a path fails validation.
        """
        all_data_path = []
        for repo in self._copy.get("repositories", []):
            if repo.get("repositoryType", "") != RepositoryDataTypeEnum.DATA_REPOSITORY:
                continue
            path_list = repo.get("path", [])
            # Bug fix: an empty "path" list used to raise IndexError.
            if not path_list:
                log.warning("Data repo has no mount path.")
                return []
            temp_dir = path_list[0]
            if not check_path_valid(temp_dir):
                log.warning(f"Path {temp_dir} is illegal.")
                return []
            all_data_path.append(temp_dir)
        return all_data_path

    def get_fs_relation(self):
        """
        Return the filesystem mapping relations used when restoring from a
        replicated copy ([] when absent).
        """
        if self.get_copy_type() in ("s3Archive", "tapeArchive"):
            # Archived copies nest the original extendInfo one level deeper.
            extend_info = self._copy.get("extendInfo", {}).get("extendInfo", {})
        else:
            extend_info = self._copy.get("extendInfo", {})
        return extend_info.get("fsRelations", {}).get("relations", [])

    @exter_attack
    def _get_single_type_repo_path(self, target_type):
        """
        Return the path list of the first repository whose repositoryType
        matches target_type, or "" when it cannot be resolved.
        """
        if not self._is_init():
            return ""
        repositories = self._copy.get("repositories")
        if not repositories:
            log.error(f"Fail to get repositories, main task: {self._main_task_id}.")
            return ""
        tmp_repo = next(
            (repo for repo in repositories if repo.get("repositoryType") == target_type), {})
        if not tmp_repo:
            log.warn(f"Fail to get type({target_type}), main task: {self._main_task_id}.")
            return ""
        tmp_path = tmp_repo.get("path")
        if not tmp_path:
            log.warn(f"Fail to get path {target_type}, main task: {self._main_task_id}.")
            return ""
        return tmp_path

    @exter_attack
    def _parse_single_table_name(self, table_name, database_name):
        """
        Parse a "database/schema/table"-style name.

        Returns (database, "schema.table"); on failure, or when the database
        differs from the expected database_name, ("", "") is returned.
        """
        splited_list = table_name.split("/")
        if len(splited_list) != 3:
            log.error(f"The table({table_name}) is illegal. main task: {self._main_task_id}.")
            # Bug fix: failure used to return ("", []) — mixed types; both
            # values are now empty strings like the success path.
            return "", ""
        if database_name and database_name != splited_list[0]:
            # Bug fix: log the conflicting database name (index 0), not the schema.
            log.error(f"The tables are not in the same database. {database_name}, {splited_list[0]}. "
                      f"main task: {self._main_task_id}.")
            return "", ""
        return splited_list[0], f"{splited_list[1]}.{splited_list[2]}"


    @exter_attack
    def _get_info_from_copy_extend_info(self, key):
        """
        Generic accessor: fetch the given key from the copy's extendInfo
        (the nested extendInfo for archived copies). Returns "" on failure.
        """
        if not self._is_init():
            return ""
        if self.get_copy_type() in ("s3Archive", "tapeArchive"):
            extend_info = self._copy.get("extendInfo", {}).get("extendInfo", {})
        else:
            extend_info = self._copy.get("extendInfo", {})
        if extend_info:
            return extend_info.get(key)
        log.error(f"No extend info in copy, main task: {self._main_task_id}.")
        return ""

    @exter_attack
    def _get_archive_filesystems_from_remote_path(self, repo):
        """
        Build the filesystem descriptors for an archive repository from its
        remotePath entries; metadata-repository entries are skipped.
        """
        file_systems = []
        for single_path in repo.get("remotePath", []):
            if single_path.get("type", "") == RepositoryDataTypeEnum.META_REPOSITORY:
                continue
            logic_ips_list = [{"ip": lif_ip.get("ip")} for lif_ip in single_path.get("remoteHost", [])]
            # The share/filesystem name is the first component of the remote path.
            fs_name = single_path.get("path").split('/')[1]
            file_systems.append({
                "id": single_path.get("id", ""),
                "name": fs_name,
                "sharePath": fs_name,
                "logicIps": logic_ips_list
            })
        log.debug(f"Archive file systems: {file_systems}. main task: {self._main_task_id}")
        return file_systems

    def _get_param_from_file(self):
        """
        Read and validate the restore parameter file for this pid.

        On success, caches the job parameters and the last (newest) copy of
        the chain and marks the instance initialized. Returns True on
        success, False otherwise.
        """
        log.info("Start to get restore param file.")
        try:
            self._restore_job_param = parse_param_with_jsonschema(self._pid)
        except Exception as e:
            log.error(f"Failed to parse restore param file for {e}, main task: {self._main_task_id}, pid: {self._pid}")
            return False
        job_info = self._restore_job_param.get("job")
        if not job_info:
            log.error(f"No job info in param file, main task: {self._main_task_id}, pid: {self._pid}")
            return False
        copies_info = job_info.get("copies")
        if not copies_info:
            log.error(f"No copy info in param file, main task: {self._main_task_id}, pid: {self._pid}")
            return False
        # The last copy in the chain is the one being restored.
        self._copy = copies_info[-1]
        self._initialized = True
        log.info("Analyze param file successfully.")
        return True

    def _is_init(self):
        """Return True when the parameter file was parsed successfully; log otherwise."""
        if self._initialized:
            return True
        log.error(f"This class has not been initialized. main task: {self._main_task_id}.")
        return False
