#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import signal
import time
from copy import deepcopy

from common.cleaner import clear
from common.common import check_path_legal, execute_cmd, output_result_file
from common.common_models import SubJobDetails, ActionResult, SubJobModel, LogDetail, \
    RepositoryPath, ScanRepositories
from common.const import SubJobStatusEnum, BackupTypeEnum, \
    RepositoryDataTypeEnum, DBLogLevel
from common.util.cmd_utils import cmd_format
from common.util.scanner_utils import scan_dir_size
from generaldb.gbase8a import LOGGER
from generaldb.gbase8a.commands.gbase_cmd import GbaseCmd
from generaldb.gbase8a.common.common_models import ModifyPermission, CreateSoftLink
from generaldb.gbase8a.common.const import GbaseBackupSubjobName, \
    CmdStr, GbasePath, BackupLevel, ErrorCode, LastCopyType, RpcParamKey, SubJobPriority, SubJobType, \
    SubJobPolicy, EnvName, LinkTaskType, JsonConstant, NormalErr, PexpectResult
from generaldb.gbase8a.common.copy_link import CopyLink
from generaldb.gbase8a.common.file_operate import FileOperate
from generaldb.gbase8a.common.gbase_common import lookup_process, exec_rc_tool_cmd, \
    remove_file_dir_link, get_local_uuid
from generaldb.gbase8a.common.gcrcman_tool import GcrcmanTool
from generaldb.gbase8a.job_manager import JobManager
from generaldb.gbase8a.service.base_service import MetaServiceWorker
from generaldb.gbase8a.service.resource.resource_service import GBaseInfos
from generaldb.gbase8a.util.gbase_util import GbaseUtil


class BackupTask(MetaServiceWorker):
    """GBase 8a backup task worker.

    Responsibilities: pre-checks (service/db/pexpect), sub-job generation,
    building the backup soft-link directory, running the gcrcman backup
    command, reporting copy information/size, and post-job link cleanup.
    """

    def __init__(self, job_manager: JobManager, json_param_object=None):
        """Initialize task state from the job manager and parsed job params.

        :param job_manager: carries pid / job_id / sub_job_id and the result channel
        :param json_param_object: parsed job parameter object; must not be None
        :raises Exception: when json_param_object is empty
        """
        if not json_param_object:
            LOGGER.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        super().__init__(job_manager, json_param_object)
        self.cmd = GbaseCmd(self.job_manager.pid)
        self._pid = job_manager.pid
        self._job_id = job_manager.job_id
        self._sub_job_id = job_manager.sub_job_id
        self._json_param_object = json_param_object
        self.host_name = ''
        self.host_pwd = ''
        self.db_pwd = ''
        self.gcluster_base_path = ''
        self._progress = 0
        self._data_size = 0
        self._subjob_name = ''
        self._copys = []
        self._action_code = 0
        self._repositories = []
        self._data_area = ''
        self._cache_area = ''
        self._meta_area = ''
        self._copy_id = ''
        self._job_status = SubJobStatusEnum.RUNNING.value
        self._backup_path = ''
        self._query_progress_interval = 15
        self._db_name = ''
        self._resource_type = ''
        self._protect_object = {}
        self._vc_name = ''
        self._backup_type = ''
        self.init_param()
        self.init_backup_func_dict()

    @staticmethod
    def resolve_copy_info(info):
        """Parse `show backup` output into (cycle, point, level, backup_time).

        :param info: raw command output, lines separated by CRLF
        :return: (True, tuple) on success, (False, ()) when the output
                 does not have the expected shape
        """
        info = info.strip('\r\n').split('\r\n')
        # 3: the first two lines are header/default lines
        if len(info) < 3:
            return False, ()
        msg = info[-1].split('\t')
        # 4: expected result shape is [cycle, point, level, backup_time]
        if len(msg) != 4:
            return False, ()
        cycle, point, level, backup_time = msg
        return True, (cycle, point, level, backup_time)

    def get_log_comm(self):
        """Return the common log suffix identifying this (sub-)job."""
        return f"pid:{self._pid} jobId:{self._job_id} subjobId:{self._sub_job_id}."

    def init_param(self):
        """Load all task parameters from the parsed job parameter object."""
        self._cache_area = self._json_param_object.get_cache_path()
        self._data_area = self._json_param_object.get_data_path()
        self._meta_area = self._json_param_object.get_meta_path()
        self._db_name = self._json_param_object.get_backup_db_name()
        self._resource_type = self._json_param_object.get_resource_type()
        self._repositories = self._json_param_object.get_job_repositories()
        self._backup_type = self._json_param_object.get_backup_type()
        self._subjob_name = self._json_param_object.get_subjob_jobname()
        self._vc_name = self._json_param_object.get_vc_name()
        self._copys = self._json_param_object.get_copys()
        self._protect_object = self._json_param_object.get_job_protectobject()
        self._copy_id = self._json_param_object.get_copy_id()
        self._backup_path = os.path.join(GbasePath.GBASE_LINK_PATH, str(self._job_id))

    def init_backup_func_dict(self):
        """Build the sub-job-name -> handler dispatch table used by backup()."""
        self.backup_func_dict = {
            GbaseBackupSubjobName.SUB_BACKUP: self.backup_sub_job,
            GbaseBackupSubjobName.LINK: self.link,
            GbaseBackupSubjobName.CONTACT_LINK: self.contact_link,
            GbaseBackupSubjobName.MKDIR: self.make_backup_dir,
        }

    def action_result(self, message=''):
        """
        Write the ActionResult to the result file for the framework to read.
        :return:
        """
        action_result = ActionResult(code=self._action_code, body_err=self._action_code, message=message).dict(
            by_alias=True)
        self.job_manager.update(action_result)

    def update_progress(self, task_status, progress, log_detail=None):
        """Report sub-job progress/status (and optional log details) to the framework."""
        progress_str = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id, taskStatus=task_status,
                                     progress=progress, logDetail=log_detail)
        self.job_manager.update(progress_str)

    def backup_allow(self):
        """Run backup pre-checks: OS user auth, service status, db existence, pexpect.

        Each failed check is reported through update_action_result; a final
        action_result() marks success.
        """
        ret, err = self.cmd.check_os_user()
        if not ret:
            # lazy %-style args, no f-string needed
            LOGGER.error("auth failed, error:%s", err)
            # .value for consistency with every other ErrorCode usage below
            return self.update_action_result(code=NormalErr.FALSE.value,
                                             body_err_code=ErrorCode.ERROR_AUTH.value,
                                             msg="auth failed")
        if not GbaseUtil.check_service_status(self.cmd.get_os_user()):
            error = "Error service status"
            LOGGER.error(error)
            return self.update_action_result(code=NormalErr.FALSE.value, body_err_code=ErrorCode.ERR_DB_SERVICES.value,
                                             msg=error)
        if not self.exist_database(self._db_name):
            LOGGER.error("DB not exist:%s", self._db_name)
            return self.update_action_result(code=NormalErr.FALSE.value, body_err_code=ErrorCode.ERR_DB_NOT_EXIST.value,
                                             err_param=[self._db_name])
        if not GbaseUtil.has_pexpect(self.cmd.get_os_user()):
            error = "No module named 'pexpect'"
            LOGGER.error(error)
            return self.update_action_result(code=NormalErr.FALSE.value, body_err_code=ErrorCode.ERROR_NO_PEXPECT.value,
                                             msg=error)
        return self.action_result()

    def exist_database(self, target_db):
        """Return True if target_db appears in `show databases` output.

        :param target_db: database name to look for
        """
        expect_envs = [(self.cmd.get_db_pwd_key(), PexpectResult.DB_LOGIN)]
        os_user = self.cmd.get_os_user()
        db_user = self.cmd.get_db_user()
        ret, out = self.cmd.show_databases(os_user, db_user, expect_envs)
        if not ret:
            LOGGER.error("Failed to show database, error:%s", out)
            return False
        lines = out.strip("\r\n").split("\r\n")
        # skip the table header (first 3 lines) and the trailing border line;
        # remaining lines look like "| db_name |"
        for line in lines[3 - len(lines):-1]:
            db_name = line.replace("|", "").replace(" ", "")
            if db_name == target_db:
                return True
        return False

    def backup_pre_task(self):
        """Pre-task hook: nothing to do, report current action result."""
        return self.action_result()

    def check_backup_job_type(self):
        """Validate that an incremental backup has a previous copy and a stable topology.

        Full backups always pass; otherwise a missing previous copy or a
        topology change forces the task to fail with ERROR_INCREMENT_TO_FULL.
        """
        if self._backup_type == BackupTypeEnum.FULL_BACKUP:
            return self.action_result()
        last_copy_info = self.get_last_copy_info()
        if not last_copy_info:
            self._action_code = ErrorCode.ERROR_INCREMENT_TO_FULL.value
            return self.update_action_result(code=NormalErr.FALSE.value,
                                             body_err_code=ErrorCode.ERROR_INCREMENT_TO_FULL.value)
        if self._backup_type == BackupTypeEnum.INCRE_BACKUP:
            if not self.check_topology():
                self._action_code = ErrorCode.ERROR_INCREMENT_TO_FULL.value
                return self.update_action_result(code=NormalErr.FALSE.value,
                                                 body_err_code=ErrorCode.ERROR_INCREMENT_TO_FULL.value)
        return self.action_result()

    def check_topology(self):
        """Topology check placeholder; currently always passes."""
        return True

    def gen_sub_job(self):
        """Split the backup job into sub-jobs and report them to the framework.

        Every node gets a LINK sub-job; exactly one healthy backup node also
        gets MKDIR (priority 1) and SUB_BACKUP (priority 3) sub-jobs.

        :return: list of sub-job dicts (empty when no healthy backup node exists)
        """
        self.update_progress(SubJobStatusEnum.RUNNING.value, 1)
        nodes = self._json_param_object.get_nodes_info()
        backup_node_ip = self.get_backup_node_ip()
        sub_job_array = []
        if not backup_node_ip:
            return sub_job_array
        # only the first matching node receives the MKDIR/SUB_BACKUP jobs
        main_task_generator_status = True
        for node in nodes:
            host_id = node.get("id", "")
            job_type = GbaseBackupSubjobName.LINK
            job_priority = SubJobPriority.JOB_PRIORITY_2
            sub_job = self.build_sub_job(job_priority, host_id, job_type)
            sub_job_array.append(sub_job)
            if not main_task_generator_status:
                continue
            node_ip = node.get(JsonConstant.EXTEND_INFO, {}).get(JsonConstant.AGENT_IP_LIST, '')
            for ip_info in backup_node_ip:
                if ip_info not in node_ip:
                    continue
                job_type = GbaseBackupSubjobName.SUB_BACKUP
                job_priority = SubJobPriority.JOB_PRIORITY_3
                sub_job = self.build_sub_job(job_priority, host_id, job_type)
                sub_job_array.append(sub_job)
                job_type = GbaseBackupSubjobName.MKDIR
                job_priority = SubJobPriority.JOB_PRIORITY_1
                sub_job = self.build_sub_job(job_priority, host_id, job_type)
                sub_job_array.append(sub_job)
                main_task_generator_status = False
                break
        LOGGER.info("Sub-task splitting succeeded.sub-task num:%s", len(sub_job_array))
        self.job_manager.update(sub_job_array)
        return sub_job_array

    def get_backup_node_ip(self):
        """Collect IP addresses of cluster nodes whose gcware and gcluster are healthy.

        :return: list of IPs; empty list when nodes are missing or an IP is absent
        """
        ip_list = []
        cluster_info = self._json_param_object.get_protectenv_extendinfo_cluster_info()
        nodes = cluster_info.get("nodes")
        if not nodes:
            return ip_list
        for node in nodes:
            if node.get("gcware_status") and node.get("gcluster_status"):
                ip_addr = node.get("ip_address")
                if not ip_addr:
                    # fixed: the log string was missing the %s placeholder for the suffix
                    LOGGER.error("Failed to obtain the node IP address from the parameter.%s.", self.get_log_comm())
                    return []
                ip_list.append(ip_addr)
        return ip_list

    def build_sub_job(self, job_priority, host_id, job_type):
        """Build one sub-job dict pinned to a fixed node.

        :param job_priority: SubJobPriority value
        :param host_id: execution node id
        :param job_type: sub-job name (also used as jobName)
        """
        return SubJobModel(jobId=self._job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, execNodeId=host_id,
                           jobPriority=job_priority, jobName=job_type, policy=SubJobPolicy.FIXED_NODE.value,
                           ignoreFailed=False).dict(by_alias=True)

    def backup(self):
        """Dispatch and run the current sub-job via backup_func_dict.

        :return: True on success; False (with FAILED progress reported) otherwise
        """
        self.update_progress(SubJobStatusEnum.RUNNING.value, 1)
        self.prepare_backup_path()
        LOGGER.info("self._subjob_name:%s.%s.", self._subjob_name, self.get_log_comm())
        func = self.backup_func_dict.get(self._subjob_name, None)
        if not func:
            LOGGER.error("Unknown sub job.%s.", self.get_log_comm())
            self.update_progress(SubJobStatusEnum.FAILED.value, 100)
            return False
        LOGGER.info("Starting execute function:%s", func)
        ret = func()
        LOGGER.info("End execute function:%s, ret:%s", func, ret)
        if not ret:
            self.update_progress(SubJobStatusEnum.FAILED.value, 100)
            LOGGER.error("Exec sub job failed.%s.", self.get_log_comm())
            return False
        self.update_progress(SubJobStatusEnum.COMPLETED.value, 100)
        return True

    def backup_abort_job(self):
        """Abort a running backup by killing the backup process, if any.

        :return: True when no matching process remains; False when the kill failed
        """
        key_list = ['', '', '-d', self._backup_path]
        pid = lookup_process(key_list)
        if not pid:
            self.action_result()
            return True
        try:
            os.kill(pid, signal.SIGKILL)
        except Exception:
            LOGGER.warning("Failed to stop the process.%s.", self.get_log_comm())
        pid = lookup_process(key_list)
        if pid:
            self.action_result()
            return False
        self.action_result()
        return True

    def query_scan_repositories(self):
        """Report the data and meta repository paths for the framework to scan."""
        LOGGER.info("Start to query_scan_repositories task.")
        log_meta_copy_repo = RepositoryPath(repositoryType=RepositoryDataTypeEnum.META_REPOSITORY,
                                            scanPath=self._meta_area)
        log_data_repo = RepositoryPath(repositoryType=RepositoryDataTypeEnum.DATA_REPOSITORY,
                                       scanPath=self._data_area)
        scan_repos = ScanRepositories(scanRepoList=[log_data_repo, log_meta_copy_repo], savePath=self._meta_area)
        output_result_file(self._pid, scan_repos.dict(by_alias=True))
        LOGGER.info("Query_scan_repositories task exec success.")

    def backup_post_job(self):
        """Post-job cleanup: remove the local link and, for non-full backups,
        delete copy cross-links and any residual soft links.

        :return: True on success; False when link deletion fails
        """
        self.update_progress(SubJobStatusEnum.RUNNING.value, 0)
        self.contact_link()
        if self._backup_type != BackupTypeEnum.FULL_BACKUP:
            copy_id_list = self.pre_copy_id_list()
            copy_link = CopyLink(copy_id_list, self._data_area, '', LinkTaskType.DELLINK)
            if not copy_link.main():
                self.update_progress(SubJobStatusEnum.FAILED.value, 100)
                LOGGER.error("Failed to dell link.%s.", self.get_log_comm())
                return False
            try:
                # Hardening: incremental copies occasionally keep stale soft links
                if not self.hardening_clear_soft_link():
                    LOGGER.error(f"clear soft link exception")
            except Exception as exception:
                LOGGER.error(f"some exception: {exception}")
        self.update_progress(SubJobStatusEnum.COMPLETED.value, 100)
        return True

    def hardening_clear_soft_link(self):
        """Remove soft links possibly left behind in the incremental backup copy.

        :return: False when the `find` command fails, True otherwise
        """
        cur_backup_path = os.path.join(self._data_area, str(self._job_id))
        find_soft_link_cmd = f"find {cur_backup_path} -type l"
        ret, out_info, err_info = execute_cmd(find_soft_link_cmd)
        LOGGER.info(f"soft link {out_info}")
        if not ret:
            LOGGER.error(f"exec find link filed, err: {err_info}")
            return False
        # fixed: split('') of empty output yields [''], which made the
        # "no residual link" guard unreachable and triggered removal of ''
        soft_link_path_list = [path for path in out_info.strip('\r\n').split('\n') if path.strip()]
        if not soft_link_path_list:
            LOGGER.info("There is no residual soft connection.")
            return True
        for soft_link_path in soft_link_path_list:
            if not remove_file_dir_link(soft_link_path):
                if not os.path.exists(soft_link_path):
                    LOGGER.info(f"{soft_link_path} not exist")
                    continue
                LOGGER.error(f"can not remove this path {soft_link_path}")
                continue
        return True

    def make_backup_dir(self):
        """Create the backup copy directory owned by the GBase OS user.

        :return: True on success, False otherwise
        """
        host_name = self.cmd.get_os_user()
        path = os.path.join(self._data_area, self._copy_id)
        path_info = ModifyPermission(path=path, user=host_name, path_check_func=check_path_legal,
                                     path_check_func_args=(path, GbasePath.GBASE_FILESYSTEM_MOUNT_PATH))
        ret = FileOperate.mkdir_change_permission(path_info)
        if not ret:
            LOGGER.error("Failed to create the backup copy directory.%s.", self.get_log_comm())
            return False
        return True

    def link(self):
        """Create the local job link pointing at this node's copy directory.

        Steps: ensure the local link root exists with correct permissions,
        create <data_area>/<copy_id>/<host_uuid>, rebuild incremental copy
        cross-links, then (re)create the <link_root>/<job_id> soft link.

        :return: True on success, False otherwise
        """
        host_name = self.cmd.get_os_user()
        path_info = ModifyPermission(path=GbasePath.GBASE_LINK_PATH, user=host_name, path_check_func=check_path_legal,
                                     path_check_func_args=(GbasePath.GBASE_LINK_PATH,
                                                           GbasePath.GBASE_FILESYSTEM_MOUNT_PATH))
        if os.path.exists(GbasePath.GBASE_LINK_PATH):
            if not FileOperate.change_path_permission(path_info):
                return False
        elif not FileOperate.mkdir_change_permission(path_info):
            LOGGER.error("Create local link path fail,%s.", self.get_log_comm())
            return False
        host_id = get_local_uuid()
        if not host_id:
            LOGGER.error("Get local uuid fail,%s.", self.get_log_comm())
            return False
        link_path = os.path.join(self._data_area, self._copy_id, host_id)
        link_path_info = ModifyPermission(path=link_path,
                                          user=host_name,
                                          path_check_func=check_path_legal,
                                          path_check_func_args=(link_path, GbasePath.GBASE_FILESYSTEM_MOUNT_PATH)
                                          )
        if not FileOperate.mkdir_change_permission(link_path_info):
            LOGGER.error("Create link path fail,%s.", self.get_log_comm())
            return False
        if self._backup_type != BackupTypeEnum.FULL_BACKUP:
            copy_id_list = self.pre_copy_id_list()
            copy_link = CopyLink(copy_id_list, self._data_area, host_name, LinkTaskType.BACKUP)
            if not copy_link.main():
                LOGGER.error("Failed to construct the backup directory.%s.", self.get_log_comm())
                return False
        # str() for consistency with init_param's _backup_path construction
        path = os.path.join(GbasePath.GBASE_LINK_PATH, str(self._job_id))
        if os.path.exists(path):
            remove_file_dir_link(path)
        path_info = CreateSoftLink(src_path=link_path,
                                   dst_path=path,
                                   src_user=host_name)
        if not FileOperate.create_soft_link(path_info):
            LOGGER.error("Create link fail,%s.", self.get_log_comm())
            return False
        if not os.path.exists(path):
            LOGGER.error("Create link fail,%s.", self.get_log_comm())
            return False
        self._backup_path = path
        LOGGER.info("Create link success,%s.", self.get_log_comm())
        return True

    def contact_link(self):
        """Remove this job's local link directory if it exists.

        :return: True on success or when nothing exists, False on delete failure
        """
        path = os.path.join(GbasePath.GBASE_LINK_PATH, str(self._job_id))
        if os.path.exists(path):
            try:
                remove_file_dir_link(path)
            except Exception:
                LOGGER.error("Failed to delete the backup directory.%s.", self.get_log_comm())
                return False
        return True

    def backup_sub_job(self):
        """
        Execute the backup sub-task.
        """
        if self._backup_type == BackupTypeEnum.INCRE_BACKUP:
            backup_level = BackupLevel.INCRE
        elif self._backup_type == BackupTypeEnum.FULL_BACKUP:
            backup_level = BackupLevel.FULL
        else:
            LOGGER.error("Incorrect backup type.%s.", self.get_log_comm())
            return False
        vc_name = ''
        if self._vc_name:
            vc_name = f"{self._vc_name}."
        LOGGER.info("Begin exec backup task...")
        cmd = cmd_format(CmdStr.DATABASE_BACKUP, vc_name, self._db_name, backup_level)
        ret, message = self.exec_backup_cmd(cmd)
        # scrub credentials from memory as soon as the command is done
        clear(self.host_pwd)
        clear(self.db_pwd)
        if not ret:
            LOGGER.error("Failed to execute backup cmd:%s", cmd)
            log = LogDetail(logInfo='', logInfoParam=[], logTimestamp=0,
                            logDetail=ErrorCode.EXEC_BACKUP_RECOVER_CMD_FAIL,
                            logDetailParam=["Backup", message], logDetailInfo=[], logLevel=DBLogLevel.ERROR)
            self.update_progress(SubJobStatusEnum.FAILED.value, 100, [log])
            return False
        return True

    def exec_backup_cmd(self, cmd):
        """
        Execute the backup command and parse the output to decide success.
        """
        self.db_pwd = self.cmd.get_db_pwd()
        self.host_pwd = self.cmd.get_os_pwd()
        self.host_name = self.cmd.get_os_user()
        self.gcluster_base_path = GbaseUtil.get_gcrcman_path(self.cmd, self.host_name)
        gcrcman_tool = GcrcmanTool(self.host_name, self.host_pwd, self.db_pwd, self.gcluster_base_path, timeout=None)
        result, out_info = gcrcman_tool.run_gcrcman_tool(self._backup_path, cmd)
        if not result:
            LOGGER.error("Failed to run the backup command.out_info:%s. %s.", out_info, self.get_log_comm())
            self.parsing_error_scenarios(out_info)
            return False, out_info
        LOGGER.info("Successful to execute command:%s, result:%s, out:%s", cmd, result, out_info)
        node_num = self.get_cluster_backup_node_num()
        if not node_num:
            LOGGER.error("Failed to get node num:%s", node_num)
            return False, out_info
        # the per-node result lines sit just before the last 2 footer lines
        out_info_strip = out_info.strip('\n').split('\n')
        if len(out_info_strip) <= 2 + node_num:
            LOGGER.error("Failed to parse the backup result.%s.", self.get_log_comm())
            return False, out_info
        out_info_list = out_info_strip[-(2 + node_num):-2]
        for info in out_info_list:
            if 'backup database success' not in info:
                LOGGER.error("%s Backup failed.err_info:%s.", self.get_log_comm(), info)
                return False, out_info
        if not self.upload_copy_info():
            LOGGER.error("Failed to report the copy information.%s.", self.get_log_comm())
            return False, out_info
        return True, "Success"

    def get_cluster_backup_node_num(self):
        """Return the number of cluster nodes (0 when node info is unavailable).

        NOTE: mutates EnvName class attributes so GBaseInfos reads credentials
        from the backup-job parameter keys.
        """
        EnvName.DB_USER_NAME = "job_protectedEnv_auth_authKey"
        EnvName.DB_PASSWORD = "job_protectedEnv_auth_authPwd"
        EnvName.CUSTOM_SETTINGS = "job_protectEnv_auth_extendInfo_authCustomParams"
        inst = GBaseInfos(self._pid, self.cmd)
        all_node_info = inst.get_all_nodes_info()
        node_info = all_node_info.get("nodes", [])
        if not node_info:
            LOGGER.error("Failed to obtain cluster node information.%s.", self.get_log_comm())
        node_num = len(node_info)
        return node_num

    def upload_copy_info(self):
        """Actively report copy information to the framework.

        :return: True on success, False on any query/parse/report failure
        """
        cmd = CmdStr.SHOW_BACKUP
        gcrcman_tool = GcrcmanTool(self.host_name, self.host_pwd, self.db_pwd, self.gcluster_base_path, timeout=None)
        result, out_info = gcrcman_tool.run_gcrcman_tool(self._backup_path, cmd)
        if not result:
            LOGGER.error("Failed to query the copy information,.%s.", self.get_log_comm())
            self.parsing_error_scenarios(out_info)
            return False
        ret, info = self.resolve_copy_info(out_info)
        if not ret:
            LOGGER.error("Failed to resolve copy info:%s", out_info)
            return False
        copy_id_list = self.pre_copy_id_list()
        if not copy_id_list:
            LOGGER.error("Failed to obtain the ID of the copy.%s.", self.get_log_comm())
            return False
        cycle_info, point_info, level_info, time_info = info
        copy_info = {
            "extendInfo": {
                "backupTime": int(time.time()),
                "beginTime": None,
                "endTime": None,
                "beginSCN": None,
                "endSCN": None,
                "copyIdList": copy_id_list,
                "cycle": cycle_info,
                "point": point_info,
                "level": level_info,
                JsonConstant.COPY_ID: self._copy_id
            }
        }
        out_repositories = self.get_archive_path()
        if not out_repositories:
            LOGGER.error("Failed to set the archive path.")
            return False
        copy_info[JsonConstant.REPOSITORIES] = out_repositories
        copy_info = {"copy": copy_info, "jobId": self._job_id}
        try:
            exec_rc_tool_cmd(self._job_id, RpcParamKey.REPORT_COPY_INFO, copy_info)
        except Exception:
            LOGGER.error(
                "An exception occurred when invoking the tool for reporting copy information..%s.", self.get_log_comm())
            return False
        self.report_size()
        return True

    def report_size(self):
        """Report the backup data size (to UBC) before reporting copy information."""
        copy_path = os.path.join(self._data_area, self._copy_id)
        flag, data_size = scan_dir_size(self._job_id, copy_path)
        LOGGER.info("GBase backup data size is: %s, job id: %s, flag: %s", data_size, self._job_id, flag)
        progress = 100
        if not flag:
            data_size = 0
            # fixed: data_size and job id arguments were swapped
            LOGGER.error("Query copy data failed, data_size:%s job id: %s", data_size, self._job_id)
        # status=6 means success
        self.update_report_result(SubJobStatusEnum.COMPLETED.value, progress, data_size)
        LOGGER.info("Update report backup task progress completed.")

    def pre_copy_id_list(self):
        """Return the chain of copy IDs this backup depends on, ending with this copy.

        Full backups depend only on themselves; incremental/diff backups extend
        the previous copy's copyIdList.
        """
        if self._backup_type == BackupTypeEnum.FULL_BACKUP:
            return [self._copy_id]
        last_copy_info = self.get_last_copy_info()
        copy_id_list = last_copy_info.get(JsonConstant.EXTEND_INFO, {}).get(JsonConstant.COPY_ID_LIST, [])
        if not copy_id_list:
            return []
        copy_id_list.append(self._copy_id)
        return copy_id_list

    def get_archive_path(self):
        """Build repositories info: point the data repository at this copy's subdirectory."""
        all_repositories = deepcopy(self._repositories)
        out_repositories = []
        for repository in all_repositories:
            repository_type = repository.get(JsonConstant.REPOSITORY_TYPE)
            if repository_type == RepositoryDataTypeEnum.DATA_REPOSITORY:
                old_remote_path = repository.get(JsonConstant.REMOTE_PATH)
                repository[JsonConstant.REMOTE_PATH] = os.path.join(old_remote_path, self._copy_id)
                out_repositories.append(repository)
        return out_repositories

    def parsing_error_scenarios(self, out_info):
        """Log a more precise reason extracted from tool output; always returns False."""
        if "host password wrong" in out_info:
            LOGGER.error("Host password wrong.%s.", self.get_log_comm())
            return False
        elif "database passwd is wrong" in out_info:
            # fixed: this branch previously logged "Host password wrong"
            LOGGER.error("Database password wrong.%s.", self.get_log_comm())
            return False
        return False

    def prepare_backup_path(self):
        """Point _backup_path at this job's local link directory."""
        path = os.path.join(GbasePath.GBASE_LINK_PATH, str(self._job_id))
        self._backup_path = path
        return True

    def get_last_copy_info(self, backup_type=None):
        """Query the previous copy of the appropriate type via the RC tool.

        :param backup_type: optional override; defaults to this task's backup type
        :return: previous copy info dict, or {} when the type is unknown
        """
        if not backup_type:
            backup_type = self._backup_type
        last_copy_type = LastCopyType.last_copy_type_dict.get(backup_type, [])
        if not last_copy_type:
            LOGGER.error("Backup type err.%s.", self.get_log_comm())
            return {}
        input_param = {
            RpcParamKey.APPLICATION: self._protect_object,
            RpcParamKey.TYPES: last_copy_type,
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id
        }
        return exec_rc_tool_cmd(f"{self._job_id}_{self._sub_job_id}", RpcParamKey.QUERY_PREVIOUS_CPOY, input_param)
