#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import pathlib
import re
import time

from shutil import copy

from common.util.cmd_utils import cmd_format
from gaussdbt.backup.check_backup_type import check_database_restore, check_failover
from gaussdbt.commons.const import BackupStepEnum, RoachConstant, ErrorCode, NormalErr
from gaussdbt.commons.database_common import gaussdbt_check_user_name_and_injection
from gaussdbt.commons.gaussdbt_common import record_err_code, check_mount_point_use, check_uid_consistency
from gaussdbt.commons.gaussdbt_param_protection import ParamProtection
from gaussdbt.commons.models import MetaDataInfo
from gaussdbt.commons.roach_meta_info import mount_bind_backup_path, umount_bind_backup_path, save_backup_key, \
    write_progress_file
from gaussdbt.resource.gaussdbt_resource import GaussCluster
from common.common import execute_cmd, change_dir_permission, convert_time_to_timestamp, \
    check_command_injection_exclude_quote, touch_file, read_tmp_json_file, check_path_legal
from common.common_models import Copy, CopyExtendInfo
from common.const import BackupTypeEnum, SubJobStatusEnum, RepositoryDataTypeEnum, DeployType, RoleType
from common.file_common import change_path_permission
from common.logger import Logger
from common.util.exec_utils import exec_mkdir_cmd, exec_overwrite_file, su_exec_rm_cmd, exec_cp_cmd, exec_append_file, \
    exec_cp_dir_no_user

log = Logger().get_logger("gaussdbt_plugin.log")


def set_end_time(info, log_copy_info, std_out):
    """
    Parse the backup end time out of roach output and store it on the copy info.

    :param info: the "NEXT TIME"/"END TIME" line currently being processed
    :param log_copy_info: copy-info object whose ``end_time`` attribute is set
    :param std_out: full roach stdout, scanned for an "END TIME" line when
        *info* itself does not carry a timestamp
    """
    words = info.split()
    if len(words) >= 5:
        # Tokens 3 and 4 are the date and the time-of-day parts.
        log_copy_info.end_time = convert_time_to_timestamp(f"{words[3]} {words[4]}")
        return
    # *info* is too short to hold a timestamp; fall back to scanning the
    # whole output for an "END TIME" line (last match wins, as before).
    for info_tmp in std_out.split('\n'):
        if "END TIME" in info_tmp:
            words_tmp = info_tmp.split()
            # Bug fix: guard the fallback line too — the original indexed
            # words_tmp[3]/[4] unconditionally and raised IndexError when
            # the matched line was also short.
            if len(words_tmp) >= 5:
                log_copy_info.end_time = convert_time_to_timestamp(f"{words_tmp[3]} {words_tmp[4]}")


class BackUP:
    """
    Backup executor for GaussDB T driven by the roach tool.

    Covers the whole lifecycle of a single backup job: pre-task permission
    checks, roach command assembly and execution, progress-file parsing,
    copy-info reporting, cross-node metadata replication and post-task
    cleanup. Repository paths are mount-bound between the fixed roach
    paths on the host and the backup repositories.
    """

    def __init__(self, repositories_info, backup_id, backup_type, user_name, pid, repositories, is_stand):
        """
        :param repositories_info: dict mapping repository kind (e.g.
            "data_repository") to a list of local mount paths
        :param backup_id: identifier of the current backup job
        :param backup_type: a BackupTypeEnum value (full/incr/diff/log)
        :param user_name: OS user that owns the database installation
        :param pid: request id passed to auxiliary lookups
        :param repositories: raw repository descriptors, echoed back (with
            adjusted remotePath) in the copy info
        :param is_stand: whether backup on a standby node is permitted
        """
        self.repositories = repositories
        self.pid = pid
        # Sub-directories inside the data/meta repositories used as mount-bind targets.
        self.data_area = os.path.join(repositories_info.get("data_repository", [""])[0], "data")
        self.meta_area = os.path.join(repositories_info.get("meta_repository", [""])[0], "meta")
        self.meta_path = os.path.join(repositories_info.get("meta_repository", [""])[0])
        self.data_path = os.path.join(repositories_info.get("data_repository", [""])[0])
        self.cache_area = repositories_info.get("cache_repository", [""])[0]
        self.backup_type = backup_type
        self.log_area = ""
        self.log_arch = ""
        # NOTE: mount_log_area is only defined for log backups; other code
        # paths must not touch it unless backup_type is LOG_BACKUP.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self.log_area = RoachConstant.ROACH_LOG_FILE_PATH
            self.mount_log_area = repositories_info.get("log_repository", [""])[0]
            self.log_arch = os.path.join(self.log_area, RoachConstant.ROACH_ARCH)
        self.backup_id = backup_id
        self.user_name = user_name
        # Fixed roach-side paths (sources of the mount binds).
        self.media_path = os.path.join(RoachConstant.ROACH_DATA_FILE_PATH, RoachConstant.ROACH_DATA)
        self.metadata_path = RoachConstant.ROACH_META_FILE_PATH
        self.mount_media_path = os.path.join(self.data_area, RoachConstant.ROACH_DATA)
        self.mount_metadata_path = self.meta_area
        self.backup_on_standby = is_stand
        self.sub_job_id = ""
        self.parallel_process = ""

    def set_sub_job_id_and_parallel_process(self, sub_job_id, parallel_process):
        """Record the current sub-job id and optional roach parallelism setting."""
        self.sub_job_id = sub_job_id
        self.parallel_process = parallel_process

    def clean_directory(self, directory_path, origin_list):
        """
        Restore *directory_path* to the entries listed in *origin_list*.

        NOTE(review): the loop body only chdir()s into the directory and
        never removes the new entries, so the "clean" appears to be a
        no-op for the extra files — confirm whether a removal call was
        lost here.
        """
        directory_list = os.listdir(directory_path)
        change_path_permission(directory_path, self.user_name)
        for directory in directory_list:
            if directory not in origin_list:
                os.chdir(directory_path)

    def set_sub_dirs_for_cluster_nodes(self):
        """
        Set per-node mount-bind sub-directories for cluster backups.

        Only applies when deploy type is cluster AND the database user's
        uid differs across nodes; in that case each node writes under a
        sub-directory named after its own IP to avoid ownership clashes.
        """
        deploy_type = GaussCluster.get_deploy_type()
        if deploy_type != DeployType.CLUSTER_TYPE:
            log.info("Not cluster type, no need to set sub dirs.")
            return
        if check_uid_consistency(self.user_name, self.pid):
            log.info("Uid is consistent, no need to set sub dirs.")
            return
        log.info("Start to set sub dirs for cluster nodes mount bind.")
        local_ip = GaussCluster.get_endpoint_by_hostname()
        # Redirect all repository paths underneath <repo>/<local_ip>/.
        self.meta_path = os.path.join(self.meta_path, local_ip)
        self.data_path = os.path.join(self.data_path, local_ip)
        if not os.path.exists(self.data_path):
            log.info(f"Start to create data dir: {self.data_path}")
            try:
                os.makedirs(self.data_path, mode=0o755)
            except Exception as ex:
                # Creation failure is logged but not fatal here; later steps will fail visibly.
                log.error(f"Make directory {self.data_path} failed, error: {ex}")
        if not os.path.exists(self.meta_path):
            log.info(f"Start to create data dir: {self.meta_path}")
            try:
                os.makedirs(self.meta_path, mode=0o755)
            except Exception as ex:
                log.error(f"Make directory {self.meta_path} failed, error: {ex}")
        self.data_area = os.path.join(self.data_path, "data")
        self.meta_area = os.path.join(self.meta_path, "meta")
        self.mount_media_path = os.path.join(self.data_area, RoachConstant.ROACH_DATA)
        self.mount_metadata_path = self.meta_area
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            if not os.path.exists(self.mount_log_area):
                log.info(f"Start to create data dir: {self.mount_log_area}")
                try:
                    os.makedirs(self.mount_log_area, mode=0o755)
                except Exception as ex:
                    log.error(f"Make directory {self.mount_log_area} failed, error: {ex}")
            sub_log_area = os.path.join(self.mount_log_area, local_ip)
            self.mount_log_area = sub_log_area
        log.info("Set sub dirs successfully.")

    def pre_backup(self):
        """
        Pre-task: fix ownership of all repository directories and run
        log-backup prerequisite checks.

        :return: True when the pre-task completed (even if a log-backup
            error code was recorded), False on missing user name or
            failure to resolve uid/gid.
        """
        log.info("Start to exec pre task")
        if not self.user_name:
            log.error("Failed to get user name")
            return False
        # Clear any stale binds left over from a previous job.
        umount_bind_backup_path()
        try:
            # Get the database user's uid and gid
            user_id, group_id = GaussCluster.get_user_info(self.user_name)
        except Exception:
            log.exception("Exception when exec pre task")
            return False
        # Check the data and metadata directories
        change_dir_permission(self.data_area, user_id, group_id)
        change_dir_permission(self.meta_area, user_id, group_id)
        # Check the roach directory under the data directory
        change_dir_permission(self.mount_media_path, user_id, group_id)
        change_dir_permission(self.mount_metadata_path, user_id, group_id)
        # Check the cache (temporary) directory
        change_dir_permission(self.cache_area, user_id, group_id)
        if GaussCluster.get_deploy_type() == DeployType.CLUSTER_TYPE:
            change_path_permission(self.cache_area, self.user_name, mode=0o777)
        if self.log_area:
            change_dir_permission(self.log_area, user_id, group_id)
            change_dir_permission(self.mount_log_area, user_id, group_id)
        # Log-backup prerequisite check: record an error code if the
        # database was restored or failed over since the last backup.
        err_code_file = os.path.join(self.cache_area, f'{self.backup_id}errcode')
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            if not check_database_restore(self.meta_path, self.pid) or not check_failover(self.pid):
                record_err_code(ErrorCode.ERR_RESTORED.value, err_code_file, self.user_name)
                log.info("Failed to exec pre task")
        else:
            record_err_code(NormalErr.NO_ERR.value, err_code_file, self.user_name)
            log.info("Succeed to exec pre task")
        # Touch the progress marker so the controller sees the pre-task done.
        result_file = os.path.join(self.cache_area, BackupStepEnum.PRE_TASK_PROGRESS.value)
        pathlib.Path(result_file).touch()
        return True

    def backup_progress(self):
        """
        Parse the backup progress file written by the roach command.

        :return: (ok, progress_percent_str, SubJobStatusEnum value).
            A missing file is treated as "still starting" (5%, RUNNING).
        """
        progress = None
        status = None
        progress_file = os.path.join(self.cache_area, self.backup_id)
        try:
            with open(progress_file, "r", encoding='UTF-8') as f:
                data = f.read()
        except FileNotFoundError:
            log.exception("Failed to read progress file, File Not Found")
            return True, "5", SubJobStatusEnum.RUNNING.value
        except IOError:
            log.exception("Failed to read progress file")
            return False, "100", SubJobStatusEnum.FAILED.value
        if "[FAILURE]" in data or 'Failed' in data:
            return True, "100", SubJobStatusEnum.FAILED.value
        if "Performing backup completed" in data:
            progress = "100"
            status = SubJobStatusEnum.COMPLETED.value
        elif "%" not in data:
            progress = "5"
            status = SubJobStatusEnum.RUNNING.value
        else:
            progress_info = data.split("\n")
            for info in progress_info:
                if "%" in info:
                    # NOTE(review): searches the WHOLE file (`data`) rather
                    # than the matched line (`info`), so the first decimal
                    # number in the file wins — confirm this is intended.
                    # The pattern is also a non-raw string; "\d" relies on
                    # Python keeping unknown escapes, prefer r"\d+\.\d+".
                    progress = re.findall("\d+\.\d+", data)[0].split('.')[0]
                    status = SubJobStatusEnum.RUNNING.value
                    break
        if not progress:
            log.error("Failed to check progress")
            return False, "100", SubJobStatusEnum.FAILED.value
        return True, progress, status

    def post_backup(self):
        """
        Post-task: wait until the fixed roach mount points are idle, then
        unbind them and mark the post-task progress file completed.

        :return: always True
        """
        log.info("Start to exec post_backup task")
        result_file = os.path.join(self.cache_area, f"{BackupStepEnum.POST_TASK_PROGRESS.value}_{self.sub_job_id}")
        if os.path.exists(result_file):
            log.info(f"Tmp file: {result_file} exists, and remove")
            su_exec_rm_cmd(result_file)
        pathlib.Path(result_file).touch()
        write_progress_file("Running", result_file)
        # Busy-wait until no process is using any of the bind mounts.
        while (check_mount_point_use(RoachConstant.ROACH_DATA_FILE_PATH)
               or check_mount_point_use(RoachConstant.ROACH_META_FILE_PATH)
               or check_mount_point_use(RoachConstant.ROACH_LOG_FILE_PATH)):
            time.sleep(5)
        umount_bind_backup_path()
        write_progress_file("Completed", result_file)
        log.info(f"Succeed to exec post_backup task")
        return True

    def get_backup_key(self):
        """
        Extract the backup key from the roach output file in the cache area.

        :return: the backup key string, or None when the file is missing
            or holds no "backup key" line.
        """
        backup_key = None
        file_path = os.path.join(self.cache_area, self.backup_id)
        if not os.path.exists(file_path):
            log.error(f"File: {file_path} not exist, can not get backup_key")
            return backup_key
        with open(file_path, "r", encoding='UTF-8') as f:
            for line in f.readlines():
                if "backup key" in line:
                    # Line shape (from roach output): "..., backup key: <key>, ..."
                    backup_key = line.split(',')[1].split(":")[1].strip()
                    break
        return backup_key

    def get_log_copy_info(self, backup_key):
        """
        Query roach for log-copy details (begin/end time, arch path) and
        persist the end time as the next copy's first time.

        :param backup_key: key of the log backup to inspect
        :return: a CopyExtendInfo; its backup_index_id stays empty on failure.
        """
        log.info("Start to query log copy info")
        log_copy_info = CopyExtendInfo()
        roach_home = GaussCluster.get_roach_home(self.user_name)
        if check_command_injection_exclude_quote(roach_home):
            log.error(f"The roach_home parameter contains special characters.")
            return log_copy_info
        param = cmd_format("--metadata-destination {} --backup-key {} --arch", self.metadata_path, backup_key)
        cmd = f'su - {self.user_name} -c "{roach_home}{RoachConstant.ROACH_SHOW} {param}"'
        return_code, std_out, std_err = execute_cmd(cmd)
        if return_code != "0":
            log.error(f"Failed to exec roach show, std_err: {std_err}")
            return log_copy_info
        log_copy_info.backup_index_id = backup_key
        log_copy_info.data_path = str([os.path.join(RoachConstant.ROACH_ARCH, backup_key)])
        first_time_file = os.path.join(self.meta_area, RoachConstant.FIRST_TIME_FILE_NAME)
        for info in std_out.split('\n'):
            if "FIRST TIME" in info:
                words = info.split()
                if len(words) < 5:
                    # Line lacks a timestamp: fall back to the value saved
                    # by the previous backup run.
                    first_time_json = read_tmp_json_file(first_time_file)
                    log_copy_info.begin_time = int(first_time_json.get("first_time"))
                else:
                    log_copy_info.begin_time = convert_time_to_timestamp(f"{words[3]} {words[4]}")
            if "NEXT TIME" in info:
                set_end_time(info, log_copy_info, std_out)
        log_copy_info.backup_time = log_copy_info.end_time
        log.info("Succeed to get log copy info")
        # Persist end_time so the NEXT log backup can use it as its begin time.
        exec_overwrite_file(first_time_file, {'first_time': log_copy_info.end_time})
        return log_copy_info

    def get_copy_info(self, backup_key):
        """
        Query roach for non-log copy details (end time, data path).

        :param backup_key: key of the backup to inspect
        :return: a CopyExtendInfo; its backup_index_id stays empty on failure.
        """
        log.info("Start to query normal copy info")
        copy_info = CopyExtendInfo()
        roach_home = GaussCluster.get_roach_home(self.user_name)
        if check_command_injection_exclude_quote(roach_home):
            log.error(f"The roach_home parameter contains special characters.")
            return copy_info
        param = cmd_format('--metadata-destination {} --backup-key {}', self.metadata_path, backup_key)
        cmd = f'su - {self.user_name} -c "{roach_home}{RoachConstant.ROACH_SHOW} {param}"'
        return_code, std_out, std_err = execute_cmd(cmd)
        if return_code != "0":
            log.error(f"Failed to exec roach show, std_err: {std_err}")
            return copy_info
        copy_info.backup_index_id = backup_key
        backup_data_path = os.path.join("data", RoachConstant.ROACH_DATA, "roach", backup_key)
        copy_info.data_path = str([backup_data_path])
        for info in std_out.split('\n'):
            if "END TIME" in info:
                # Assumes the END TIME line always has >= 5 tokens here
                # (no short-line fallback like the log path) — TODO confirm.
                copy_info.end_time = convert_time_to_timestamp(f"{info.split()[3]} {info.split()[4]}")
        copy_info.backup_time = copy_info.end_time
        first_time_file = os.path.join(self.meta_area, RoachConstant.FIRST_TIME_FILE_NAME)
        log.info("Succeed to get normal copy info")
        exec_overwrite_file(first_time_file, {'first_time': copy_info.backup_time})
        return copy_info

    def rollback_after_backup_failed(self, file_list):
        """
        Roll back repository directories after a failed backup.

        Restores backupset_disk.ini from the cache copy and removes
        directory entries created by the failed run (see clean_directory).

        :param file_list: directory listing captured before the backup
        :return: True when cleanup ran, False when file_list was empty.
        """
        log.info("Start rollback directory info after backup failed")
        # Restore backupset_disk.ini saved before the backup started.
        roach_meta_file = os.path.join(self.metadata_path, RoachConstant.ROACH_META_FILE_NAME)
        source_file = os.path.join(self.cache_area, RoachConstant.ROACH_META_FILE_NAME)
        if os.path.exists(source_file):
            exec_cp_cmd(source_file, roach_meta_file)
            change_path_permission(roach_meta_file, self.user_name)
        if not file_list:
            log.info(f"Directory is null, no need to remove")
            return False
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self.clean_directory(self.log_arch, file_list)
        else:
            roach_data_path = os.path.join(self.media_path, "roach")
            roach_meta_path = os.path.join(self.metadata_path, "roach")
            # Clean directories newly added under mediaData/roach and meta/roach.
            self.clean_directory(roach_data_path, file_list)
            self.clean_directory(roach_meta_path, file_list)
        log.info("Succeed to clean directory")
        return True

    def save_dir_tree(self):
        """
        Record the data-area directory listing before the backup, so a
        failed run can be rolled back to this state. For non-log backups
        also snapshots backupset_disk.ini into the cache area.

        :return: list of existing directory entry names.
        """
        log.info("Get data_area directory info before backup")
        file_list = []
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            if os.path.isdir(self.log_arch):
                file_list.extend(os.listdir(self.log_arch))
        else:
            # Save a copy of backupset_disk.ini before the backup runs.
            roach_meta_file = os.path.join(self.metadata_path, RoachConstant.ROACH_META_FILE_NAME)
            if os.path.exists(roach_meta_file):
                copy(roach_meta_file, self.cache_area)
            roach_data_path = os.path.join(self.media_path, "roach")
            if os.path.isdir(roach_data_path):
                file_list.extend(os.listdir(roach_data_path))
        log.info("Succeed to get directory info")
        return file_list

    def check_backup_on_standby(self):
        """Return True when standby backup is enabled, the deploy type is
        cluster and this node currently plays the STANDBY role."""
        deploy_type = GaussCluster.get_deploy_type()
        if self.backup_on_standby and deploy_type == DeployType.CLUSTER_TYPE:
            if GaussCluster.get_local_role() == RoleType.STANDBY:
                return True
        return False

    def prepare_backup_cmd(self):
        """
        Assemble the roach backup command for the configured backup type.

        :return: the full command string, or "" on any validation failure
            (missing roach home, injection risk, missing port, unknown
            backup type, unreadable full-backup key).
        """
        roach_home = GaussCluster.get_roach_home(self.user_name)
        if not roach_home:
            log.error("Failed when get roach_home")
            return ""
        if check_command_injection_exclude_quote(roach_home):
            log.error(f"The roach_home parameter contains special characters.")
            return ''
        backup_port = GaussCluster.check_port()
        if not backup_port:
            log.error(f"The backup port parameter is none.")
            return ''
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            backup_param = f"--media-destination {self.media_path} --metadata-destination {self.metadata_path}"
            backup_param = self.deal_with_backup_param(backup_param)
        elif self.backup_type == BackupTypeEnum.INCRE_BACKUP:
            try:
                # Incremental backup needs the key of the last FULL copy.
                last_backup_key = ParamProtection.get_full_backup_key(self.meta_area)
            except (FileNotFoundError, IOError):
                log.exception("Failed to get full backup key")
                return ""
            backup_param = f"--media-destination {self.media_path} --metadata-destination {self.metadata_path} " \
                           f"--incremental-type all --prior-backup-key {last_backup_key}"
            backup_param = self.deal_with_backup_param(backup_param)
        elif self.backup_type == BackupTypeEnum.DIFF_BACKUP:
            meta_path = self.meta_area
            # Differential backup chains off the most recent backup of any kind.
            last_backup_key = ParamProtection.latest_backup_key(meta_path)
            backup_param = f"--media-destination {self.media_path} --metadata-destination {self.metadata_path} " \
                           f"--incremental-type newest --prior-backup-key {last_backup_key}"
            backup_param = self.deal_with_backup_param(backup_param)
        elif self.backup_type == BackupTypeEnum.LOG_BACKUP:
            # Log backup binds the log repository too before running.
            mount_bind_backup_path(self.data_area, self.meta_area, self.mount_log_area,
                                   backup_type=BackupTypeEnum.LOG_BACKUP)
            backup_param = f"--metadata-destination {self.metadata_path} --arch-destination " \
                           f"{self.log_area} --arch"
        else:
            return ""
        cmd = f"{roach_home}{RoachConstant.ROACH_BACKUP} --master-port {backup_port} {backup_param}"
        if self.check_backup_on_standby():
            log.info("Backup will be executed on standby!")
            cmd = f"{roach_home}{RoachConstant.ROACH_BACKUP} --master-port {backup_port} {backup_param}" \
                  f" --backup-on-standby"
        # Command-injection validation of the interpolated roach home path.
        if gaussdbt_check_user_name_and_injection(f"{roach_home}"):
            log.error("The parameter verification fails.")
            return ""
        log.info("Successfully prepared backup command.")
        return cmd

    def deal_with_backup_param(self, backup_param):
        """Append the --parallel-process option when a parallelism was configured."""
        if self.parallel_process:
            backup_param = f"{backup_param} --parallel-process {self.parallel_process}"
        return backup_param

    def backup(self):
        """
        Run the backup: snapshot the directory tree, bind the repositories,
        execute the roach command as the database user and roll back on
        failure.

        :return: False on setup errors (mount/command preparation/invalid
            tmp file); otherwise True.

        NOTE(review): on roach command failure this still returns True —
        the failure is signalled via the "[FAILURE]" marker appended to the
        progress file, which backup_progress() reports. Confirm callers
        rely on that rather than the return value.
        """
        log.info(f"Start to backup task, backup_type:{self.backup_type}")
        # Save the data-area directory listing before the backup starts.
        directory_info_before_backup = self.save_dir_tree()
        # Bind the repositories and assemble the roach command.
        if not mount_bind_backup_path(self.data_area, self.meta_area):
            log.error("Failed to mount bind backup path")
            return False
        backup_cmd = self.prepare_backup_cmd()
        if not backup_cmd:
            log.error("Failed to get prepare backup cmd")
            umount_bind_backup_path()
            return False
        tmp_file = os.path.join(self.cache_area, self.backup_id)
        if check_command_injection_exclude_quote(tmp_file):
            log.error(f"tmp_file: {tmp_file} is invailed")
            return False
        # Redirect roach output into the progress file read by backup_progress().
        cmd = f'su - {self.user_name} -c "{backup_cmd} >{tmp_file}"'
        return_code, std_out, std_err = execute_cmd(cmd)
        if return_code != "0":
            cmd_error = ""
            ex_info = ""
            try:
                with open(tmp_file, "r", encoding='UTF-8') as f:
                    cmd_error = f.read()
            except Exception as ex:
                ex_info = str(ex)
            finally:
                # Append the failure marker that backup_progress() keys off.
                data = f"[FAILURE] Std err: {std_err} Exception: {ex_info}"
                exec_append_file(tmp_file, data)
                log.error(f"Fail to exec backup, err: {cmd_error}, std err: {std_err}")
            # If a LOG backup failed, flag that an automatic full backup is needed.
            if self.backup_type == BackupTypeEnum.LOG_BACKUP:
                auto_full_backup_file = os.path.join(self.meta_area, RoachConstant.AUTO_FULL_BACKUP_FILE_NAME)
                if not os.path.exists(auto_full_backup_file):
                    touch_file(auto_full_backup_file)
            # Roll the repositories back to their pre-backup state.
            self.rollback_after_backup_failed(directory_info_before_backup)
            umount_bind_backup_path()
            return True
        else:
            # A successful FULL backup clears the auto-full-backup flag.
            if self.backup_type == BackupTypeEnum.FULL_BACKUP:
                auto_full_backup_file = os.path.join(self.meta_area, RoachConstant.AUTO_FULL_BACKUP_FILE_NAME)
                if os.path.isfile(auto_full_backup_file) and check_path_legal(auto_full_backup_file, self.meta_area):
                    os.remove(auto_full_backup_file)
        log.info("Succeed to exec backup")
        umount_bind_backup_path()
        return True

    def set_data_copy_rep_info(self):
        """
        Build the repository descriptors (with backup-key-specific remote
        paths) for a non-log copy.

        :return: list of adjusted repository dicts; empty when no backup key.
        """
        data_rep_rsp = []
        sub_dir = ""
        backup_key = ParamProtection.get_backup_key(self.cache_area, self.backup_id)
        deploy_type = GaussCluster.get_deploy_type()
        if deploy_type == DeployType.CLUSTER_TYPE and not check_uid_consistency(self.user_name, self.pid):
            # Mirror the per-node sub-dir layout set up by set_sub_dirs_for_cluster_nodes().
            local_ip = GaussCluster.get_endpoint_by_hostname()
            log.info(f"User uid is not consistent, set sub_dir as {local_ip}.")
            sub_dir = f"/{local_ip}"
        if not backup_key:
            return data_rep_rsp
        for repository in self.repositories:
            repository_type = repository.get("repositoryType")
            if repository_type == RepositoryDataTypeEnum.DATA_REPOSITORY:
                repository["remotePath"] = f"{repository.get('remotePath')}{sub_dir}/data/mediadata/roach/{backup_key}"
                data_rep_rsp.append(repository)
            elif repository_type == RepositoryDataTypeEnum.META_REPOSITORY:
                repository["remotePath"] = f"{repository.get('remotePath')}{sub_dir}/meta/roach/{backup_key}"
                data_rep_rsp.append(repository)
        return data_rep_rsp

    def set_log_copy_rep_info(self):
        """
        Build the repository descriptors for a log copy, creating the
        data/meta placeholder directories when absent.

        :return: list of adjusted repository dicts; empty when no backup key.
        """
        data_rep_rsp = []
        sub_dir = ""
        backup_key = ParamProtection.get_backup_key(self.cache_area, self.backup_id)
        deploy_type = GaussCluster.get_deploy_type()
        if deploy_type == DeployType.CLUSTER_TYPE and not check_uid_consistency(self.user_name, self.pid):
            local_ip = GaussCluster.get_endpoint_by_hostname()
            log.info(f"User uid is not consistent, set sub_dir as {local_ip}.")
            sub_dir = f"/{local_ip}"
        if not backup_key:
            return data_rep_rsp
        for repository in self.repositories:
            repository_type = repository.get("repositoryType")
            if repository_type == RepositoryDataTypeEnum.LOG_REPOSITORY:
                repository["remotePath"] = f"{repository.get('remotePath')}/data{sub_dir}/roach_arch/{backup_key}"
                data_rep_rsp.append(repository)
            elif repository_type == RepositoryDataTypeEnum.DATA_REPOSITORY:
                repository["remotePath"] = f"{repository.get('remotePath')}{sub_dir}/data/mediadata/roach/{backup_key}"
                dir_path = f"{self.data_area}/mediadata/roach/{backup_key}"
                if not os.path.exists(dir_path):
                    exec_mkdir_cmd(dir_path)
                data_rep_rsp.append(repository)
            elif repository_type == RepositoryDataTypeEnum.META_REPOSITORY:
                repository["remotePath"] = f"{repository.get('remotePath')}{sub_dir}/meta/log"
                dir_path = f"{self.meta_area}/log"
                if not os.path.exists(dir_path):
                    exec_mkdir_cmd(dir_path)
                data_rep_rsp.append(repository)
        return data_rep_rsp

    def copy_backup_infos(self, src, dest):
        """
        Copy the backup bookkeeping files relevant to this backup type
        from *src* to *dest* (used for cross-node replication).

        :return: True when all files copied, False on the first failure.
        """
        backup_infos = ["firstTime"]
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Full backup saves the backup_key; incremental backup reads it.
            backup_infos.append("LastFullCopyKey")
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            backup_infos.append("LastBackupKey")
        if self.backup_type == BackupTypeEnum.DIFF_BACKUP:
            backup_infos.append("LastDiffBackupKey")
        for backup_info in backup_infos:
            src_file = os.path.join(src, backup_info)
            copy_cmd = f"cp -rp {src_file} {dest}"
            return_code, std_out, std_err = execute_cmd(copy_cmd)
            if return_code != "0":
                log.error(f"Failed to copy backup info: {backup_info}, err: {std_err}, sub_job: {self.sub_job_id}")
                return False
        return True

    def copy_meta_and_backup_info_to_other_hosts(self):
        """
        In cluster mode with inconsistent uids, replicate this node's meta
        and backup-key files into every other node's repository sub-dir so
        each node can restore/chain independently.

        :return: True on success or when replication is not needed.
        """
        deploy_type = GaussCluster.get_deploy_type()
        if deploy_type != DeployType.CLUSTER_TYPE or self.backup_type == BackupTypeEnum.LOG_BACKUP:
            return True
        if check_uid_consistency(self.user_name, self.pid):
            log.info(f"Cluster node uid is consistent, no need to copy meta info.")
            return True
        log.info("Copy meta info to other hosts' repositories.")
        backup_key = ParamProtection.get_backup_key(self.cache_area, self.backup_id)
        all_nodes = GaussCluster.get_all_node()
        if not all_nodes:
            log.error("Node information is empty!")
            return False
        hostname = GaussCluster.get_hostname()
        src_meta_info_path = os.path.join(self.meta_area, "roach", backup_key, "metadata.json")
        src_backup_info_path = os.path.join(self.meta_area)
        # meta_path points into <repo>/<local_ip>; its parent holds all nodes' sub-dirs.
        dest_meta_path = os.path.dirname(self.meta_path)
        for node in all_nodes:
            if node.node_name != hostname:
                node_ip = node.node_ip
                log.info(f"Copy meta info to host: {node_ip}")
                dest_meta_info_path = os.path.join(dest_meta_path, node_ip, "meta", "roach", backup_key)
                dest_backup_info_path = os.path.join(dest_meta_path, node_ip, "meta")
                ret = exec_cp_dir_no_user(src_meta_info_path, dest_meta_info_path, is_check_white_list=False)
                if not ret:
                    log.error(f"Failed to copy meta info dir: {src_meta_info_path}.")
                    return False
                if not self.copy_backup_infos(src_backup_info_path, dest_backup_info_path):
                    return False
        log.info("Copy meta info to other hosts successfully.")
        return True

    def query_copy_info(self, copy_id):
        """
        Assemble the Copy object reported to the controller: extended copy
        info, repository paths and persisted metadata.json.

        :param copy_id: id to stamp on the Copy object
        :return: (success, Copy)
        """
        log.info(f"Start to query copy info, copy_id: {copy_id}")
        copy_info = Copy(id=copy_id)
        backup_key = ParamProtection.get_backup_key(self.cache_area, self.backup_id)
        if not mount_bind_backup_path(self.data_area, self.meta_area):
            log.error("Failed to mount bind backup path")
            return False, copy_info
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            extend_copy_info = self.get_log_copy_info(backup_key)
        else:
            extend_copy_info = self.get_copy_info(backup_key)
            extend_copy_info.meta_path = os.path.join("meta", "roach", backup_key)
        if not extend_copy_info.backup_index_id:
            log.error(f"Failed to query the extend copy info of copy_id: {copy_id}")
            umount_bind_backup_path()
            return False, copy_info
        # Report the copy timestamp.
        copy_info.timestamp = extend_copy_info.backup_time
        if not os.path.exists(self.meta_area):
            exec_mkdir_cmd(self.meta_area)
        self.save_backup_info(backup_key)
        # Persist the metadata information.
        file_path = os.path.join(self.meta_area, "roach", backup_key)
        if not os.path.exists(file_path):
            exec_mkdir_cmd(file_path)
        meta_data = MetaDataInfo(backupId=self.backup_id, backupType=self.backup_type, backupIndexId=backup_key,
                                 version=GaussCluster.get_gdb_version(),
                                 backupDir=extend_copy_info.data_path,
                                 copyTime=extend_copy_info.end_time, nodeInfo=GaussCluster.get_all_node())
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            backup_file = os.path.join(self.metadata_path, RoachConstant.ROACH_META_FILE_NAME)
            meta_data.backup_file = backup_file
            try:
                copy(backup_file, file_path)
            except Exception:
                # NOTE(review): this early return skips umount_bind_backup_path();
                # the bind stays mounted until post_backup — confirm intended.
                log.error(f'Failed copy for archive!')
                return False, copy_info
        else:
            meta_data.begin_time = extend_copy_info.begin_time
            meta_data.end_time = extend_copy_info.end_time
        log.info("Start to copy roach meta info")
        copy_info.extend_info = extend_copy_info
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            copy_info.repositories = self.set_data_copy_rep_info()
        else:
            copy_info.repositories = self.set_log_copy_rep_info()
        filename = os.path.join(file_path, "metadata.json")
        exec_overwrite_file(filename, meta_data.dict(by_alias=True))
        if not self.copy_meta_and_backup_info_to_other_hosts():
            return False, copy_info
        log.info(f"Succeed to query copy info, copy_id: {copy_id}")
        umount_bind_backup_path()
        return True, copy_info

    def save_backup_info(self, backup_key):
        """Persist the backup key under the bookkeeping names that later
        incremental/diff backups read from the meta area."""
        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # Full backup saves the backup_key; incremental backup reads it.
            save_backup_key(self.meta_area, backup_key, "LastFullCopyKey")
        if self.backup_type != BackupTypeEnum.LOG_BACKUP:
            save_backup_key(self.meta_area, backup_key, "LastBackupKey")
        if self.backup_type == BackupTypeEnum.DIFF_BACKUP:
            save_backup_key(self.meta_area, backup_key, "LastDiffBackupKey")

def stop_backup(user_name):
    """
    Stop a running backup by invoking the roach stop command as the
    database OS user.

    :param user_name: OS user that owns the database installation
    :return: True on success, False on validation or command failure
    """
    log.info("Start to stop backup task")
    # Release any bind mounts left behind by the backup job first.
    umount_bind_backup_path()
    home = GaussCluster.get_roach_home(user_name)
    # Reject a roach home containing shell metacharacters before it is
    # interpolated into the shell command below.
    if check_command_injection_exclude_quote(home):
        log.error(f"The roach_home parameter contains special characters.")
        return False
    stop_cmd = f"su - {user_name} -c '{home}{RoachConstant.ROACH_STOP}'"
    code, _, err_out = execute_cmd(stop_cmd)
    if code == "0":
        log.info("Succeed to stop backup task")
        return True
    log.error(f"Failed to stop backup, err: {err_out}")
    return False
