#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import os
import time
import sqlite3

from common.const import SubJobStatusEnum, DBLogLevel, BackupTypeEnum, CMDResult, ReportDBLabel, \
    RepositoryDataTypeEnum, JobData
from common.exception.common_exception import ErrCodeException
from common.logger import Logger
from common.util.cmd_utils import cmd_format
from common.util.exec_utils import exec_cp_cmd, su_exec_rm_cmd
from common.common_models import RepositoryPath, ScanRepositories
from informix.comm.comm import set_user, set_permisson, get_last_copy_info, remove_dir, \
    set_xbsa_user, set_xbsa_josn, check_mount, mkdir_set_permissions, \
    mount_filesystem, aggregate_single_copy_object_data, pre_log_backup_path, remove_file_dir, clean_dir, \
    output_execution_result_ex, execute_cmd, read_tmp_json_file, iif_exec_cmd_with_informix_env, \
    umount, get_last_root_dbs_time, get_informix_dir, read_file_by_line, write_file_by_line, check_command_injection, \
    mount_by_script, check_mount_path, set_informix_conf, check_xbsa_config, check_path_in_white_list, \
    modify_path_permissions, identify_env_user, output_result_file, get_remote_host_ips, \
    get_std_in_variable, remove_file
from informix.comm.common_cmd import CommonCommand
from informix.comm.common_models import SubJobDetails, LogDetail, ActionResult, CopyDataModel
from informix.comm.const import BackupType, SubJobType, SubJobPolicy, SubJobPriority, InformixPath, PERMISSION_755, \
    BackupStatus, MAX_FILE_NUMBER_OF_LOG_BACKUP, PathInfo, ErrorCode, InformixBaseCommand, BACKUP_THREAD_NUM, \
    BACKUP_MAX_MEMORY, ON_LINE_SINGLE, InformixInfo, SqlConstant, PERMISSION_644
from informix.comm.const import StatusOrVersion, InformixInstanceStatus, InformixCustomParmConstant, \
    JsonConstant, EnvNameValue, MountType
from informix.service.BaseService import MetaServiceWorker
from informix.service.backup.backup_service import InformixBackup
from informix.service.resource.resource_service import InformixInfos
from informix.util.backup import query_progress
from informix.util.scanner_utils import scan_dir_size

log = Logger().get_logger("informix_plugin.log")


class BackupTask(MetaServiceWorker):

    def __init__(self, job_manager, param_obj):
        """
        Initialize all backup-task state.

        :param job_manager: job context object carrying pid / job_id / sub_job_id
        :param param_obj: parsed job parameter accessor; most fields here are
            placeholders that init_param() fills in later
        """
        super().__init__(job_manager, param_obj)
        self.informixdir = None
        self.job_manager = job_manager
        self.param_obj = param_obj
        self.informix_backup_obj = None
        self.servernum = ""
        self.copy_id = ""
        self.start_log_id = -1
        self.resource_id = ''
        self.end_log_id = -1
        self.job_id = job_manager.job_id
        self.sub_job_id = job_manager.sub_job_id
        self.data_repository_path = ''
        self.data_repository_copy_id_path = ''
        self.custom_dict = {}
        self.env_dict = {}
        self.log_area = ''
        self.local_logbackup_path = ''
        self.backup_type = ""
        self.instance_name_curr = ''
        self.backup_dir = ''
        self.log_backup_status = False
        self.cache_file = ''
        self.cache_dir = ''
        self.meta_dir = ''
        self.log_common = ''
        self.sqlhosts_file_curr = ''
        self.onconfig_file_curr = ''
        self.is_informix_version_11 = False
        # Fix: identify_env_user() was called twice; call it once and reuse.
        env_user = identify_env_user()
        self.db_user = env_user if env_user else InformixInfo.INFORMIX_USER
        self.ixbar_sub_path = None
        self.ixbar_path = None
        self.return_result = None
        self._mount_type = ""
        self._mount_path = ''
        self._repositories = [{}]
        # NOTE(review): init_param() later assigns self._osad_ip_list, not
        # self._osad_ips — presumably the same field after a rename; confirm.
        self._osad_ips = ""
        self._osad_auth_port = ""
        self._osad_server_port = ""
        # Fix: self.pid was assigned twice (job_manager.pid, then JobData.PID);
        # only the final JobData.PID value was ever observable, so the dead
        # first assignment has been removed.
        self.pid = JobData.PID

    def init_param(self):
        """
        Populate task parameters from param_obj before any backup step runs.

        Derives copy/data/cache/meta repository paths, the two informix
        environment dictionaries consumed by resource queries and command
        execution, and the mount / OSAD transport settings.
        """
        self.informixdir = get_informix_dir(self.db_user)
        self.informix_backup_obj = InformixBackup(self.param_obj, self.job_manager)
        self.copy_id = self.param_obj.get_copy_id()
        self.servernum = self.param_obj.get_server_num()
        self.data_repository_path = self.param_obj.get_data_path()
        self.data_repository_copy_id_path = os.path.join(self.data_repository_path, self.copy_id)
        self.sqlhosts_file_curr = self.param_obj.get_sqlhosts_file_curr()
        self.resource_id = self.param_obj.get_resource_id()
        self.onconfig_file_curr = self.param_obj.get_onconfig_file_curr()
        self.instance_name_curr = self.param_obj.get_instance_name_curr()
        # Parameters for resource/status queries (uses the onconfig file PATH).
        self.custom_dict = {
            InformixCustomParmConstant.INSTANCE_NAME: self.instance_name_curr,
            InformixCustomParmConstant.SQLHOSTS_PATH: self.sqlhosts_file_curr,
            InformixCustomParmConstant.CONFIG_PATH: self.onconfig_file_curr
        }
        # Environment for executing informix commands (uses the onconfig file NAME).
        self.env_dict = {
            InformixCustomParmConstant.INSTANCE_NAME: self.instance_name_curr,
            InformixCustomParmConstant.SQLHOSTS_PATH: self.sqlhosts_file_curr,
            InformixCustomParmConstant.CONFIG_NAME: self.param_obj.get_onconfig_file_name_curr()
        }
        self.is_informix_version_11 = InformixInfos.is_informix_version_11(self.custom_dict)
        self.meta_dir = self.param_obj.get_meta_path()
        self.backup_type = self.param_obj.get_backup_type()
        self.log_common = self.informix_backup_obj.get_log_common()
        self.backup_dir = os.path.join(self.data_repository_path, self.copy_id)
        self.cache_dir = self.param_obj.get_cache_path()
        self.log_backup_status = self.param_obj.get_log_backup_status()
        self.log_area = self.param_obj.get_log_path()
        # Fix: get_local_log_backup_path() was invoked twice; call once and reuse.
        local_log_base = self.param_obj.get_local_log_backup_path(self.db_user)
        local_log_backup_path = os.path.join(local_log_base, self.resource_id) if local_log_base else ""
        self.local_logbackup_path = \
            local_log_backup_path if local_log_backup_path else self.data_repository_copy_id_path
        # informix user keeps ixbar under etc/; other env users use backups/.
        self.ixbar_sub_path = 'etc' if self.db_user == InformixInfo.INFORMIX_USER else 'backups'
        self._mount_type = self.param_obj.get_job_extend_info().get(JsonConstant.AGENT_MOUNT_TYPE, "")
        self._mount_path = os.path.join(self.local_logbackup_path, "data", "1", self.instance_name_curr)
        self._repositories = self.param_obj.get_job_repositories()
        self._osad_ip_list = get_remote_host_ips(self._repositories)
        self._osad_auth_port = get_std_in_variable(f"{EnvNameValue.IAM_OSADAUTHPORT}_{self.pid}")
        self._osad_server_port = self.param_obj.get_job_extend_info().get("OSADServerPort", "")
        self.ixbar_path = os.path.join(self.informixdir, self.ixbar_sub_path, f'ixbar.{self.servernum}')

    def backup_allow(self):
        """
        Check whether backup is allowed:
        1. The current node must be the primary node (in a cluster only the
           primary node may perform a backup).
        2. The database must be on-line.
        3. For log backup, log backup must be enabled; for informix 11 the
           xbsa library must be configured in onconfig.
        :return: None; failures are reported through update_result()
        """
        def _report(err_code):
            # Publish an error ActionResult back to the framework.
            result = ActionResult(code=err_code)
            result.body_err = err_code
            self.return_result = result
            self.update_result()

        status = InformixInfos.get_informix_version_or_status(self.custom_dict, StatusOrVersion.STATUS)
        if self.param_obj.is_cluster:
            online = status in InformixInstanceStatus.ON_LINE_CLUSTER_MASTER_NODE
        else:
            online = status in ON_LINE_SINGLE
        if not online:
            log.info("The status of current node is not on-line. %s", self.log_common)
            _report(ErrorCode.DB_NOT_RUNNING.value)

        if self.backup_type == BackupTypeEnum.LOG_BACKUP and not self.log_backup_status:
            log.info("Log backup is disabled. %s", self.log_common)
            _report(ErrorCode.DB_ARCH_NOT_OPEN.value)
        if self.is_informix_version_11 and not check_xbsa_config(self.onconfig_file_curr):
            log.error(f"The xbsa library is not configured, {self.log_common}.")
            _report(ErrorCode.ERR_DB_NOT_CONFIG_XBSA.value)

    def check_backup_job_type(self):
        """
        Validate the requested backup type against the most recent copy:
        1. Full / log backup: always allowed, return immediately.
        2. Diff: requires a usable base copy; converted when only a full exists.
        3. Incr: requires a base copy from the same instance.

        NOTE(review): the numeric literals below (backup types 1/2/3/4, error
        codes 1577209901 / 1577210128, ErrCodeException(1)) presumably map to
        BackupTypeEnum / ErrorCode constants — confirm before replacing them.
        """
        log.info("Enter check_backup_job_type service.%s", self.backup_type)
        if self.backup_type not in [1, 2, 3, 4]:
            raise ErrCodeException(1)
        if self.backup_type == 1 or self.backup_type == 4:
            # Full (1) and log (4) backups need no base copy.
            return
        # Default answer: report the "convert" result unless a usable base copy is found.
        self.return_result = ActionResult(code=1577209901, body_err=1577209901)
        try:
            copy_info = get_last_copy_info(self.backup_type, self.job_id, self.sub_job_id,
                                           self.param_obj.get_job_protectobject())
        except Exception as e_obj:
            log.exception(e_obj)
            self.update_result()
            return
        if not copy_info:
            # No previous copy at all: fall back to the default conversion result.
            self.update_result()
            return
        if self.backup_type == 2 and copy_info.get('type') == 'full':
            # A diff copy depends on an incremental copy; when only a full copy
            # exists before the diff backup, convert the diff to an incremental.
            log.info("Transfer diff to increment")
            self.return_result = ActionResult(code=1577210128, body_err=1577210128)
            self.update_result()
            return
        base_copy_instance = copy_info.get('extendInfo', {}).get("instance_name", '')
        if base_copy_instance != self.instance_name_curr:
            # The base copy belongs to a different instance and cannot be used.
            self.update_result()
            return
        return

    def pre_config_xbsa_library(self):
        """
        Configure the xbsa library for onbar:
        1. Register the xbsa backup device (automatic below informix 11;
           manual on informix 11).
        2. Prepare the xbsa version file (sm_versions).
        3. Write the informix configuration for the plugin user.
        :return: True on success, False otherwise
        """
        if self.is_informix_version_11:
            # Informix 11 requires the xbsa library to be configured manually.
            log.info("Current informix version is 11, you should manual set xbsa lib.")
        elif not self.informix_backup_obj.add_backup_file_device():
            log.error("Set xbsa lib fail. %s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED)
            return False

        # Prepare the xbsa library version file.
        if not self.pre_sm_version_file():
            return False

        if not set_informix_conf(self.db_user):
            return False
        return True

    def pre_config_log_backup(self):
        """
        Enable automatic logical-log backup when the job requests it
        (not needed / not automated on informix 11).
        :return: True on success or when log backup is disabled, False otherwise
        """
        log.info(f"Start config log backup, log_backup_status: {self.log_backup_status}.")
        if self.log_backup_status and not self.is_informix_version_11:
            log_backup_tool_path = os.path.join(self.informixdir, InformixPath.LOG_BACKUP_TOOL_PATH)
            if not self.informix_backup_obj.open_log_auto_backup(log_backup_tool_path):
                self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
                # Fix: the original passed an f-string plus a stray extra
                # argument with no %-placeholder, which triggers a logging
                # format error; use lazy %-formatting instead.
                log.error("Failed to enable automatic log backup. %s", self.log_common)
                return False
        else:
            log.info("Log backup is not enabled")
        return True

    def pre_sm_version_file(self):
        """
        Ensure the xbsa version file (sm_versions) exists — informix 11 only.
        :return: True on success or when nothing needs to be done
        """
        if not self.is_informix_version_11:
            log.info("The current version is not informix 11, and the sm_versions file is not required.")
            return True
        etc_dir = os.path.join(self.informixdir, "etc")
        dst_sm_version_file = os.path.join(etc_dir, "sm_versions")
        if os.path.exists(dst_sm_version_file):
            log.info("The sm_versions file already exists.")
            return True
        # Seed sm_versions from the template shipped with informix.
        src_sm_version_file = os.path.join(etc_dir, "sm_versions.std")
        if not exec_cp_cmd(src_sm_version_file, dst_sm_version_file, self.db_user, None):
            log.error(f"Failed to generate the sm_versions file. {self.log_common}")
            return False
        log.info("The sm_versions file is generated successfully.")
        return True

    def backup_pre_task(self):
        """
        Pre-task executed before the backup sub-job:
        1. Configure xbsa / automatic log backup for onbar.
        2. Create the copy path and cache tmp directory.
        3. Record cache and task info files for the xbsa library to read.

        Progress (RUNNING / FAILED / COMPLETED) is reported through
        _support_progress; the method always returns None.
        """
        log.info(f"Enter backup_pre_task")
        self._support_progress(SubJobStatusEnum.RUNNING, progress=1, log_level=DBLogLevel.INFO)
        # Configure the xbsa dynamic library address used by onbar.
        if not self.pre_config_xbsa_library() or not self.pre_config_log_backup():
            log.error("Failed to set config for onbar.")
            return

        if self.backup_type == BackupTypeEnum.FULL_BACKUP:
            # A full backup starts from a clean xbsa metadata db.
            if not self.delete_xbsa_db_file():
                self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
                return

        # Create the tmp directory in the cache repository.
        cache_tmp_path = os.path.join(self.cache_dir, "tmp", self.copy_id)
        ret = CommonCommand.create_dir(cache_tmp_path, self.db_user)
        if not ret:
            log.error("Failed to create %s.", cache_tmp_path)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return

        if not set_xbsa_user(self.db_user):
            log.error("Failed to set xbsa user.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not set_xbsa_josn():
            log.error("Set xbsa xml fail.%s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        # Delegate to the backup-type-specific preparation.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self.log_backup_pre_task()
            return
        self.data_backup_pre_task()
        log.info(f"End backup_pre_task success")
        return

    def delete_xbsa_db_file(self):
        """
        Remove the previous xbsa metadata db for this instance so a full
        backup starts clean.
        :return: True when the file is absent or removed, False on failure
        """
        db_file = os.path.join(PathInfo.XBSA_META_PATH, "objectmeta", self.instance_name_curr,
                               f"{self.instance_name_curr}.db")
        if not os.path.exists(db_file):
            log.info(f"DB file Not exist: {db_file}, nothing to do.")
            return True
        ret = su_exec_rm_cmd(db_file)
        if ret:
            return True
        log.error(f"Rm Pre DB file failed. ret: {ret}")
        return False

    def data_backup_pre_task(self):
        """
        Prepare a data (full/diff/incr) backup sub-job: create the copy
        directory, write cache/task/db info for xbsa, and validate/mount the
        log repository when automatic log backup is enabled.

        Progress is reported through _support_progress; always returns None.
        """
        log.info(f"Enter data_backup_pre_task..")
        ret = CommonCommand.create_dir(self.data_repository_copy_id_path, self.db_user)
        if not ret:
            log.error("Failed to create %s path.", self.data_repository_copy_id_path)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not self.pre_cache_info():
            log.error(f"Failed to pre chche info.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not self.pre_task_info():
            log.error(f"Failed to pre task info.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not self.per_db_path():
            log.error("Failed to pre db path.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return

        # Verify the log mount; a stale mount is unmounted inside check_log_mount.
        if not self.check_log_mount(self._mount_path):
            log.error("Failed to check log mount path.%s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1,
                                   log_detail=ErrorCode.ERROR_UMOUNT_LOG_PATH.value, log_level=DBLogLevel.ERROR)
            return
        # With log backup on, the per-resource path persists across jobs,
        # so it is keyed by resource_id instead of job_id.
        task_id = self.job_id if not self.log_backup_status else self.resource_id
        if not pre_log_backup_path(self.local_logbackup_path, self.instance_name_curr, task_id, "backup", self.db_user):
            log.error("Failed to pre log backup path.%s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if self.log_backup_status and not self.mount_log_path():
            log.error("Failed to open log backup.%s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        self._support_progress(SubJobStatusEnum.COMPLETED, progress=100, log_level=DBLogLevel.INFO)
        return

    def check_log_mount(self, mount_path):
        """
        Validate the log-repository mount at *mount_path*; a stale mount is
        unmounted so it can be re-established later.
        :param mount_path: local path where the log file system is mounted
        :return: True when the mount is healthy or was cleaned up, else False
        """
        log.info("Enter check_log_mount")
        if not self.log_backup_status:
            # Nothing to validate when log backup is disabled.
            return True
        if self.param_obj.is_san_client_involved():
            log.info("Enter is_san_client_involved")
            mounted_ok = check_mount_path(mount_path)
        else:
            log.info("Enter checks")
            mounted_ok = check_mount(mount_path, self.resource_id)
        if mounted_ok:
            set_user(mount_path, self.db_user)
            return True
        # Stale or wrong mount: tear it down so it can be remounted.
        if umount(mount_path):
            return True
        self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
        return False

    def mount_log_path(self):
        """
        Mount the remote log repository onto the local log backup path and
        create the per-instance log backup directory inside it.

        Handles three transports: SAN client (mount_by_script), regular NFS
        mount (mount_filesystem), and FUSE, where the remote root is mounted
        to a side directory and a symlink keeps the expected layout.
        :return: True on success, False on any failure
        """
        remote_path, ip = self.param_obj.get_log_remotepath_ip()
        if not all((remote_path, ip)):
            return False
        if check_mount(self._mount_path, self.resource_id):
            # Already mounted correctly; just ensure ownership.
            set_user(self._mount_path, self.db_user)
            return True
        if not os.path.exists(self._mount_path):
            if not mkdir_set_permissions(os.path.join(self.local_logbackup_path, "data", "1"), self.db_user):
                return False
            # For FUSE the final path is a symlink created below, not a directory.
            if self._mount_type != MountType.FUSE and not mkdir_set_permissions(self._mount_path, self.db_user):
                return False
        real_mount_path = self._mount_path
        if self._mount_type == MountType.FUSE:
            # FUSE mounts the remote root at a sibling "remote" directory.
            real_mount_path = os.path.join(self.local_logbackup_path, "data", "remote")
            if not mkdir_set_permissions(real_mount_path, self.db_user):
                return False
        log.info(f"Real log mount path is: {real_mount_path}.")
        log.info("San client is involved: %s", self.param_obj.is_san_client_involved())
        if self.param_obj.is_san_client_involved():
            ret = mount_by_script(self.job_id, self.param_obj.get_log_repository(),
                                  self._mount_path, self.param_obj.get_job_extend_info())
        else:
            ret = mount_filesystem(ip, remote_path, real_mount_path, self._mount_type,
                                   [self._osad_ip_list, self._osad_auth_port, self._osad_server_port, self.job_id])
        if not ret:
            return False

        if self._mount_type == MountType.FUSE:
            src = os.path.join(real_mount_path, os.path.basename(remote_path))
            # E1000 local-disk scenario: the file system is mounted via FUSE
            # (the remote mount point can only be the root directory), so a
            # symlink is created to keep the directory layout consistent with
            # the regular mount case.
            # Remove the directory first, then create the symlink.
            remove_dir(self._mount_path)
            log.info(f"Fuse mounting scenario to create soft connections: src: {src}, dst: {self._mount_path}")
            os.symlink(src, self._mount_path)

        log_backup_dir = os.path.join(self._mount_path, self.instance_name_curr)
        if not mkdir_set_permissions(log_backup_dir, self.db_user, self._mount_type):
            log.error("Failed to create the log backup directory.")
            return False
        return True

    def log_backup_pre_task(self):
        """
        Prepare a log backup sub-job: create the log copy and cache
        directories, remount the log repository if the mount is gone, and
        record cache/task info for xbsa.

        Progress is reported through _support_progress; always returns None.
        """
        # Create the log copy directory in the log repository.
        path = os.path.join(self.log_area, self.copy_id)
        log.info(f"Log path: {path}")
        ret = CommonCommand.create_dir(path, self.db_user)
        if not ret:
            # Fix: the original logged data_repository_copy_id_path here,
            # but the directory that failed to be created is `path`.
            log.error("Failed to create %s path.%s", path, self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        # Create the per-job cache directory.
        path = os.path.join(self.cache_dir, self.job_id)
        log.info(f"Cache path: {path}")
        ret = CommonCommand.create_dir(path, self.db_user)
        if not ret:
            log.error("Failed to create %s path.%s", path, self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not check_mount(self._mount_path, self.resource_id):
            log.error("No file system is mounted to the local log backup path.%s", self.log_common)
            # Clean up any stale mount, then remount the log repository.
            if not self.check_log_mount(self._mount_path) or not self.mount_log_path():
                log.error(f"Failed to remount the log repository. {self.log_common}")
                self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
                return
        if not self.pre_cache_info():
            # Fix: corrected the "chche" typo in the original message.
            log.error(f"Failed to pre cache info.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not pre_log_backup_path(self.local_logbackup_path, self.instance_name_curr, self.job_id,
                                   "backup", self.db_user):
            log.error("Failed to pre log backup path.%s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        if not self.pre_task_info():
            log.error(f"Failed to pre task info.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        self._support_progress(SubJobStatusEnum.COMPLETED, 100, 0, DBLogLevel.INFO)
        return

    def gen_sub_job(self):
        """Generate the single business sub-job, pinned to this host."""
        node_uuid = InformixInfos.get_host_uuid()
        sub_job = self.build_sub_job(SubJobPriority.JOB_PRIORITY_1, node_uuid, "", "informix-subjob")
        self.return_result = [sub_job]
        self.update_result()

    def backup(self):
        """
        Execute the backup sub-job:
        1. Environment (INFORMIXSQLHOSTS / ONCONFIG / INFORMIXSERVER) and the
           xbsa library are assumed configured by the pre-task.
        2. Query the current logical log id before backup (onstat -l).
        3. Run the backup command (onbar -b -L <level>).
        4. Query the logical log id again after backup.
        5. Copy the emergency boot files (onconfig.<instance>, oncfg_<instance>.<servernum>).
        6. Copy all log copies between the two logical ids when log backup is enabled.
        :return: None; progress/result is reported via support_subjob_progress
        """
        # Report initial progress.
        self._support_progress(SubJobStatusEnum.RUNNING, 1, 0, DBLogLevel.INFO, 1)
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            self.log_backup()
            return
        # Logical log id before the backup.
        self.start_log_id = self.informix_backup_obj.get_logical_id()
        if not self.start_log_id:
            log.warn("Failed to obtain logical id before backup operation.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            # Fix: the original fell through and kept executing the backup
            # after reporting failure; abort like every other failure path.
            return
        # Map the job backup type onto the onbar backup level.
        backup_type_para = BackupType.BACKUP_TYPE_DICT.get(self.backup_type, -1)
        if backup_type_para == -1:
            log.error("Failed to obtain backup type.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        file_path = os.path.join(self.cache_dir, f"{self.sub_job_id}_backup_info.txt")
        self.touch_backup_tmp_file(file_path)
        ret, _ = self.informix_backup_obj.exec_backup(backup_type_para, self.log_backup_status, file_path)
        if not ret:
            log.error(f"Failed to exec backup--{backup_type_para} operation.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return

        ret, backup_time = get_last_root_dbs_time(self.ixbar_path)
        if not ret:
            # Fix: the original reused the "exec backup" message here; this
            # step reads the last rootdbs backup time from the ixbar file.
            log.error("Failed to get last root dbs backup time from %s.", self.ixbar_path)
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return

        # Logical log id after the backup.
        self.end_log_id = self.informix_backup_obj.get_logical_id()
        if not self.end_log_id:
            # Fix: the original message said "before"; this is the post-backup query.
            log.warn("Failed to obtain logical id after backup operation.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        if self.log_backup_status and not self.copy_log(self.start_log_id, self.end_log_id):
            log.error(f"Failed to copy log.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        if not self.update_db_file() or not self.update_meta_db():
            log.error("Failed to updata db file.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        # Copy the emergency boot files into the copy directory.
        ret = self.copy_emergency_boot_files()
        if not ret:
            log.error(f"Failed to copy emergency boot files.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        if not self.informix_backup_obj.support_info(backup_time):  # report copy info
            log.error(f"Failed to delete backup device.")
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        self.report_complete()
        return

    def report_complete(self):
        """Report sub-job completion together with the measured copy size."""
        self.support_subjob_progress(SubJobStatusEnum.COMPLETED, data_size=self.get_copy_size())

    def touch_backup_tmp_file(self, file_path):
        """
        Best-effort creation/permission fix of the backup info tmp file;
        failures are logged and deliberately ignored.
        :param file_path: path of the backup info file in the cache repo
        """
        try:
            modify_path_permissions(file_path)
        except Exception as err:
            log.error(f"exception: {err}")

    def copy_log(self, logical_id_before, logical_id_after):
        """
        Copy the logical log copies produced during the backup window into
        the data repository copy directory.

        For each log id in [logical_id_before, logical_id_after) the method
        waits (with up to 3 retries, 30s apart) for the log backup set to be
        generated and the file to land on disk, then copies it.

        NOTE(review): range() excludes logical_id_after — presumably the
        newest logical log is still active and not yet backed up; confirm.
        :return: True when all logs are copied, False on failure
        """
        log.info(f"Start copying logs, logical_id_before: {logical_id_before}, logical_id_after: {logical_id_after}.")
        log_path = os.path.join(self._mount_path, self.instance_name_curr, self.servernum)
        server_num_path = os.path.join(self.data_repository_copy_id_path, 'data/1/', self.instance_name_curr,
                                       self.instance_name_curr, self.servernum)
        mkdir_set_permissions(os.path.join(self.data_repository_copy_id_path, 'data/1/', self.instance_name_curr),
                              self.db_user)
        mkdir_set_permissions(server_num_path, self.db_user)
        for log_id in range(int(logical_id_before), int(logical_id_after)):
            is_backup_log = False
            restart_log_progress_times = 0
            while True:
                # Has the log backup set been generated yet?
                if not self.check_log_backupset(log_id) and not is_backup_log:
                    # Trigger the logical log backup once, then re-check.
                    is_backup_log = True
                    self.backup_logical_log()
                    continue
                elif self.check_logical_log_file_exist(log_id):
                    break
                if restart_log_progress_times == 3:
                    log.error("The log backup process does not exist.%s", self.log_common)
                    self._support_progress(SubJobStatusEnum.FAILED, 1,
                                           log_detail=ErrorCode.ERR_LOG_BACKUP_START_FAIL.value,
                                           log_level=DBLogLevel.ERROR)
                    return False

                restart_log_progress_times += 1
                log.warn(f"The log copy is not generated. The number of waits is: {restart_log_progress_times}")
                time.sleep(30)

            src_path = os.path.join(log_path, str(log_id))
            # FUSE mounts need the file-level copy helper; others can cp directly.
            if self._mount_type == MountType.FUSE:
                if not self.exec_cp_file(src_path, server_num_path):
                    return False
            else:
                ret = exec_cp_cmd(src_path, server_num_path, self.db_user, None)
                if not ret:
                    return False
        log.info("Copying the log copy succeeded.")
        return True

    def check_log_backupset(self, log_id: int):
        """
        Check via `onstat -l` whether logical log *log_id* is flagged as
        backed up (its status line contains "-B-").
        :param log_id: logical log id to look for
        :return: True when the log is marked backed up, False otherwise
        """
        log.info("Enter check log backupset.")
        return_code, out, err = iif_exec_cmd_with_informix_env(self.env_dict,
                                                               InformixBaseCommand.QUERY_STATUS, self.db_user)
        if return_code != CMDResult.SUCCESS:
            log.error('Fail to exec onstat -l,out:%s, err:%s', out, err)
            return False
        log.debug("out:%s.", out)
        # Keep only the lines whose flags contain -B- (backed up).
        logical_line = [line.strip() for line in out.split("\n") if "-B-" in line.strip()]
        if not logical_line:
            log.warn("Failed to obtain logical line.")
            return False
        log.debug("logical_line:%s.", logical_line)
        target_id = str(log_id)  # hoisted out of the loop
        for logical_info in logical_line:
            logical_line_elem = [elem.strip() for elem in logical_info.split(" ") if elem.strip()]
            log.warn("logical_line_elem.%s", logical_line_elem)
            if len(logical_line_elem) < 8:
                log.warn("Failed to obtain correct logical line.")
                continue
            # Column 3 is the logical log number (column 2, the flag field,
            # was unpacked but unused in the original and has been dropped).
            logical_id = logical_line_elem[3]
            if logical_id == target_id:
                log.info(f"Log {log_id} marked as backed up")
                return True
        log.warn(f"The backup set is not generated, log_id: {log_id}.")
        return False

    def check_logical_log_file_exist(self, log_id: int):
        """
        Check whether the file for logical log *log_id* has landed in the
        mounted log path; waits briefly after it appears so the write settles.
        :param log_id: logical log id to check
        :return: True when the file exists on disk, False otherwise
        """
        log.info(f"Check whether logs: {log_id} exist.")

        # 1. Has the file landed on disk?
        logical_log_file_path = os.path.join(
            self._mount_path, self.instance_name_curr, self.servernum, str(log_id))
        if not os.path.exists(logical_log_file_path):
            log.warn(f"Log copy not generated, log_id: {log_id}")
            return False

        log.info(f"Logical log: {log_id} are archived, file path: {logical_log_file_path}.")
        # 2. Give the file time to finish flushing to disk.
        time.sleep(10)
        return True

    def check_logical_log_copies(self, log_id: int):
        """
        Return True only when logical log *log_id* is both marked backed up
        and its file has landed on disk.
        """
        log.info("Start to check the log backup progress.")
        return self.check_log_backupset(log_id) and self.check_logical_log_file_exist(log_id)
    
    def query_scan_repositories(self):
        """
        Report which repositories the framework should scan for this copy:
        log backups scan the log area plus the per-copy meta directory;
        data backups scan the data dir's parent plus the meta directory.
        """
        log.info(f"Start to query_scan_repositories task.")
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            meta_copy_path = os.path.join(self.meta_dir, "meta", self.copy_id)
            save_path = os.path.dirname(self.meta_dir)
            data_path = self.log_area
        else:
            meta_copy_path = self.meta_dir
            save_path = self.meta_dir
            data_path = os.path.dirname(self.backup_dir)
        log.info(f"Query scan path, {meta_copy_path}, data_path:{data_path},save_path:{save_path}")
        repo_list = [
            RepositoryPath(repositoryType=RepositoryDataTypeEnum.DATA_REPOSITORY, scanPath=data_path),
            RepositoryPath(repositoryType=RepositoryDataTypeEnum.META_REPOSITORY, scanPath=meta_copy_path),
        ]
        scan_repos = ScanRepositories(scanRepoList=repo_list, savePath=save_path)
        output_result_file(self.pid, scan_repos.dict(by_alias=True))
        log.info(f"Query_scan_repositories task exec success.")
        
        
    def backup_post_job(self):
        """
        Clean up after the backup job:
        1. On success: clear the cache repository.
        2. On failure: also remove the copy-id directory in the data repo;
           for a successful log backup, delete expired log backup sets.
        3. Unmount the log path and remove its parent directory.
        :return: None; progress is reported via _support_progress
        """
        log.info("Start to execute post task.")
        # Report initial progress.
        self._support_progress(SubJobStatusEnum.RUNNING, 1, log_level=DBLogLevel.INFO, data_size=1)
        # Fix: renamed the misspelled local `backup_rusult`; truthy means the
        # backup job FAILED (get_backup_job_result reports the failure flag).
        backup_result = self.param_obj.get_backup_job_result()
        if backup_result:
            log.error(f"Failed extc backup job, clean data repository copyid path")
            remove_dir(self.data_repository_copy_id_path)
        if self.backup_type == BackupTypeEnum.LOG_BACKUP and not backup_result:
            self.remove_old_log_backupset()
        clean_dir(self.cache_dir)
        umount(self._mount_path, self._mount_type)
        remove_dir(os.path.dirname(self._mount_path))
        # NOTE: the original tracked a `post_status` flag that was never set
        # to False, so the post task unconditionally reported COMPLETED/INFO;
        # the dead conditionals have been removed.
        self._support_progress(SubJobStatusEnum.COMPLETED, 100, 0, DBLogLevel.INFO, 1)

    def remove_old_log_backupset(self):
        """
        Delete the expired log files listed in the cache file log_copy_list.txt.

        :return: True when the list was processed, False when it is empty.
        """
        log.info("Start to delete expired logs.")
        list_file = os.path.join(self.cache_dir, self.job_id, 'log_copy_list.txt')
        expired_files = read_tmp_json_file(list_file).get("log_list", '')
        log.info(f"Expired Log List: {expired_files}.")
        if not expired_files:
            return False
        use_fuse = self._mount_type == MountType.FUSE
        for log_file in expired_files:
            removed = remove_file(log_file) if use_fuse else remove_file_dir(log_file)
            if not removed:
                log.warning('Fail to remove file:%s', log_file)
        return True

    def backup_abort_job(self):
        """
        Cancel a running backup task manually; in theory this should report
        whether the cancellation succeeded:
        1. Find the backup process.
        2. Kill it.
        :return: None (not implemented yet).
        """
        pass

    def build_sub_job(self, job_priority, exec_node_id, job_info, job_name):
        """
        Build the description dict of one business sub job.

        :param job_priority: scheduling priority of the sub job
        :param exec_node_id: uuid of the host that must execute the sub job
        :param job_info: opaque payload forwarded to the sub job
        :param job_name: display name of the sub job
        :return: dict describing the sub job
        """
        return {
            "jobId": self.job_id,
            "subJobId": "",
            "jobType": SubJobType.BUSINESS_SUB_JOB,
            "jobName": job_name,
            "jobPriority": job_priority,
            "policy": SubJobPolicy.FIXED_NODE,
            "ignoreFailed": False,
            "execNodeId": exec_node_id,  # host uuid
            "jobInfo": job_info,
        }

    def pre_cache_info(self):
        """
        Write the cache-info file that the xbsa library reads.

        :return: True when the file permission was set successfully.
        """
        instance = self.instance_name_curr
        cache_file = os.path.join(InformixPath.AGENT_STEMP_PATH,
                                  f"informix_{instance}_{instance}_backup_data.txt")
        output_execution_result_ex(cache_file, {
            "cacheRepoPath": self.cache_dir,
            "metaRepoPath": self.meta_dir,
            "copyId": self.copy_id,
            "taskId": self.job_id,
            "hostKey": instance
        })
        return set_permisson(cache_file, PERMISSION_644)

    def pre_task_info(self):
        """
        Generate the backup path info file (taskInfo_<instance>.txt) that the
        xbsa library reads, then open up its permissions.

        :return: True on success, False when any chmod step fails.
        """
        task_info = {
            "repositories": [{
                "role": 0,  # role of this repository: 0-master, 1-slave
                "deviceSN": "xxxx",  # device ESN
                "filesystems": [{  # data-repo file systems only; cache/meta repos come from dws_cacheInfo.txt
                    "id": "xxx",
                    "name": "xxx",
                    "sharePath": "xxx",
                    "mountPath": []  # local mount points created by the framework (NFS/dataturbo agnostic)
                }]
            }],
            "archiveFileServers": [{
                "ip": "xxx",
                "port": 30066,
                "sslEnabled": ""
            }],
            "taskType": 0,  # task type: 0-backup, 1-restore, 2-delete
            "copyType": 1  # copy type, matches the agent thrift enum CopyDataType
        }
        task_file_path = os.path.join(self.cache_dir, "tmp", self.copy_id, f'taskInfo_{self.instance_name_curr}.txt')
        repositories = task_info.get("repositories", [])
        if not repositories:
            return False
        filesystems = repositories[0].get("filesystems", [])
        if not filesystems:
            return False
        # The copy data lands in the log area for log backups, otherwise in
        # the data repository under the copy id.
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            copy_path = self.local_logbackup_path
        else:
            copy_path = os.path.join(self.data_repository_path, self.copy_id)
        filesystems[0]["mountPath"] = [copy_path]
        output_execution_result_ex(task_file_path, task_info)
        # Open up the tmp tree and the info file (dirs 755, file 644).
        permission_plan = (
            (os.path.join(self.cache_dir, "tmp"), PERMISSION_755),
            (os.path.join(self.cache_dir, "tmp", self.copy_id), PERMISSION_755),
            (task_file_path, PERMISSION_644),
        )
        for path, permission in permission_plan:
            if not set_permisson(path, permission):
                log.error("Set permisson fail %s.", path)
                return False
        return True

    def per_db_path(self):
        """
        Prepare the meta/<copy_id>/objectmeta/<instance> directory tree for
        the copy metadata and hand it over to the rdadmin user.

        :return: True on success, False when any mkdir/chmod/chown step fails.
        """
        meta_root = os.path.join(self.meta_dir, "meta")
        copyid_path = os.path.join(meta_root, self.copy_id)
        db_path = os.path.join(copyid_path, 'objectmeta', self.instance_name_curr)
        if not CommonCommand.create_dir(copyid_path, user="rdadmin"):
            log.error("Failed to create %s.", copyid_path)
            return False
        if not set_permisson(meta_root, PERMISSION_755):
            log.error("Set permisson fail %s.", meta_root)
            return False
        if not CommonCommand.create_dir(db_path, user="rdadmin"):
            log.error("Failed to create %s.", db_path)
            return False
        if not set_user(meta_root, 'rdadmin'):
            log.error("Set user fail %s.", meta_root)
            return False
        if not set_user(copyid_path, "rdadmin"):
            log.error("Set user fail %s.", copyid_path)
            return False
        return True

    def backup_logical_log(self):
        """
        Force Informix to switch to the next logical log and wait until the
        previously current logical log has been archived.

        :return: True when the log is archived within the retry window,
                 False when the switch command fails or archiving times out.
        """
        log.info("Start backup logical log.")
        current_logical_log_id = self.informix_backup_obj.get_logical_id()
        ret, _, _ = iif_exec_cmd_with_informix_env(self.env_dict,
                                                   InformixBaseCommand.ARCH_CURRENT_LOG, self.db_user)
        if ret != CMDResult.SUCCESS:
            # Pure lazy %-formatting; the original mixed an f-string with a
            # trailing %s argument.
            log.error("Failed to exec cmd: %s, %s.", InformixBaseCommand.ARCH_CURRENT_LOG, self.log_common)
            return False

        # Poll up to 7 times (10s apart) for the archived log copy to appear.
        check_times = 0
        while True:
            if self.check_logical_log_copies(current_logical_log_id):
                break
            if check_times == 6:
                log.error("Failed to archive logical logs.")
                return False
            # log.warning replaces the deprecated log.warn alias.
            log.warning(f"Waiting for arch log, log_id: {current_logical_log_id}, check_times: {check_times}.")
            check_times += 1
            time.sleep(10)
        log.info("End backup logical log.")
        return True
    
    def get_start_end_log_id(self):
        """
        Parse the ixbar emergency boot file and record the smallest and
        largest logical log ids found in it (self.start_log_id /
        self.end_log_id).

        :return: True when at least one log id was found, False otherwise.
        """
        log_ids = []
        with open(self.ixbar_path, 'r') as file:
            for line in file:
                fields = line.split()
                # Skip blank or short lines (the original indexed [1]
                # unconditionally and could raise IndexError); the log id is
                # the second whitespace-separated field.
                if len(fields) < 2 or not fields[1].isdigit():
                    continue
                log_ids.append(int(fields[1]))
        if not log_ids:
            log.error("get_start_end_log_id failed.")
            return False
        log.info(f"get_start_end_log_id: {log_ids}")
        self.start_log_id = min(log_ids)
        self.end_log_id = max(log_ids)
        return True

    def log_backup(self):
        """
        Run a log backup:
        archive the current logical log, copy the archived log files to the
        log area in batches, persist bookkeeping metadata (file list, sqlite
        db, log-id range) and report the final sub-job progress.
        """
        if not self.backup_logical_log():
            log.warn("Failed to force to next logical log.")
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        # Determine the [start_log_id, end_log_id] range from the ixbar file.
        if not self.get_start_end_log_id():
            self._support_progress(SubJobStatusEnum.FAILED, 1, log_level=DBLogLevel.ERROR)
            return
        file_list = self.get_log_files()
        if not file_list:
            log.error(f"Failed to obtain the log backup set.%s", self.log_common)
            self._support_progress(SubJobStatusEnum.FAILED, 1,
                                   log_detail=ErrorCode.ERROR_NO_LOG_CREATE.value, log_level=DBLogLevel.ERROR)
            return
        # Copy the archived log files in batches of MAX_FILE_NUMBER_OF_LOG_BACKUP.
        for i in range(0, len(file_list), MAX_FILE_NUMBER_OF_LOG_BACKUP):
            params_dict = {
                "job_id": self.job_id,
                "files": file_list[i:i + MAX_FILE_NUMBER_OF_LOG_BACKUP],
                "destination": os.path.join(self.log_area, self.copy_id),
                "write_meta": False,
                "thread_num": BACKUP_THREAD_NUM,
                "max_memory": BACKUP_MAX_MEMORY
            }
            result = self.backup_files_ex(params_dict)
            if not result:
                # NOTE(review): a failed batch only breaks out of the loop;
                # the job still saves the file list and may report success
                # below — confirm whether this should fail the sub job.
                log.error("Failed to backup wal file:%s.%s", len(file_list), self.log_common)
                break
        # Record which files went into this copy so the post job can expire them.
        self.save_log_file_list(file_list)
        if not self.copy_db_file() or not self.update_meta_db():
            self.support_subjob_progress(SubJobStatusEnum.FAILED)
            return
        if not self.informix_backup_obj.support_log_info(self.start_log_id, self.end_log_id):
            self._support_progress(SubJobStatusEnum.FAILED, 1, ErrorCode.ERR_INVALID_LOG_COPY.value, DBLogLevel.ERROR,
                                   1)
            return
        copy_size = self.get_copy_size()
        # Best effort: emergency boot files are copied but failures are not fatal.
        self.copy_emergency_boot_files()
        self.support_subjob_progress(SubJobStatusEnum.COMPLETED, data_size=copy_size)
        return

    def backup_files_ex(self, params: dict):
        """
        Copy log files one by one (the file-set backup library is not
        available on AIX, so the file-set backup tool cannot be used).

        :param params: dict with 'files' (list of source paths) and
                       'destination' (target directory)
        :return: True when every file was copied, False on the first failure.
        """
        log.info(f"Start backup log file. {self.log_common}")
        files = params.get('files', [])
        destination = params.get('destination')
        for filename in files:
            if self._mount_type == MountType.FUSE:
                ret = self.exec_cp_file(filename, destination)
            else:
                ret = exec_cp_cmd(filename, destination, self.db_user, None)
            if not ret:
                # Report the failing file (the original logged a literal
                # "(unknown)" placeholder).
                log.error('Failed to back up logs:%s', filename)
                return False
        log.info("End backup log file.")
        return True

    def save_log_file_list(self, file_list):
        """
        Persist the list of backed-up log files so the post job can expire them.

        :param file_list: absolute paths of the log files in this backup set
        :return: True always (the write helper handles its own errors)
        """
        list_path = os.path.join(self.cache_dir, self.job_id, 'log_copy_list.txt')
        log.info(f"{list_path}")
        output_execution_result_ex(list_path, {"log_list": file_list})
        return True

    def get_log_files(self):
        """
        Collect the archived logical log files of the current instance.

        Only plain files whose name is purely numeric (the logical log id)
        qualify; directories and other files are skipped.

        :return: list of absolute file paths
        """
        backupset_dir = os.path.join(self._mount_path, self.instance_name_curr, self.servernum)
        return [
            os.path.join(backupset_dir, name)
            for name in os.listdir(backupset_dir)
            if name.isdigit() and os.path.isfile(os.path.join(backupset_dir, name))
        ]

    def log_backup_progress(self):
        """
        Poll the backup progress every 10 seconds until a terminal state is
        reached.

        :return: True when the backup completed, False on failure or an
                 unknown status. (The unreachable `return True` that followed
                 the infinite loop in the original has been removed.)
        """
        while True:
            time.sleep(10)
            status, progress, data_size = query_progress(self.job_id)
            log.info(f"Get backup progress: status:{status}, progress:{progress}, "
                     f"data_size:{data_size}.")
            if status == BackupStatus.RUNNING:
                continue
            if status == BackupStatus.COMPLETED:
                log.info(f"Backup completed, jobId: {self.job_id}.")
                return True
            if status == BackupStatus.FAILED:
                log.error(f"Backup failed, jobId: {self.job_id}.")
                return False
            # Any other status is treated as a failure.
            log.error(f"Backup failed, status error jobId: {self.job_id}.")
            return False

    def update_db_file(self):
        """
        Aggregate the xbsa object-meta sqlite db of the current instance into
        the copy metadata directory.

        :return: True on success, False when aggregation fails.
        """
        db_name = f"{self.instance_name_curr}.db"
        dst_db_path = os.path.join(self.meta_dir, "meta", self.copy_id, 'objectmeta',
                                   self.instance_name_curr, db_name)
        src_db_path = os.path.join(PathInfo.XBSA_META_PATH, "objectmeta",
                                   self.instance_name_curr, db_name)
        model = CopyDataModel(src_db_path, dst_db_path, job_id=self.job_id, db_user=self.db_user)
        return bool(aggregate_single_copy_object_data(model, False))

    def copy_db_file(self):
        """
        Distribute the xbsa sqlite db of the current instance to the log copy
        directory, the meta repository and the cache repository, then hand the
        copies over to the rdadmin user.

        :return: True on success, False on the first failing step.
        """
        dst_db_path = os.path.join(self.log_area, self.copy_id)
        src_db_path = os.path.join(PathInfo.XBSA_META_PATH, "objectmeta", self.instance_name_curr,
                                   f"{self.instance_name_curr}.db")
        xbsa_read_path = os.path.join(self.meta_dir, 'meta', self.copy_id, 'objectmeta',
                                      self.instance_name_curr)
        ret = mkdir_set_permissions(xbsa_read_path, 'rdadmin')
        if not ret:
            log.error("Failed to create %s.%s", xbsa_read_path, self.log_common)
            return False
        if not set_user(os.path.join(self.meta_dir, 'meta'), 'rdadmin'):
            return False
        if not set_user(os.path.join(self.meta_dir, 'meta', self.copy_id), 'rdadmin'):
            return False
        cache_meta_path = os.path.join(self.cache_dir, "meta", self.copy_id, 'objectmeta')
        ret = mkdir_set_permissions(cache_meta_path, 'rdadmin')
        if not ret:
            log.error("Failed to create %s.%s", cache_meta_path, self.log_common)
            return False
        xbsa_dst_path = os.path.join(cache_meta_path, 'backupkey.db')
        xbsa_db_path = os.path.join(xbsa_read_path, f'{self.instance_name_curr}.db')
        # Copy the source db to all three destinations (DRY: the original
        # repeated the same cp/log/return pattern three times).
        for target in (dst_db_path, xbsa_db_path, xbsa_dst_path):
            if not exec_cp_cmd(src_db_path, target, None, '-f'):
                log.error('Fail to copy db file, src_path:%s', src_db_path)
                return False
        # Hand the copies (and the cache meta tree) over to rdadmin.
        for owned_path in (xbsa_db_path, os.path.join(self.cache_dir, "meta"), xbsa_dst_path):
            if not set_user(owned_path, 'rdadmin'):
                return False
        return True

    def get_copy_size(self):
        """
        Compute the size of the backup copy directory.

        :return: copy size reported by scan_dir_size; the size is returned
                 even when the scan reports failure (only an error is logged).
        """
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            copy_path = os.path.join(self.log_area, self.copy_id)
        else:
            copy_path = os.path.join(self.data_repository_path, self.copy_id)
        ok, copy_size = scan_dir_size(self.job_id, copy_path)
        if not ok:
            log.error(f"Get copy size fail. {self.log_common}")
        return copy_size

    def support_subjob_progress(self, task_status, log_detail=None, data_size=0):
        """
        Report sub-job progress, synthesizing a default LogDetail entry from
        the task status when the caller did not supply one.

        :param task_status: SubJobStatusEnum value to report
        :param log_detail: optional explicit list of LogDetail entries
        :param data_size: size of the processed data
        """
        if log_detail:
            details = log_detail
        elif task_status == SubJobStatusEnum.FAILED:
            details = [
                LogDetail(log_info=ReportDBLabel.BACKUP_SUB_FAILED, log_info_param=[self.sub_job_id],
                          log_detail=ErrorCode.BACKUP_DB_FAIL, log_level=DBLogLevel.ERROR)
            ]
        elif task_status == SubJobStatusEnum.COMPLETED:
            details = [
                LogDetail(log_info=ReportDBLabel.SUB_JOB_SUCCESS, log_info_param=[self.sub_job_id],
                          log_level=DBLogLevel.INFO)
            ]
        else:
            details = None
        self.return_result = SubJobDetails(task_id=self.job_id, task_status=task_status, progress=0,
                                           sub_task_id=self.sub_job_id, log_detail=details,
                                           data_size=data_size)
        self.update_result()

    def copy_emergency_boot_files(self):
        """
        Copy the Informix emergency boot files (ixbar and oncfg) into the copy.

        :return: True when both files were copied, False otherwise.
        """
        log.info("Start copying the emergency boot files.")
        # Short-circuit: the oncfg copy is skipped when the ixbar copy fails,
        # matching the original early-return behavior.
        return self.copy_ixba_file() and self.copy_oncfg_file()

    def get_copy_path(self):
        """
        Resolve the destination directory of the current copy.

        :return: log-area copy path for log backups, otherwise the data
                 repository copy-id path.
        """
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            return os.path.join(self.log_area, self.copy_id)
        return self.data_repository_copy_id_path

    def copy_ixba_file(self):
        """
        Write a truncated ixbar file into the copy directory.

        The file is cut at the entry of the last logical log (end_log_id) so
        the copy only describes the logs it actually contains, then the
        resulting file is handed over to the db user.

        :return: True on success, False on any failure.
        """
        log.info("Start updating the ixba copy file.")
        all_content = read_file_by_line(self.ixbar_path)
        if not all_content:
            log.error("The file is empty.")
            return False
        line_num = 0
        # Compare as strings: end_log_id is an int when set by
        # get_start_end_log_id, while split() always yields strings — the
        # original `fields[1] == self.end_log_id` could never match then.
        end_log_id = str(self.end_log_id)
        for line_record in all_content:
            fields = line_record.split()
            # len guard avoids IndexError on blank or short lines.
            if len(fields) > 2 and fields[1] == end_log_id and fields[2] == 'L':
                break
            line_num += 1
        # Drop the matching entry and everything after it.
        del all_content[line_num:]

        copy_path = self.get_copy_path()
        copy_file = os.path.join(copy_path, f'ixbar.{self.servernum}')
        ret, _ = check_path_in_white_list(copy_path)
        if not ret:
            log.error("The path verification is not in the trustlist.")
            return False

        if not write_file_by_line(copy_file, all_content):
            log.error("The parameter is incorrect.")
            return False

        if not set_user(copy_file, self.db_user):
            log.error("Failed to set the ixbar permission.")
            return False
        return True

    def copy_oncfg_file(self):
        """
        Copy the oncfg emergency boot file into the copy directory.

        :return: True on success, False when the copy fails.
        """
        log.info("Start copying the oncfg file.")
        oncfg_file = f"oncfg_{self.instance_name_curr}.{self.servernum}"
        oncfg_file_path = os.path.join(self.informixdir, "etc", oncfg_file)
        if self.backup_type == BackupTypeEnum.LOG_BACKUP:
            copy_path = os.path.join(self.log_area, self.copy_id)
        else:
            copy_path = self.data_repository_copy_id_path
        emergency_boot_file_list = [oncfg_file_path]
        ret, res = CommonCommand.copy_file(emergency_boot_file_list, copy_path, db_user=self.db_user)
        if not ret:
            # Log the actual destination (the original reported
            # self.backup_dir, which is not the target of this copy).
            log.error("Failed to copy emergency_boot_file to %s", copy_path)
            return False
        return True

    def update_meta_db(self):
        """
        Delete records of logical logs outside [start_log_id, end_log_id)
        from the copy's object-meta sqlite db.

        :return: True when all stale records were deleted (changes are only
                 committed in that case), False on any error.
        """
        meta_db_path = os.path.join(self.meta_dir, "meta", self.copy_id, 'objectmeta',
                                    self.instance_name_curr, f"{self.instance_name_curr}.db")
        ret, _ = check_path_in_white_list(meta_db_path)
        if not ret:
            return False
        try:
            object_conn = sqlite3.connect(meta_db_path)
        except Exception as ex:
            log.exception(ex)
            return False
        try:
            object_cursor = object_conn.cursor()
            object_query_result = object_cursor.execute(SqlConstant.Query_LOG_RECORDS).fetchall()
            if not object_query_result:
                log.info("Failed to run the query command.")
                return False
            update_ret = True
            start_log_id = int(self.start_log_id)
            end_log_id = int(self.end_log_id)
            for record in object_query_result:
                # The record key ends with the logical log id.
                logical_log_id = int(record[0].split("/")[-1])
                if start_log_id <= logical_log_id < end_log_id:
                    continue
                delete_sql = cmd_format(SqlConstant.DELETE_LOG_RECORD, record[0])
                if check_command_injection(delete_sql):
                    log.info(f"The parameter is incorrect, filename: {record[0]}. ")
                    update_ret = False
                    break
                object_cursor.execute(delete_sql)
            if update_ret:
                object_conn.commit()
            # Log what remains after the cleanup, for diagnostics.
            log.info(object_cursor.execute(SqlConstant.Query_LOG_RECORDS).fetchall())
            return update_ret
        finally:
            # Close the connection on every path (the original leaked it on
            # each early return).
            object_conn.close()

    def _support_progress(self, task_status, progress=1, log_detail=None, log_level=DBLogLevel.INFO, data_size=0):
        """
        Report job-level progress, optionally attaching a single LogDetail.

        :param task_status: SubJobStatusEnum value to report
        :param progress: progress percentage
        :param log_detail: optional label/error code for the LogDetail entry
        :param log_level: severity of the LogDetail entry
        :param data_size: size of the processed data
        """
        details = None
        if log_detail:
            details = [LogDetail(log_detail=log_detail, log_info_param=[self.job_id],
                                 log_level=log_level.value)]
        self.return_result = SubJobDetails(task_id=self.job_id, task_status=task_status,
                                           progress=progress, sub_task_id=self.sub_job_id,
                                           log_detail=details, data_size=data_size)
        self.update_result()
        
    def exec_cp_file(self, src_path, des_path):
        """
        Copy a single file as the db user via `su - <user> -c cp`.

        :param src_path: source file path
        :param des_path: destination path (file or directory)
        :return: True on success, False when the copy command fails.
        """
        import shlex  # local import; only this method builds a shell string
        # Quote the inner cp command so whitespace or shell metacharacters in
        # a path cannot break out of it (the original interpolated the raw
        # paths into the shell string). For plain paths shlex.quote is a
        # no-op on the operands and the outer quoting reproduces the original
        # command exactly: su - <user> -c 'cp <src> <dst>'.
        inner_cmd = f"cp {shlex.quote(src_path)} {shlex.quote(des_path)}"
        cmd = f"su - {self.db_user} -c {shlex.quote(inner_cmd)}"
        return_code, std_out, std_err = execute_cmd(cmd)
        if return_code != CMDResult.SUCCESS:
            log.error(f"Copy file failed, src path: {src_path}, des path: {des_path}, error: {std_err}")
            return False
        log.info(f"Copy file success, src path: {src_path}, des path: {des_path}.")
        return True
