/*
* This file is a part of the open-eBackup project.
* This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
* If a copy of the MPL was not distributed with this file, You can obtain one at
* http://mozilla.org/MPL/2.0/.
*
* Copyright (c) [2024] Huawei Technologies Co.,Ltd.
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*/
#include "nas_share/backup/HetroBackupJob.h"
#include "config_reader/ConfigIniReader.h"
#include "common/CleanMemPwd.h"
#include "common/EnvVarManager.h"
#include "constant/ErrorCodes.h"
#include "client/ClientInvoke.h"
#include "ShareResourceManager.h"
#include "system/System.hpp"
#include "ScanMgr.h"
#include "log/Log.h"
#include "utils/PluginConfig.h"
#include "common/Utils.h"

using namespace std;
using namespace PluginUtils;
using namespace Module;

// Logs entry into the current job control phase. Also derives the main-job
// request id from the job id and installs it as the active trace context
// (HCPTSP), so every subsequent log line of this phase carries that id.
// Defined as a plain brace block (not do/while(0)), so it is used both with
// and without a trailing ';' in this file (see PrerequisiteJob vs ExecuteSubJob).
#define ENTER                                                                                                   \
{                                                                                                               \
    m_mainJobRequestId = GenerateHash(m_jobId);                                                                 \
    HCPTSP::getInstance().reset(m_mainJobRequestId);                                                            \
    INFOLOG("Enter %s, jobId: %s, subJobId: %s", m_jobCtrlPhase.c_str(), m_jobId.c_str(), m_subJobId.c_str());  \
}

// Logs exit from the current job control phase; counterpart of ENTER.
#define EXIT                                                                                                    \
{                                                                                                               \
    INFOLOG("Exit %s, jobId: %s, subJobId: %s", m_jobCtrlPhase.c_str(), m_jobId.c_str(), m_subJobId.c_str());   \
}

namespace {
    // Module tag used by HCP_Log in this translation unit.
    constexpr auto MODULE = "HetroBackupJob";
    // Aggregation limits — presumably the number of files packed per aggregate;
    // not referenced in this chunk, confirm against the rest of the file.
    constexpr uint32_t DEFAULT_AGGREGATE_NUM = 32;
    constexpr uint32_t MAX_AGGREGATE_NUM = 1000;
    constexpr uint32_t SCANNER_REPORT_CIRCLE_TIME = ONE_MINUTE; /* seconds */
    constexpr uint32_t DEFAULT_SMB_BLOCK_SIZE = 128 * 1024; // 128k
    constexpr uint32_t NUM_10 = 10;
    // Keep-alive reporting cadence (seconds / repetitions).
    constexpr uint32_t REPORT_RUNNING_INTERVAL = 10;
    constexpr uint32_t REPORT_RUNNING_TIMES = 6;
    // Max attempts for one backup sub job when the monitor asks for a retry
    // (see ExecuteBackupSubJobInner).
    const uint32_t BACKUP_RETRY_CNT = 3;
    const int DEFAULT_SUB_JOB_CNT = 25;
    // X3000 Internal Agent Max Channel number
    const int INTERNAL_MAX_CHANNEL_NUM = 15;
    // Directory entries the scanner should skip (snapshot/system directories).
    const std::vector<std::string> SCAN_SKIP_DIRS = { ".", "..", ".snapshot", "~snapshot", "lost+found", ".etc" };
}


HetroBackupJob::~HetroBackupJob()
{
    // Scrub credential material (passwords and auth extend info) from memory
    // before this job object goes away.
    const auto wipe = [](std::string& secret) { Module::CleanMemoryPwd(secret); };
    wipe(m_dataFs.auth.authPwd);
    wipe(m_dataFs.auth.extendInfo);
    wipe(m_metaFs.auth.authPwd);
    wipe(m_metaFs.auth.extendInfo);
    wipe(m_cacheFs.auth.authPwd);
    wipe(m_cacheFs.auth.extendInfo);
    wipe(m_nasHost.auth.authPwd);
    wipe(m_nasHost.auth.extendInfo);
    wipe(m_nasShare.auth.authPwd);
    wipe(m_nasShare.auth.extendInfo);
}

bool HetroBackupJob::GetBackupJobInfo()
{
    // Extract the AppProtect::BackupJob description from the framework job
    // handle into m_backupJobPtr, then record the main/sub job ids.
    // Returns false (after reporting a FAILED detail) when the cast fails.
    auto jobHandle = GetJobInfo();
    if (jobHandle != nullptr) {
        m_backupJobPtr = std::dynamic_pointer_cast<AppProtect::BackupJob>(jobHandle->GetJobInfo());
    }
    if (m_backupJobPtr == nullptr) {
        ReportJobDetailsWithErrorCode(
            SubJobStatus::FAILED, PROGRESS0, "nas_plugin_hetro_backup_data_fail_label", JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        HCP_Log(ERR, MODULE) << "Failed to get backupJobPtr." << HCPENDLOG;
        return false;
    }
    SetMainJobId(m_backupJobPtr->jobId);
    SetSubJobId();
    return true;
}

int HetroBackupJob::PrerequisiteJob()
{
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_PREJOB);
    ENTER
    int ret = PrerequisiteJobInner();
    EXIT
    SetJobToFinish();
    return ret;
}

int HetroBackupJob::PrerequisiteJobInner()
{
    // Prerequisite phase body: add routes, create directories and shared
    // resources, and set up the data/meta repositories plus the remote NAS
    // (snapshot) before any sub job runs. On failure, partially created state
    // (snapshot, kerberos files for CIFS) is rolled back and FAILED is reported.
    // NOTE: errCode must be declared before the first `goto error` — C++
    // forbids jumping over a declaration with initialization.
    int64_t errCode = HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    if (!IsPigzExist()) {
        // pigz (parallel gzip) is a hard requirement; fail fast if missing.
        ReportJobDetails(SubJobStatus::FAILED, PROGRESS0,
            "plugin_check_pigz_failed_label", JobLogLevel::TASK_LOG_ERROR);
        return Module::FAILED;
    }

    if (!InitJobInfo()) {
        goto error;
    }
    // Add IP routing rules (local repository IPs and the remote NAS service IP).
    if (!OperateIpsRule(m_IpRuleList, "ADD") ||
        !OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "ADD", m_nasShare.nasShareExt.m_serviceIP)) {
        ERRLOG("add ip rule failed");
        goto error;
    }
    PluginUtils::CreateDirectory(m_failureRecordRoot);

    // Probe which service IP / NAS protocol version can actually be used;
    // on failure errCode carries the specific reason into the error report.
    if (!IdentifyNasProtoVersionToUse(errCode)) {
        HCP_Log(ERR, MODULE) << "Identify service ip's and proto version failed." << HCPENDLOG;
        goto error;
    }

    PrintJobInfo();

    if (CreatSrcDir() != Module::SUCCESS) {
        goto error;
    }

    // Persist initial general/scan/backup statistics as shared resources so the
    // later generate/execute phases can read and update them.
    if (!CreateSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        goto error;
    }
    m_generalInfo.m_jobStartTime = GetCurrentTimeInSeconds();

    if (!SetupDataFsForBackupJob() || !SetupMetaFsForBackupJob() || !SetupRemoteNasForBackupJob()) {
        goto error;
    }

    if (!UpdateGeneralResource(m_jobId, m_generalInfo)) {
        goto error;
    }

    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
    return Module::SUCCESS;

error:
    // Best-effort rollback: delete the remote snapshot if it was created, and
    // remove kerberos files generated for CIFS authentication.
    if (!m_generalInfo.m_remoteNasShareSnapshotName.empty()) {
        HetroCommonService::DeleteSnapshot(m_nasHost, m_nasShare, m_generalInfo.m_remoteNasShareSnapshotName, errCode);
    }
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS
        && m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        CleanKrbFilesForCifs(m_jobId);
    }
    ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0,
        "nas_plugin_hetro_backup_prepare_fail_label", JobLogLevel::TASK_LOG_ERROR, errCode);
    return Module::FAILED;
}

int HetroBackupJob::GenerateSubJob()
{
    // Generate-sub-job phase entry point: runs the scan (GenerateSubJobInner)
    // while a keep-alive thread reports the main job as running; the keep-alive
    // thread exits when m_generateSubjobFinish becomes true.
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_GENSUBJOB);
    ENTER
    m_generateSubjobFinish = false;
    // keep alive thread , used for report main job
    std::thread keepAlive = std::thread(&HetroBackupJob::KeepPluginAlive, this);
    RegisterScanTask();
    // Scope guard: releases the scan task registration on every exit path.
    std::shared_ptr<void> defer(nullptr, [&](...) { ReleaseScanTask(); });
    int ret = GenerateSubJobInner();
    // NOTE(review): if GenerateSubJobInner ever threw, keepAlive would be
    // destroyed unjoined (std::terminate) — presumably this path never throws;
    // verify before relying on exceptions here.
    m_generateSubjobFinish = true;
    if (keepAlive.joinable()) {
        keepAlive.join();
        INFOLOG("keep alive thread join!");
    }
    EXIT
    SetJobToFinish();
    return ret;
}

void HetroBackupJob::ReportJobCompleteStatusForGenerateJob(HetroNativeScanStatistics &scanStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Report the final status of the scan/generate phase. On success only the
    // scanner statistics are reported; on failure the scanner status is mapped
    // to a product error code (generic internal error when unrecognized).
    if (jobStatus == SubJobStatus::COMPLETED) {
        ReportScannerCompleteStatus(scanStatistics);
        return;
    }

    if (m_scanStatus == SCANNER_STATUS::INCOMPLETE_SCAN_REACH_LIMIT) {
        // "Too many entries" has a dedicated label and no error code attached.
        ReportJobDetails(jobStatus, jobProgress, "nas_plugin_hetro_backup_scan_too_many_fail_label",
            JobLogLevel::TASK_LOG_ERROR);
        return;
    }

    int64_t errorCode = HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    if (m_scanStatus == SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE) {
        errorCode = HomoErrorCode::ERROR_NAS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
    } else if (m_scanStatus == SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE) {
        errorCode = HomoErrorCode::ERROR_NAS_SCAN_SECONDARY_SERVER_NOT_REACHABLE;
    }
    ReportJobDetailsWithErrorCode(jobStatus, jobProgress, jobLogLabel, JobLogLevel::TASK_LOG_ERROR, errorCode);
}

bool HetroBackupJob::StartScanner()
{
    // Create and start the directory scanner, seeding it with the share root
    // ("." for NFS, "" for CIFS). Marks the scan as started in the shared
    // statistics resource and primes the periodic PM report timer.
    ScanConfig scanConfig {};
    FillScanConfig(scanConfig);

    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (m_scanner == nullptr) {
        ERRLOG("Start Scanner failed!");
        return false;
    }
    
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        m_scanner->Enqueue(".");
    } else if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        m_scanner->Enqueue("");
    }

    if (m_scanner->Start() != SCANNER_STATUS::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start scanner instance failed!" << HCPENDLOG;
        m_scanner->Destroy();
        return false;
    }
    m_scanStats.m_scanStarted = true;
    if (!HetroCommonService::UpdateScanStatsResource(m_jobId, m_scanStats)) {
        // NOTE(review): the scanner keeps running on this failure path (no
        // Destroy() here) — presumably it is torn down later via m_scanner;
        // confirm against MonitorScanner/ReleaseScanTask.
        HCP_Log(ERR, MODULE) << "Update Scan Stats failed!" << HCPENDLOG;
        return false;
    }
    // Initialize the share resource's report timestamp so the first periodic
    // statistics report to PM starts counting from now.
    ShareResourceManager::GetInstance().CanReportStatToPM(m_jobId + "_backup");
    return true;
}

bool HetroBackupJob::GetTheLastCopyId()
{
    // Resolve the id of the most recent previous copy into m_lastCopyId; it is
    // required as the base of an incremental backup. Retries framework
    // exceptions up to EXECEPTION_RETRY_TIMES; returns false when no previous
    // copy exists or retries are exhausted.
    if (IsFullBackup()) {
        DBGLOG("Full backup, do not need get last copy, jobId: %s", m_jobId.c_str());
        return true; // A full backup does not need the previous copy's info.
    }
    DBGLOG("Enter GetTheLastCopyId, Due to incremental backup, jobId: %s", m_jobId.c_str());
    Copy lastCopy;
    std::set<AppProtect::CopyDataType> dataTypes {
        AppProtect::CopyDataType::INCREMENT_COPY,
        AppProtect::CopyDataType::FULL_COPY,
        AppProtect::CopyDataType::PERMANENT_INCREMENTAL_COPY
    };
    Application appInfo = m_backupJobPtr->protectObject;
    string copyId = m_backupJobPtr->copy.id;
    int retryCnt = 0;
    do {
        // Refresh the job's liveness periodically so the framework does not
        // time the job out while we keep retrying.
        if (retryCnt % MIDDLE_EXECEPTION_RETRY_TIMES == 0) {
            KeepJobAlive(m_jobId, m_subJobId);
        }
        try {
            // Pass an empty copyId to query the most recent copy.
            JobService::QueryPreviousCopy(lastCopy, appInfo, dataTypes, "", m_jobId);
        } catch (AppProtectFrameworkException &e) {
            WARNLOG("QueryPreviousCopy failed, code: %d, retry: %d, jobId: %s", e.code, retryCnt, m_jobId.c_str());
            if (e.code == E_QUERIED_RESOURCE_NOT_EXIST) {
                ERRLOG("Exit QueryPreviousCopy, jobId: %s, last copy not exist!", m_jobId.c_str());
                return false;
            }
            // `continue` in a do-while still evaluates the loop condition, so
            // retryCnt is incremented as intended.
            sleep(EXECEPTION_RETRY_INTERVAL);
            continue;
        } catch (...) {
            ERRLOG("unknown exception happened, QueryPreviousCopy failed!");
            return false;
        }
        m_lastCopyId = lastCopy.id;
        DBGLOG("Exit QueryPreviousCopy, queryLastCopy success: %s for job: %s", m_lastCopyId.c_str(), m_jobId.c_str());
        return true;
    } while (++retryCnt <= EXECEPTION_RETRY_TIMES);
    ERRLOG("Exit QueryPreviousCopy, failed to queryLastCopy for job: %s, after max retry & failed", m_jobId.c_str());
    return false;
}

int HetroBackupJob::GenerateSubJobInner()
{
    // Scan/generate phase body: start the scanner and monitor it to completion.
    // Always returns Module::SUCCESS — the actual outcome is conveyed to the
    // framework via ReportJobCompleteStatusForGenerateJob (jobStatus defaults
    // to FAILED and is only changed by MonitorScanner).
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
        "nas_plugin_hetro_backup_scan_start_label", JobLogLevel::TASK_LOG_INFO);
    m_lastScannerReportTime = GetCurrentTimeInSeconds();
    HetroNativeScanStatistics scanStatistics {};
    int jobProgress = 0;
    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    std::string jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";

    scanStatistics.m_scanStartTime = GetCurrentTimeInSeconds();

    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        goto error;
    }
    PrintJobInfo();
    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        goto error;
    }
    // For the NetApp snapshot format, mount via NFSv4 so the scan can obtain
    // inode numbers.
    if (NeedMountUseNfs4() && !MountNfs4ForNetapp()) {
        goto error;
    }
    if (IsAbortJob()) {
        ERRLOG("Job aborted, skip scanner.");
        goto error;
    }
    if (!StartScanner()) {
        HCP_Log(ERR, MODULE) << "Start Scanner Failed" << HCPENDLOG;
        goto error;
    }
    INFOLOG("HetroBackup - Scanner Start!");
    MonitorScanner(scanStatistics, jobStatus, jobLogLabel, jobProgress);
    INFOLOG("HetroBackup - Scanner Finish!");
error:
    // Cleanup runs on both the success and the failure path: drop kerberos
    // files for CIFS and unmount the temporary NFSv4 mount if one was made.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS
        && m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        CleanKrbFilesForCifs(m_jobId);
    }
    if (!m_tempLocalMountPath.empty() && NeedMountUseNfs4() &&
        PluginUtils::UnmountNas(m_tempLocalMountPath) != Module::SUCCESS) {
        ERRLOG("Unmount remote nas failed for share: %s", m_nasShare.sharePath.c_str());
    }
    ReportJobCompleteStatusForGenerateJob(scanStatistics, jobStatus, jobLogLabel, jobProgress);
    return Module::SUCCESS;
}

bool HetroBackupJob::NeedMountUseNfs4()
{
    // Only NetApp ONTAP9 devices serving the share over NFS with snapshots
    // enabled need the extra NFSv4 mount (used by the scan to obtain inodes).
    const bool netappNfsWithSnapshot =
        m_nasHost.vendorSubType == NAS_VENDOR_TYPE_STR_NETAPP_ONTAP9 &&
        m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS &&
        IsSnapshotEnabled();
    if (netappNfsWithSnapshot) {
        INFOLOG("This device is netapp, protocol is nfs, snapshot is on.");
    }
    return netappNfsWithSnapshot;
}

bool HetroBackupJob::MountNfs4ForNetapp()
{
    std::string nasMountOptions = NFS_MOUNT_OPTION;
    std::string outSelectSvcIp;
    NasMountParams param {
        "nfs", "4", m_generalInfo.m_remoteNasShareSnapshotPath, m_tempLocalMountPath, nasMountOptions,
        "", "", {m_nasShare.nasShareExt.m_serviceIP}};
    if (PluginUtils::MountNFS(param, outSelectSvcIp) != Module::SUCCESS) {
        ERRLOG("Mount remote nas failed for share: %s", m_nasShare.sharePath.c_str());
        return false;
    }
    return true;
}

int HetroBackupJob::ExecuteSubJob()
{
    // Execute-sub-job phase entry point. The sub job is registered with the
    // channel manager for the whole call and removed again on every exit path.
    AddSubJobToChannelMgr();
    std::shared_ptr<void> channelGuard(nullptr, [&](...) {
        RemoveSubJobFromChannelMgr();
    });

    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_EXECSUBJOB);
    ENTER;
    const int result = ExecuteSubJobInner();
    EXIT;
    SetJobToFinish();
    return result;
}

int HetroBackupJob::ExecuteSubJobInner()
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    std::shared_ptr<void> defer(nullptr, [&](...) {
        OperateIpsRule(m_IpRuleList, "DELETE");
        OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "DELETE", m_nasShare.nasShareExt.m_serviceIP);
    });
    if (!InitJobInfo() || !OperateIpsRule(m_IpRuleList, "ADD") ||
        !OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "ADD", m_nasShare.nasShareExt.m_serviceIP)) {
        HCP_Log(ERR, MODULE) << "Init Job Info failed" << HCPENDLOG;
        ReportJobDetailsWithErrorCode(
            SubJobStatus::FAILED, PROGRESS0, "nas_plugin_hetro_backup_data_fail_label", JobLogLevel::TASK_LOG_ERROR,
            HetroErrorCode::BACKUP_PARAMETER_CHECK_FAILED_ERROR_CODE);
        return Module::FAILED;
    }

    PrintSubJobInfo(m_subJobInfo);
    HetroBackupSubJob backupSubJob {};
    if (!Module::JsonHelper::JsonStringToStruct(m_subJobInfo->jobInfo, backupSubJob)) {
        HCP_Log(ERR, MODULE) << "Get backup subjob info failed" << HCPENDLOG;
        ReportJobDetails(SubJobStatus::FAILED, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return Module::FAILED;
    }
    m_subJobRequestId = GenerateHash(m_jobId + m_subJobId);
    HCP_Log(INFO, MODULE) << "mainJob ID: " << m_jobId << ", subJobID: " << m_subJobId
        << ", subJobRequestId: 0x" << setw(NUMBER8) << setfill('0')
        << hex << (m_subJobRequestId & 0xFFFFFFFF) << dec << HCPENDLOG;
    HCPTSP::getInstance().reset(m_subJobRequestId);

    m_subTaskType = backupSubJob.m_SubTaskType;
    if (m_subTaskType == SUBJOB_TYPE_TEARDOWN_PHASE) {
        return ExecuteTeardownSubJobInner(backupSubJob);
    } else if (m_subTaskType == SUBJOB_TYPE_COPYMETA_PHASE) {
        return ExecuteCopyMetaSubJobInner(backupSubJob);
    } else if (m_subTaskType == SUBJOB_TYPE_CREATE_SUBJOB_PHASE) {
        m_generateSubjobFinish = false;
        std::thread keepAlive = std::thread(&HetroBackupJob::KeepPluginAlive, this);
        int ret = ExecuteCreateSubJobInner(backupSubJob);
        m_generateSubjobFinish = true;
        keepAlive.join();
        return ret;
    } else if (m_subTaskType == SUBJOB_TYPE_CHECK_SUBJOB_PHASE) {
        return ExecuteCheckSubJobInner();
    } else {
        return ExecuteBackupSubJobInner(backupSubJob);
    }

    return Module::SUCCESS;
}

void HetroBackupJob::ReportJobProgress(SubJobStatus::type &jobStatus, std::string &jobLogLabel)
{
    // Report the final status of a data-copy sub job. On success report 100%;
    // otherwise map the backup failure reason (m_backupStatus) to a product
    // error code, defaulting to a generic internal error.
    if (jobStatus == SubJobStatus::COMPLETED) {
        ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
        return;
    }

    int64_t errCode = HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    if (m_backupStatus == BackupPhaseStatus::FAILED_NOACCESS) {
        errCode = HomoErrorCode::ERROR_BACKUP_FAILED_NOACCESS_ERROR;
    } else if (m_backupStatus == BackupPhaseStatus::FAILED_NOSPACE) {
        errCode = HomoErrorCode::ERROR_BACKUP_FAILED_NOSPACE_ERROR;
    } else if (m_backupStatus == BackupPhaseStatus::FAILED_PROT_SERVER_NOTREACHABLE) {
        errCode = HomoErrorCode::ERROR_NAS_BACKUP_PROTECTED_SERVER_NOT_REACHABLE;
    } else if (m_backupStatus == BackupPhaseStatus::FAILED_SEC_SERVER_NOTREACHABLE) {
        errCode = HomoErrorCode::ERROR_NAS_BACKUP_SECONDARY_SERVER_NOT_REACHABLE;
    }
    ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR, errCode);
}

bool HetroBackupJob::SkipDeleteStage(const HetroBackupSubJob& backupSubJob)
{
    // "delete_" control files are currently not processed for aggregated
    // incremental backups: report the sub job as completed and tell the caller
    // to skip it. Returns false for every other case.
    const bool isDeleteCtrlFile = backupSubJob.m_ControlFile.find("delete_") != string::npos;
    if (IsFullBackup() || !IsAggregate() || !isDeleteCtrlFile) {
        return false;
    }
    INFOLOG("Not handle delete_ control file for Aggregated increment backup now");
    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
    return true;
}

int HetroBackupJob::ExecuteBackupSubJobInner(HetroBackupSubJob backupSubJob)
{
    // Data-copy sub job: run the backup engine for one control file, retrying
    // up to BACKUP_RETRY_CNT times when the monitor asks for a retry
    // (MONITOR_BACKUP_RES_TYPE_NEEDRETRY). Always returns Module::SUCCESS; the
    // actual outcome is conveyed to the framework via ReportJobProgress.
    INFOLOG("Enter ExecuteBackupSubJobInner: %s, %s, controlFile: %s, type:%u",
        m_jobId.c_str(), m_subJobId.c_str(), backupSubJob.m_ControlFile.c_str(), m_subTaskType);

    if (SkipDeleteStage(backupSubJob)) {
        return Module::SUCCESS;
    }

    BackupStats backupStatistics {};
    int jobProgress = 0;
    int retryCnt = 0;
    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    std::string jobLogLabel = "nas_plugin_hetro_backup_data_fail_label";
    // Bug fix: monitorRet was previously declared (uninitialized) just before
    // the retry loop and then read after the loop even when StartBackup failed
    // on the first attempt — i.e. before any assignment (undefined behavior).
    // It is now value-initialized here, before the goto statements (so no jump
    // crosses its initialization), and reads are guarded by monitorRan.
    MONITOR_BACKUP_RES_TYPE monitorRet {};
    bool monitorRan = false;

    g_nodeLevelTaskInfo.Insert(m_jobId);
    g_nodeLevelTaskInfo.IncrSubTasksCount();
    PrintJobInfo();
    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        goto error;
    }
    if (!UpdateBackupStartTimeInSharedResource(backupSubJob)) {
        HCP_Log(ERR, MODULE) << "Update Backup info in Shared Resource failed" << HCPENDLOG;
        goto error;
    }
    /* update the control file path */
    backupSubJob.m_ControlFile = m_cacheFsPath + backupSubJob.m_ControlFile;
    do {
        if (!StartBackup(backupSubJob)) {
            HCP_Log(ERR, MODULE) << "StartBackup failed" << HCPENDLOG;
            break;
        }
        monitorRet = MonitorBackup(backupStatistics, jobStatus, jobLogLabel, jobProgress);
        monitorRan = true;
        if (m_backup != nullptr) {
            m_backup->Destroy();
            m_backup.reset();
        }
    } while (monitorRet == MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY && ++retryCnt < BACKUP_RETRY_CNT);
    if (monitorRan && retryCnt >= BACKUP_RETRY_CNT &&
        monitorRet == MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY) {
        // seems this sub job is stuck for some reason , copy this control file to meta repo for further check
        WARNLOG("subjob is stuck, %s, copy controlFile: %s", m_subJobId.c_str(), backupSubJob.m_ControlFile.c_str());
        CopyFile(backupSubJob.m_ControlFile, m_metaFsPath);
    }

error:
    g_nodeLevelTaskInfo.DecrSubTasksCount();
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS
        && m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        CleanKrbFilesForCifs(m_jobId);
    }

    // Record copied bytes (in KB units) and report the final sub-job status.
    m_dataSize = backupStatistics.noOfBytesCopied/NUMBER1024;
    ReportJobProgress(jobStatus, jobLogLabel);
    return Module::SUCCESS;
}

bool HetroBackupJob::IsSubTaskStatsFileExists()
{
    // Check whether this sub job's statistics file (written under the meta
    // repository) already exists. Uses the error_code overload so a filesystem
    // error is treated as "does not exist" instead of throwing.
    const std::string statsFile =
        m_metaFsPath + "/statistics_" + m_jobId + "/backup-stats-sub-" + m_subJobId + ".json";
    boost::system::error_code ec;
    return boost::filesystem::exists(statsFile, ec);
}

bool HetroBackupJob::CalculateJobStats()
{
    // Aggregate every per-sub-job statistics file found under
    // <metaRepo>/statistics_<jobId> into one main-job statistics file
    // (backup-stats-main-<jobId>.json). Each input file is removed after it
    // has been folded in. Returns false only when the directory listing fails.
    uint64_t dirsCopiedInMtimePhase = 0;
    uint64_t dirsFailedInMtimePhase = 0;
    HetroNativeBackupStats mainJobStats {};

    std::string statsDir = m_metaFsPath + "/statistics_" + m_jobId;
    std::vector<std::string> fileList {};
    if (!GetFileListInDirectory(statsDir, fileList)) {
        HCP_Log(ERR, MODULE) << "Get filelist for dir failed: " << statsDir << HCPENDLOG;
        return false;
    }
    uint32_t processedCount = 0;
    for (const auto& entry : fileList) {
        if (IsAbortJob()) {
            HCP_Log(WARN, MODULE) << "Abort invoked for the job" << HCPENDLOG;
            return true;
        }
        if (processedCount++ % NUMBER50 == 0) {
            /* Since this loop may take too much time, report progress to framework, every 50 files */
            ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        }
        std::string path = entry;
        HetroNativeBackupStats subJobStats {};
        ReadBackupStatsFromFile(path, subJobStats);
        if (path.find("dirmtime") != std::string::npos) {
            // Directory counters from the dirmtime phase are tracked apart;
            // for non-aggregate backups these are the accurate ones.
            dirsCopiedInMtimePhase += subJobStats.m_noOfDirCopied;
            dirsFailedInMtimePhase += subJobStats.m_noOfDirFailed;
        } else {
            mainJobStats += subJobStats;
        }
        INFOLOG("CalculateJobStats for control file : %s, jobPhrase: %u, noOfDirToBackup: %llu,"
            "noOfFilesToBackup: %llu, noOfBytesToBackup: %llu, noOfDirToDelete: %llu,"
            "noOfFilesToDelete: %llu, noOfDirCopied: %llu, noOfFilesCopied: %llu,"
            "noOfBytesCopyed: %llu, noOfDirDeleted: %llu, noOfDirFailed: %llu,"
            "noOfFilesFailed: %llu, noOfSrcRetryCnt: %llu, noOfDstRetryCnt: %llu",
            path.c_str(), subJobStats.m_subJobPhrase, subJobStats.m_noOfDirToBackup,
            subJobStats.m_noOfFilesToBackup, subJobStats.m_noOfBytesToBackup, subJobStats.m_noOfDirToDelete,
            subJobStats.m_noOfFilesToDelete, subJobStats.m_noOfDirCopied, subJobStats.m_noOfFilesCopied,
            subJobStats.m_noOfBytesCopied, subJobStats.m_noOfDirDeleted, subJobStats.m_noOfDirFailed,
            subJobStats.m_noOfFilesFailed, subJobStats.m_noOfSrcRetryCount, subJobStats.m_noOfDstRetryCount);
        RemoveFile(path);
    }
    /* Only for dirs copied, we use the stats from dirmtime since its accurate */
    // Aggregate backups have no dir phase, so their directory counters are the
    // sum over all sub jobs; otherwise take the dirmtime-phase counters only.
    if (!IsAggregate()) {
        mainJobStats.m_noOfDirCopied = dirsCopiedInMtimePhase;
        mainJobStats.m_noOfDirFailed = dirsFailedInMtimePhase;
    }
    std::string mainStatsPath = m_metaFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    WriteBackupStatsToFile(mainStatsPath, mainJobStats);
    return true;
}

void HetroBackupJob::FillBackupCopyParam(HetroLastCopyDetails &newBackupCopy)
{
    // Capture the attributes of the copy produced by this backup run; the
    // record is later written out as the backup-copy meta file (see
    // ExecuteTeardownSubJobInner) for use by subsequent jobs.
    newBackupCopy.m_backupFormat = m_dataLayoutExt.m_backupFormat;
    newBackupCopy.m_metadataBackupType = m_dataLayoutExt.m_metadataBackupType;
    newBackupCopy.m_backupFilter = m_nasShare.nasShareExt.m_filters;
    newBackupCopy.m_protocolVersion = m_generalInfo.m_protocolVersion;
    // The remote snapshot timestamp doubles as the copy's "last backup time".
    newBackupCopy.m_lastBackupTime = m_generalInfo.m_remoteNasShareSnapshotTime;
    newBackupCopy.m_isArchiveSupportHardlink = "true";
}

int HetroBackupJob::ExecuteCreateSubJobInner(HetroBackupSubJob backupSubJob)
{
    // Create-sub-job phase: hand the scanner's control/meta files to
    // HandleMonitorScannerCompletion, which presumably turns them into backup
    // sub jobs. backupSubJob is unused here but kept for a signature consistent
    // with the other Execute*SubJobInner handlers.
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    HCP_Log(INFO, MODULE) << "Enter ExecuteCreateSubJobInner" << HCPENDLOG;

    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    std::string jobLogLabel {};

    if (!InitRepoPaths()) {
        HCP_Log(ERR, MODULE) << "InitRepoPaths failed" << HCPENDLOG;
        ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        // Bug fix: the failure was reported but execution previously fell
        // through and continued with uninitialized repository paths.
        return Module::FAILED;
    }

    PrintJobInfo();

    // All scanner artifacts live under the cache repository.
    std::string scanCtrlFilePath = m_cacheFsPath + "/backup-job/scan/ctrl";
    std::string backupCtrlFilePath = m_cacheFsPath + "/backup-job/backup/ctrl";
    std::string scanMetaFilePath = m_cacheFsPath + "/backup-job/scan/meta/latest";

    if (!HandleMonitorScannerCompletion(jobStatus, jobLogLabel, scanCtrlFilePath, scanMetaFilePath,
        backupCtrlFilePath)) {
        ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        return Module::FAILED;
    }

    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);

    return Module::SUCCESS;
}

int HetroBackupJob::ExecuteTeardownSubJobInner(HetroBackupSubJob backupSubJob)
{
    // Teardown phase: persist the copy metadata (backup-copy-meta.json), save
    // the scanner meta, fold all sub-job statistics into the main statistics
    // file and report completion. backupSubJob itself is not used here.
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    HetroLastCopyDetails newBackupCopy {};
    HCP_Log(INFO, MODULE) << "Enter ExecuteTeardownSubJobInner" << HCPENDLOG;

    PrintJobInfo();
    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        goto error;
    }

    FillBackupCopyParam(newBackupCopy);

    // write backup-copy-meta.json to root of meta repo
    // (inner scope keeps the local string declaration legal despite the
    // `goto error` above — no jump crosses its initialization)
    {
        string rootJsonPath = m_metaFs.path[0] + BACKUP_COPY_METAFILE;
        if (!WriteBackupCopyToFile(rootJsonPath, newBackupCopy)) {
            HCP_Log(ERR, MODULE) << "write root json file failed" << HCPENDLOG;
            goto error;
        }
    }

    // write backup-copy-meta.json to metaRepo/copyID again if is aggregation task
    if (IsAggregate()) {
        string jsonFilePath = m_metaFs.path[0] + "/" + m_backupJobPtr->copy.id + BACKUP_COPY_METAFILE;
        if (!WriteBackupCopyToFile(jsonFilePath, newBackupCopy)) {
            HCP_Log(ERR, MODULE) << "write aggr copy json file failed" << HCPENDLOG;
            goto error;
        }
    }

    /* TO-DO: Handle error */
    // NOTE(review): the return values of the three calls below are ignored;
    // the TO-DO above suggests this is known and pending.
    HetroBackupJob::SaveScannerMeta();
    CalculateJobStats();
    ReportBackupCompletionStatus();
    return Module::SUCCESS;

error:
    // Clean kerberos files generated for CIFS authentication, then report failure.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS
        && m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        CleanKrbFilesForCifs(m_jobId);
    }

    ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0,
        "nas_plugin_hetro_backup_data_fail_label", JobLogLevel::TASK_LOG_ERROR,
        HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
    return Module::FAILED;
}

int HetroBackupJob::ExecuteCheckSubJobInner()
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    PrintJobInfo();
    CalculateJobStats();
    HetroNativeBackupStats backupStatistics {};
    std::string filePath = m_metaFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);
    if (backupStatistics.m_noOfDirFailed == 0 && backupStatistics.m_noOfFilesFailed == 0) {
        ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    } else {
        WARNLOG("some of files or dirs failed, set main job to partial success");
        ReportJobDetails(SubJobStatus::FAILED, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    }
    return Module::SUCCESS;
}

int HetroBackupJob::ExecuteCopyMetaSubJobInner(HetroBackupSubJob backupSubJob)
{
    // Copy-meta sub job: copy the scanner metadata ("latest" directory in the
    // cache repo) into the meta repository on a worker thread, while this
    // thread keeps reporting liveness to the framework until the copy is done.
    // backupSubJob itself is not used here.
    INFOLOG("Copy Meta sub job start.");
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    std::string scanMetaFilePath1;
    std::thread monitorCopyThread1;
    PrintJobInfo();
    scanMetaFilePath1 = m_cacheFsPath + "/backup-job/scan/meta/latest";
    // Flags consumed below: m_isCopying is cleared by the worker when it is
    // done, m_isZipSuccess carries its success/failure.
    // NOTE(review): both flags are written by the worker thread and polled
    // here — verify they are std::atomic (or otherwise synchronized),
    // otherwise this is a data race.
    m_isCopying = true;
    m_isZipSuccess = true;
    HCP_Log(INFO, MODULE) << "Begin Copy Meta file! : " << scanMetaFilePath1 << HCPENDLOG;
    monitorCopyThread1 = std::thread(&HetroBackupJob::CopyMetaFileToMetaRepo, this, scanMetaFilePath1);

    while (m_isCopying) {
        HCP_Log(INFO, MODULE) << "Wait for copy finish!" << HCPENDLOG;
        SendJobReportForAliveness();
        std::this_thread::sleep_for(std::chrono::seconds(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC));
    }
    monitorCopyThread1.join();

    if (!m_isZipSuccess) {
        ReportJobDetailsWithErrorCode(
            SubJobStatus::FAILED, PROGRESS0, "nas_plugin_hetro_backup_data_fail_label", JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::INTERNAL_ERROR_CODE);
        return Module::FAILED;
    }

    HCP_Log(INFO, MODULE) << "Copy Meta sub job finish" << HCPENDLOG;
    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
    return Module::SUCCESS;
}

int HetroBackupJob::PostJob()
{
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_POSTJOB);
    AddSubJobToChannelMgr();
    std::shared_ptr<void> delSubJob(nullptr, [&](...) {
        RemoveSubJobFromChannelMgr();
    });
    ENTER
    int ret = PostJobInner();
    EXIT
    SetJobToFinish();
    return ret;
}

int HetroBackupJob::PostJobInner()
{
    // Post phase body: remove routes, unmount the temporary NFSv4 mount,
    // delete the remote snapshot, report the finished copy to the framework
    // and clean up shared resources / cache directories.
    HetroNativeBackupStats backupStatistics {};
    std::string filePath {};
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return HandlePostJobFailed();
    }
    // Remove the IP routing rules added in the earlier phases.
    if (!OperateIpsRule(m_IpRuleList, "DELETE") ||
        !OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "DELETE", m_nasShare.nasShareExt.m_serviceIP)) {
        ERRLOG("delete ip rule failed");
        return HandlePostJobFailed();
    }
    PrintJobInfo();
    MergeBackupFailureRecords(m_cacheFsPath);

    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        return HandlePostJobFailed();
    }
    // Unmount the temporary NFSv4 mount used for the NetApp scan; failure is
    // only logged, not fatal.
    if (!m_tempLocalMountPath.empty() && NeedMountUseNfs4() &&
        PluginUtils::CheckAndUnmount(m_tempLocalMountPath) != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "PostJob try umount failed" << HCPENDLOG;
    }

    // Delete snapshot
    WindupRemoteNasForBackupJob();

    // Remove certificate files that were written to disk.
    if (!RemoveCertification(m_backupJobPtr->protectEnv)) {
        return HandlePostJobFailed();
    }
    if (!PostReportCopyAdditionalInfo()) {
        HCP_Log(ERR, MODULE) << "PostReportCopyAdditionalInfo failed" << HCPENDLOG;
        return HandlePostJobFailed();
    }

    DeleteSharedResources(m_jobId);

    filePath = m_metaFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);
    RemoveCacheDirectories();
    PrintBackupCopyInfo(backupStatistics);
    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
    g_nodeLevelTaskInfo.Erase(m_jobId);
    return Module::SUCCESS;
}

bool HetroBackupJob::PostReportCopyAdditionalInfo()
{
    // Report the finished copy (extend info + repository addresses) to UBC,
    // with retries. Only a successful backup produces a copy worth reporting;
    // any other job result is a silent no-op returning true.
    if (m_jobResult != AppProtect::JobResult::type::SUCCESS) {
        return true;
    }
    Copy image;
    image.__set_id(m_backupJobPtr->copy.id); // copy id
    image.__set_formatType(m_backupJobPtr->copy.formatType);
    HCP_Log(INFO, MODULE) << "m_backupJobPtr->copy.formatType:" << m_backupJobPtr->copy.formatType << HCPENDLOG;
    std::string extendInfo;
    if (!GetAggCopyExtendInfo(extendInfo)) {
        return false;
    }
    DBGLOG("image set extendinfo %s", extendInfo.c_str());
    image.__set_extendInfo(extendInfo);

    // Build the data and meta repository addresses to report to UBC.
    vector<StorageRepository> repositories;
    BuildCopyRepositories(repositories);
    image.__set_repositories(repositories);

    ActionResult returnValue;
    for (int retryCnt = 0; retryCnt <= NORMAL_RETRY_TIMES; ++retryCnt) {
        if (retryCnt == MIDDLE_NORMAL_RETRY_TIMES) {
            // Halfway through the retries, refresh the job so it is not timed out.
            KeepJobAlive(m_jobId, m_subJobId);
        }
        JobService::ReportCopyAdditionalInfo(returnValue, m_jobId, image);
        if (returnValue.code == MP_SUCCESS) {
            DBGLOG("Exit ReportCopyAdditionalInfo, success report image: %s", WIPE_SENSITIVE(image).c_str());
            return true;
        }
        WARNLOG("ReportCopyAdditionalInfo failed, image: %s, retry: %d", WIPE_SENSITIVE(image).c_str(), retryCnt);
        // Fix: the previous do/while slept unconditionally, so a final failed
        // attempt still waited NORMAL_RETRY_INTERVAL before returning. Sleep
        // only when another attempt remains.
        if (retryCnt < NORMAL_RETRY_TIMES) {
            sleep(NORMAL_RETRY_INTERVAL);
        }
    }
    ERRLOG("Exit ReportCopyAdditionalInfo, failed report image: %s after maxRetry", WIPE_SENSITIVE(image).c_str());
    return false;
}

void HetroBackupJob::BuildCopyRepositories(vector<StorageRepository>& repositories)
{
    // Build meta and data repository entries for the copy report. Aggregate
    // copies get the copy id appended to the remote path.
    const bool aggregate = IsAggregate();
    const string copySuffix = "/" + m_backupJobPtr->copy.id;
    string metaRemotePath = aggregate ? (m_metaFs.remotePath + copySuffix) : m_metaFs.remotePath;
    string dataRemotePath = aggregate ? (m_dataFs.remotePath + copySuffix) : m_dataFs.remotePath;
    DBGLOG("report info for aggragation, dataRemotePath = %s, metaRemotePath = %s",
        dataRemotePath.c_str(), metaRemotePath.c_str());

    // Both entries share every field except type and remote path.
    auto makeRepo = [](const StorageRepository& src, auto repoType, const string& remotePath) {
        StorageRepository repo;
        repo.__set_id(src.id); // file system id
        repo.__set_repositoryType(repoType);
        repo.__set_isLocal(src.isLocal);
        repo.__set_remotePath(remotePath);
        repo.__set_remoteHost(src.remoteHost);
        repo.__set_protocol(src.protocol);
        repo.__set_extendInfo(src.extendInfo);
        return repo;
    };
    repositories.push_back(makeRepo(m_metaFs, RepositoryDataType::META_REPOSITORY, metaRemotePath));
    repositories.push_back(makeRepo(m_dataFs, RepositoryDataType::DATA_REPOSITORY, dataRemotePath));
}


void HetroBackupJob::InitMetaBackupFs()
{
    // Pick the cache, data and meta repositories out of the job description;
    // the data repository also supplies the local storage ip list.
    for (const auto& repo : m_backupJobPtr->repositories) {
        switch (repo.repositoryType) {
            case RepositoryDataType::CACHE_REPOSITORY:
                m_cacheFs = repo;
                break;
            case RepositoryDataType::DATA_REPOSITORY:
                m_dataFs = repo;
                m_localStorageIps = m_dataFs.endpoint.ip;
                break;
            case RepositoryDataType::META_REPOSITORY:
                m_metaFs = repo;
                break;
            default:
                break;
        }
    }
}

bool HetroBackupJob::InitJobInfo()
{
    // Populate share/host/layout/aggregate/smb settings from the job
    // description, create the temp mount point and acquire the kerberos TGT.
    // Returns false on any parse or setup failure.
    if (!InitHostInfo() || !GetTheLastCopyId()) {
        HCP_Log(ERR, MODULE) << "init host info or Get last copy id failed" << HCPENDLOG;
        return false;
    }

    /* Protected NAS Share details */
    m_nasShare.id = m_backupJobPtr->protectObject.id;
    // Normalize the share path to always start with '/'.
    m_nasShare.sharePath = m_backupJobPtr->protectObject.name[0] == '/' ? m_backupJobPtr->protectObject.name :
         "/" + m_backupJobPtr->protectObject.name;
    m_nasShare.auth = m_backupJobPtr->protectObject.auth;
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->protectObject.extendInfo, m_nasShare.nasShareExt)) {
        // Fixed: previous message did not say which extendInfo failed to parse.
        HCP_Log(ERR, MODULE) << "Failed to parse protectObject extendInfo json to struct" << HCPENDLOG;
        return false;
    }
    ResolveDomain(m_nasShare.nasShareExt.m_serviceIP, m_remoteIpRuleList);
    /* Data layout details */
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->extendInfo, m_dataLayoutExt)) {
        // Fixed: message previously claimed protectEnv (with a typo) while parsing job extendInfo.
        HCP_Log(ERR, MODULE) << "Failed to parse job extendInfo to data layout struct" << HCPENDLOG;
        return false;
    }

    /* Aggregate config details */
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->extendInfo, m_aggrInfo)) {
        HCP_Log(ERR, MODULE) << "Failed to parse job extendInfo to aggregate config struct" << HCPENDLOG;
        return false;
    }

    /* Smb config details */
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->extendInfo, m_smbProtectionConfig)) {
        HCP_Log(ERR, MODULE) << "Failed to parse job extendInfo to smb protection config struct" << HCPENDLOG;
        return false;
    }

    /* hot cold mix error: hot-data and cold-data backup are mutually exclusive */
    bool errFlag = m_smbProtectionConfig.m_backupHotData != "0" && m_smbProtectionConfig.m_backupColdData != "0";
    if (errFlag) {
        ERRLOG("Hot data and cold data conflict!");
        return false;
    }

    /* MetaFs and BackupFs to be used */
    InitMetaBackupFs();
    m_tempLocalMountPath = "/mnt/" + m_nasShare.id;
    if (!CreateDirectory(m_tempLocalMountPath)) {
        HCP_Log(ERR, MODULE) << "Failed to create temp local mount path" << HCPENDLOG;
        return false;
    }
    if (!InitRepoPaths() || !InitBackupCopyMetaInfo()) {
        return false;
    }

    HCP_Log(INFO, MODULE) << "Befor KinitTGT: m_jobCtrlPhase: " << m_jobCtrlPhase
        << ", m_protocol: " << m_nasShare.nasShareExt.m_protocol
        << ", authType: " << m_nasShare.auth.authType << HCPENDLOG;

    /* kinit to get TGT for CIFS share with kerberos authentication */
    if (!KinitTGT()) {
        HCP_Log(ERR, MODULE) << "Kinit TGT ticket failed for kerberos authtication." << HCPENDLOG;
        return false;
    }

    return true;
}

bool HetroBackupJob::InitHostInfo()
{
    // Fill m_nasHost from the protected environment description and, when
    // required, write the CA certificate to disk for verification.
    /* Protected NAS Host details */
    m_nasHost.id = m_backupJobPtr->protectEnv.id;
    m_nasHost.name = m_backupJobPtr->protectEnv.name;
    m_nasHost.vendor = m_backupJobPtr->protectEnv.type;
    m_nasHost.vendorSubType = m_backupJobPtr->protectEnv.subType;
    m_nasHost.mgrIp = m_backupJobPtr->protectEnv.endpoint;
    m_nasHost.port = m_backupJobPtr->protectEnv.port;
    m_nasHost.auth = m_backupJobPtr->protectEnv.auth;
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->protectEnv.extendInfo, m_nasHost.nasHostExt)) {
        // Fixed typo in message: "protctEnv" -> "protectEnv".
        HCP_Log(ERR, MODULE) << "Failed to parse protectEnv extendInfo json to struct" << HCPENDLOG;
        return false;
    }
    // Check whether CA verification is enabled and stage the CA cert to disk.
    if (!CertVerifyMgr(m_backupJobPtr->protectEnv, m_nasHost)) {
        return false;
    }
    return true;
}

bool HetroBackupJob::InitRepoPaths()
{
    // Derive local mount paths and service ips for the cache/meta/data
    // repositories. Paths are selected round-robin by the node-level
    // sub-task counter.
    if (m_cacheFs.path.size() == 0 || m_metaFs.path.size() == 0) {
        HCP_Log(ERR, MODULE) << "Received info is wrong " << "m_cacheFs.path.size() " << m_cacheFs.path.size()
            << "m_metaFs.path.size() " << m_metaFs.path.size() << HCPENDLOG;
        return false;
    }
    // Fix: guard the modulo-indexing below — an empty remoteHost list would
    // be a modulo-by-zero (undefined behaviour) instead of a clean failure.
    if (m_dataFs.remoteHost.empty() || m_cacheFs.remoteHost.empty() || m_metaFs.remoteHost.empty()) {
        HCP_Log(ERR, MODULE) << "Received info is wrong, remoteHost list is empty" << HCPENDLOG;
        return false;
    }

    if (!(m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS && IsInnerAgent())) {
        // Inner-agent CIFS does not carry dataFs.path.
        if (m_dataFs.path.size() == 0) {
            HCP_Log(ERR, MODULE) << "Received info is wrong " << "m_dataFs.path.size():" << m_dataFs.path.size()
                << HCPENDLOG;
            return false;
        }
        m_dataFsLocalMountPath = m_dataFs.path[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.path.size()];
        m_dataFsPath = m_dataFs.path[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.path.size()];
        m_dataFsPath = IsAggregate() ? (m_dataFsPath + "/" + m_backupJobPtr->copy.id) : m_dataFsPath;
    }

    m_dataFsSvcIp  = m_dataFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size()].ip;
    m_cacheFsSvcIp = m_cacheFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_cacheFs.remoteHost.size()].ip;
    m_metaFsSvcIp  = m_metaFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_metaFs.remoteHost.size()].ip;

    // After moving off the secure container, CIFS (unlike NFS via 127.0.0.1)
    // still uses logical ports to reach backend file services, so routes to
    // each data remote host must be added.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        for (const auto& remoteHost : m_dataFs.remoteHost) { // const ref: avoid copying HostAddress each iteration
            DBGLOG("remote host ip: %s added to ip rule list.", remoteHost.ip.c_str());
            m_IpRuleList.push_back(remoteHost.ip);
        }
    }

    // plugin can use any mounted cache path given by agent, so using first one
    m_cacheFsPath = m_cacheFs.path[g_nodeLevelTaskInfo.GetSubTasksCount() % m_cacheFs.path.size()];
    HCP_Log(DEBUG, MODULE) << " m_cacheFs.remotePath: " << m_cacheFs.remotePath << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << " Before m_cacheFsPath: " << m_cacheFsPath << HCPENDLOG;
    m_cacheFsPath = GetPathName(m_cacheFsPath);
    HCP_Log(DEBUG, MODULE) << " After change m_cacheFsPath: " << m_cacheFsPath << HCPENDLOG;

    // plugin can use any mounted meta path given by agent, so using first one
    m_metaFsPath = m_metaFs.path[g_nodeLevelTaskInfo.GetSubTasksCount() % m_metaFs.path.size()];
    m_metaFsPath = IsAggregate() ? (m_metaFsPath + "/" + m_backupJobPtr->copy.id) : m_metaFsPath;
    m_backupCopyMetaFile = IsAggregate() ? m_metaFs.path[0] + "/"  +  m_lastCopyId + BACKUP_COPY_METAFILE :
        m_metaFs.path[0] + BACKUP_COPY_METAFILE;
    HCP_Log(DEBUG, MODULE) << " m_metaFs.remotePath: "  << m_metaFs.remotePath << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << " m_metaFsPath: " << m_metaFsPath << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << " m_backupCopyMetaFile: "  << m_backupCopyMetaFile << HCPENDLOG;

    // plugin can use any mounted data path given by agent, so using first one
    HCP_Log(DEBUG, MODULE) << " m_dataFs.remotePath: " << m_dataFs.remotePath << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << " m_dataFs.remoteName: " << m_dataFs.remoteName << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << " m_dataFsPath: " << m_dataFsPath << HCPENDLOG;

    return true;
}

void HetroBackupJob::PrintFilterInfo()
{
    uint64_t filterSize = m_backupJobPtr->jobParam.filters.size();
    HCP_Log(INFO, MODULE) << "Filter Size = " << filterSize;
    if (filterSize > NUM_10) {
        filterSize = NUM_10;
    }
    for (uint64_t index = 0; index < filterSize; ++index) {
        AppProtect::ResourceFilter filter = m_backupJobPtr->jobParam.filters[index];
        HCP_Log(DEBUG, MODULE) << "jobParam.filter["<< index << "].filterBy: " << filter.filterBy << HCPENDLOG;
        HCP_Log(DEBUG, MODULE) << "jobParam.filter["<< index << "].type: " << filter.type << HCPENDLOG;
        HCP_Log(DEBUG, MODULE) << "jobParam.filter["<< index << "].rule: " << filter.rule << HCPENDLOG;
        HCP_Log(DEBUG, MODULE) << "jobParam.filter["<< index << "].mode: " << filter.mode << HCPENDLOG;
        uint64_t valSize = filter.values.size();
        HCP_Log(INFO, MODULE) << "Filter Values list size = " << valSize;
        if (valSize > NUM_10) {
            valSize = NUM_10;
        }

        for (uint64_t valIndex = 0; valIndex < valSize; ++valIndex) {
            HCP_Log(DEBUG, MODULE) << "jobParam["<< index << "].filter.val[" << valIndex << "]: "
                << WIPE_SENSITIVE(filter.values[valIndex]) << HCPENDLOG;
        }
    }
}

bool HetroBackupJob::PrintJobInfo()
{
    // Log a summary of the job: phase, host, share, data layout, qos and
    // filters, then delegate to PrintJobInfoForRepo() whose result is returned.
    HCP_Log(INFO, MODULE) << "jobPhase: " << m_jobCtrlPhase << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "jobId: " << m_jobId << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "backupJobType: " << (IsFullBackup() ? "FULL" : "INC") << HCPENDLOG;

    HCP_Log(DEBUG, MODULE) << "nasHost.id: " << m_nasHost.id << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.name: " << WIPE_SENSITIVE(m_nasHost.name) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.vendor: " << WIPE_SENSITIVE(m_nasHost.vendor) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.vendorsubType: " << m_nasHost.vendorSubType << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.mgrIp: " << m_nasHost.mgrIp << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.port: " << m_nasHost.port << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.auth.type: " << m_nasHost.auth.authType << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.auth.key: " << WIPE_SENSITIVE(m_nasHost.auth.authkey) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.useSnapshot: " << m_nasHost.nasHostExt.m_useSnapshot << HCPENDLOG;

    HCP_Log(INFO, MODULE) << "nasShare.id: " << m_nasShare.id << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.shareId: " << m_nasShare.nasShareExt.m_shareId << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.filesystemId: " << m_nasShare.nasShareExt.m_fileSystemId << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.dtreeId: " << m_nasShare.nasShareExt.m_dtreeId << HCPENDLOG;
    // NOTE(review): the "nasShare.sharePath" label is printed twice — this line
    // shows nasShareExt.m_sharePath, the one two lines below shows
    // m_nasShare.sharePath. Consider distinct labels to ease log parsing.
    HCP_Log(INFO, MODULE) << "nasShare.sharePath: " << m_nasShare.nasShareExt.m_sharePath << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.svcIp: " << m_nasShare.nasShareExt.m_serviceIP << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.sharePath: " << WIPE_SENSITIVE(m_nasShare.sharePath) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.protocol: " << m_nasShare.nasShareExt.m_protocol << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.authMode: " << m_nasShare.nasShareExt.m_authMode << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasShare.kerberosId: " << m_nasShare.nasShareExt.m_kerberosId << HCPENDLOG;

    HCP_Log(DEBUG, MODULE) << "m_nasShare.nasShareExt.m_filters: "
        << WIPE_SENSITIVE(m_nasShare.nasShareExt.m_filters) << HCPENDLOG;

    HCP_Log(INFO, MODULE) << "dataLayout.backupFormat: " << m_dataLayoutExt.m_backupFormat << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "dataLayout.metadataBackupType: " << m_dataLayoutExt.m_metadataBackupType << HCPENDLOG;

    HCP_Log(DEBUG, MODULE) << "qos.bandwidth: " << m_backupJobPtr->jobParam.qos.bandwidth << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "qos.protectIops: " << m_backupJobPtr->jobParam.qos.protectIops << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "qos.backupIops: " << m_backupJobPtr->jobParam.qos.backupIops << HCPENDLOG;
    PrintFilterInfo();
    return PrintJobInfoForRepo();
}

int HetroBackupJob::CreatSrcDir()
{
    // Ensure the data-FS target directory exists before the backup writes to it.
    HCP_Log(INFO, MODULE) << "Enter CreatSrcDir" << HCPENDLOG;
    if (CreateDirectory(m_dataFsPath)) {
        return Module::SUCCESS;
    }
    HCP_Log(ERR, MODULE) << "Creat the dir of m_dataFsPath failed, m_dataFsPath is :" << m_dataFsPath << HCPENDLOG;
    return Module::FAILED;
}

bool HetroBackupJob::PrintJobInfoForRepo()
{
    HCP_Log(DEBUG, MODULE) << "cacheFs.ip: " << m_cacheFs.endpoint.ip << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "cacheFs.sharePath: " << WIPE_SENSITIVE(m_cacheFs.remotePath) << HCPENDLOG;
    for (std::string &path: m_cacheFs.path)
        HCP_Log(DEBUG, MODULE) << "cacheFs.localMountPath: " << WIPE_SENSITIVE(path) << HCPENDLOG;
    for (HostAddress svcIp: m_cacheFs.remoteHost)
        HCP_Log(DEBUG, MODULE) << "cacheFs.svcip: " << svcIp.ip << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_cacheFsSvcIp: " << m_cacheFsSvcIp  << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_cacheFsPath: " << WIPE_SENSITIVE(m_cacheFsPath) << HCPENDLOG;

    HCP_Log(DEBUG, MODULE) << "backupFs.ip: " << m_dataFs.endpoint.ip << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "backupFs.sharePath: " << WIPE_SENSITIVE(m_dataFs.remoteName) << HCPENDLOG;
    for (std::string &path: m_dataFs.path)
        HCP_Log(DEBUG, MODULE) << "backupFs.localMountPath: " << WIPE_SENSITIVE(path) << HCPENDLOG;
    for (HostAddress svcIp: m_dataFs.remoteHost)
        HCP_Log(DEBUG, MODULE) << "backupFs.svcip: " << svcIp.ip << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_backupFsSvcIp: " << m_dataFsSvcIp  << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_backupFsPath: " << WIPE_SENSITIVE(m_dataFsPath)  << HCPENDLOG;

    HCP_Log(DEBUG, MODULE) << "m_metaFs.ip: " << m_metaFs.endpoint.ip << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "m_metaFs.sharePath: " << WIPE_SENSITIVE(m_metaFs.remotePath) << HCPENDLOG;
    for (std::string &path: m_metaFs.path)
        HCP_Log(DEBUG, MODULE) << "m_metaFs.localMountPath: " << WIPE_SENSITIVE(path) << HCPENDLOG;
    for (HostAddress svcIp: m_metaFs.remoteHost)
        HCP_Log(DEBUG, MODULE) << "m_metaFs.svcip: " << svcIp.ip << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_metaFsSvcIp: " << m_metaFsSvcIp  << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_metaFsPath: " << WIPE_SENSITIVE(m_metaFsPath)  << HCPENDLOG;

    return true;
}

bool HetroBackupJob::SetupMetaFsForBackupJob()
{
    // Create the meta-FS statistics directory and the cache-FS control/meta
    // directory trees, retrying up to NUM_10 times. Any leftovers from an
    // unfinished previous post job are removed first.
    RemoveDirectory(m_metaFsPath + "/statistics_" + m_jobId);
    RemoveDirectory(m_cacheFsPath + "/backup-job/scan/ctrl");
    RemoveDirectory(m_cacheFsPath + "/backup-job/backup/ctrl");
    RemoveDirectory(m_cacheFsPath + "/backup-job/scan/meta/latest");

    /**
     * Layout created under the repository paths passed by DME_UBC:
     * | -- MetaFsPath
     *   | -- statistics_<jobId>    // statistics of backup main job and sub-jobs
     * | -- CacheFsPath
     *    | -- backup-job           // For backup job
     *       | -- scan              // Info saved by SCAN module
     *          | -- meta           // Meta info (metafile, dcache, fcache):
     *                              //   previous/ = prior scan, latest/ = current scan
     *          | -- ctrl           // Control info (input to BACKUP module)
     *       | -- backup            // Info saved by BACKUP module
     *          | -- ctrl
     */
    int retryCnt = 0;
    bool ret = false;
    while (!ret && retryCnt < NUM_10) {
        if (!CreateDirectory(m_metaFsPath + "/statistics_" + m_jobId)) {
            // Message typo fixed: "retyrnCnt" -> "retryCnt".
            HCP_Log(INFO, MODULE) << "setup meta-fs for backup job failed, retryCnt : " << retryCnt <<HCPENDLOG;
            retryCnt++;
            sleep(EXECEPTION_RETRY_INTERVAL);
            continue;
        }
        INFOLOG("setup meta-fs for backup job success");

        if (!CreateDirectory(m_cacheFsPath + "/backup-job/scan/meta") ||
            !CreateDirectory(m_cacheFsPath + "/backup-job/scan/ctrl") ||
            !CreateDirectory(m_cacheFsPath + "/backup-job/backup/ctrl")) {
            HCP_Log(INFO, MODULE) << "setup cache-fs for backup job failed, retryCnt : " << retryCnt <<HCPENDLOG;
            retryCnt++;
            sleep(EXECEPTION_RETRY_INTERVAL);
        } else {
            INFOLOG("setup cache-fs for backup job success");
            ret = true;
        }
    }
    return ret;
}

bool HetroBackupJob::SetupDataFsForBackupJob()
{
    // No data-FS specific preparation is performed here; this is a
    // placeholder that always reports success (kept so the setup sequence
    // can call it uniformly alongside SetupMetaFsForBackupJob).
    return true;
}

bool HetroBackupJob::SetupRemoteNasForBackupJob()
{
    /**
     * Setup the remote nas to be protected.
     * If m_useSnapshot (take a snapshot and back up from it) is "1", create the
     * snapshot and record its name, share path and creation time.
     */

    /** Prerequisite() may be called twice (node restart scenarios), so a
     * snapshot may already exist — in that case reuse the old one.
     */
    if (m_nasHost.nasHostExt.m_useSnapshot == "1" && !m_generalInfo.m_remoteNasShareSnapshotName.empty()) {
        return true;
    }

    m_generalInfo.m_remoteNasShareSnapshotName = "";
    m_generalInfo.m_remoteNasShareSnapshotTime = GetCurrentTimeInSeconds();
    m_generalInfo.m_remoteNasShareSnapshotPath = m_nasShare.sharePath;

    int64_t homoErrCode = 0;
    if (m_nasHost.nasHostExt.m_useSnapshot == "1") {
        bool ret = HetroCommonService::CreateSnapshot(m_nasHost, m_nasShare,
            m_generalInfo.m_remoteNasShareSnapshotName,
            m_generalInfo.m_remoteNasShareSnapshotTime,
            m_generalInfo.m_remoteNasShareSnapshotPath,
            homoErrCode);
        if (!ret) {
            // Fix: homoErrCode is int64_t — "%d" is the wrong format specifier
            // (undefined behaviour / truncated value on LP64); use %lld with a cast.
            ERRLOG("Setup remote nas for backup job failed, homoErrCode is %lld",
                static_cast<long long>(homoErrCode));
            homoErrCode = (homoErrCode == 0 ? HomoErrorCode::ERROR_BACKUP_CREATE_SNAP_FAILED_ERROR : homoErrCode);
            // A dead local storage link overrides the error: the agent should switch nodes.
            if (!PluginUtils::CheckDeviceNetworkConnect(m_localStorageIps)) {
                homoErrCode = HomoErrorCode::AGENT_SWITCH_ERROR_CODE;
            }
            /*  check normal ReportJobDetails is fine */
            ReportJobDetailsWithLabelAndErrcode(SubJobStatus::FAILED,
                "nas_plugin_hetro_backup_prepare_create_snap_fail_label",
                JobLogLevel::TASK_LOG_ERROR,
                homoErrCode, PROGRESS50,
                m_generalInfo.m_remoteNasShareSnapshotName);
            return false;
        }
        ReportJobDetails(SubJobStatus::RUNNING,
                         PROGRESS50,
                         "nas_plugin_hetro_backup_prepare_create_snap_succeed_label",
                         JobLogLevel::TASK_LOG_INFO,
                         m_generalInfo.m_remoteNasShareSnapshotName);
    }
    return true;
}


void HetroBackupJob::WindupRemoteNasForBackupJob()
{
    /* rollback snapdiff code here */
    string deleteSnapshotName = m_generalInfo.m_remoteNasShareSnapshotName;
    if (!deleteSnapshotName.empty()) {
        int64_t homoErrCode = 0;
        if (!HetroCommonService::DeleteSnapshot(m_nasHost, m_nasShare, deleteSnapshotName, homoErrCode)) {
            ERRLOG("failed to delete snapshot: %s", deleteSnapshotName.c_str());
            homoErrCode = (homoErrCode == HomoErrorCode::CERTIFACATE_IS_INVALID)
                ? homoErrCode : HomoErrorCode::ERROR_BACKUP_DEL_SNAP_FAILED_ERROR;
            ReportJobDetailsWithErrorCode(SubJobStatus::RUNNING,
                PROGRESS50, "nas_plugin_hetro_backup_post_del_snap_fail_label", JobLogLevel::TASK_LOG_WARNING,
                homoErrCode, deleteSnapshotName);
        } else {
            INFOLOG("succeed to delete snapshot: %s", deleteSnapshotName.c_str());
            ReportJobDetails(SubJobStatus::RUNNING,
                PROGRESS50, "nas_plugin_hetro_backup_post_del_snap_succeed_label",
                JobLogLevel::TASK_LOG_INFO, deleteSnapshotName);
        }
    }
}

// Report sub-job status/progress (with an optional label and its format args)
// to the agent. Keep-alive reports (empty label + RUNNING) are rate-limited;
// an aborted job downgrades RUNNING reports to ABORTING. Returns false only
// when the agent rejects the report.
template<typename... Args>
bool HetroBackupJob::ReportJobDetails(SubJobStatus::type jobStatus, int32_t jobProgress,
    std::string logLabel, const JobLogLevel::type &logLevel, Args... logArgs)
{
    SubJobDetails subJobDetails {};
    ActionResult result {};
    std::vector<LogDetail> logDetailList;
    LogDetail logDetail{};
    int32_t jobSpeed = 0;

    // Rate-limit keep-alive reports of business sub-jobs to once per
    // REPORT_INTERVAL (90s per the original note).
    if (logLabel.empty() &&
        jobStatus == SubJobStatus::RUNNING) {
        int64_t currTime = PluginUtils::GetCurrentTimeInSeconds();
        if ((currTime - m_lastKeepAliveReportTime) < REPORT_INTERVAL) {
            return true;
        }
        m_lastKeepAliveReportTime = currTime;
    }

    // Once the job is aborted, a RUNNING report is rewritten as ABORTING and
    // the label dropped so no stale progress message is shown.
    if (IsAbortJob() && jobStatus == SubJobStatus::RUNNING) {
        HCP_Log(INFO, MODULE) << "Job is aborted, force change jobStatus to aborting for"
            << " jobId: " << m_jobId
            << ", subJobId: " << m_subJobId
            << HCPENDLOG;
        jobStatus = SubJobStatus::ABORTING;
        logLabel = "";
    }
    // NOTE(review): the "jobId" key here logs m_parentJobId, not m_jobId —
    // confirm this is intentional.
    INFOLOG("Enter ReportJobDetails. jobId: %s, subJobId: %s, jobStatus: %d, jobProgress: %d, jobLabel: %s, logLevel: %d",
        m_parentJobId.c_str(), m_subJobId.c_str(), static_cast<int>(jobStatus), jobProgress, logLabel.c_str(),
        static_cast<int>(logLevel));

    if (logLabel != "") {
        AddLogDetail(logDetail, logLabel, logLevel, logArgs...);
    }

    /* TO-DO: Later, discuss with homo team to change REPORT_LOG2AGENT macro to add new param for data size */
    if (m_dataSize != 0) {
        subJobDetails.__set_dataSize(m_dataSize);
    }

    REPORT_LOG2AGENT(subJobDetails, result, logDetailList, logDetail, jobProgress, jobSpeed, jobStatus);
    if (result.code != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Report job details to agent failed: " << result.code <<  HCPENDLOG;
        return false;
    }

    INFOLOG("Exit ReportJobDetails. jobId: %s, subJobId: %s, jobStatus: %d, jobProgress: %d, logLabel: %s, logLevel: %d",
        m_parentJobId.c_str(), m_subJobId.c_str(), static_cast<int>(jobStatus), jobProgress, logLabel.c_str(),
        static_cast<int>(logLevel));
    return true;
}

// Same as ReportJobDetails, but additionally attaches an error code to the
// log detail. No keep-alive rate limiting here. Returns false only when the
// agent rejects the report.
template<typename... Args>
bool HetroBackupJob::ReportJobDetailsWithErrorCode(SubJobStatus::type jobStatus, int32_t jobProgress,
    std::string logLabel, const JobLogLevel::type &logLevel, const int64_t errCode, Args... logArgs)
{
    SubJobDetails subJobDetails {};
    ActionResult result {};
    std::vector<LogDetail> logDetailList;
    LogDetail logDetail{};
    int32_t jobSpeed = 0;

    // Once the job is aborted, a RUNNING report is rewritten as ABORTING and
    // the label dropped so no stale progress message is shown.
    if (IsAbortJob() && jobStatus == SubJobStatus::RUNNING) {
        HCP_Log(INFO, MODULE) << "Job is aborted, force change jobStatus to aborting for"
            << " jobId: " << m_jobId
            << ", subJobId: " << m_subJobId
            << HCPENDLOG;
        jobStatus = SubJobStatus::ABORTING;
        logLabel = "";
    }

    // NOTE(review): "jobId" here logs m_parentJobId — confirm this is intentional.
    HCP_Log(INFO, MODULE) << "Enter ReportJobDetailsWithErrorCode: "
        << "jobId: " << m_parentJobId
        << ", subJobId: " << m_subJobId
        << ", jobStatus: " << jobStatus
        << ", ErrorCode: " << errCode
        << ", jobProgress: " << jobProgress
        << ", logLabel: " << logLabel
        << ", logLevel: " << logLevel
        << HCPENDLOG;

    if (logLabel != "") {
        AddLogDetail(logDetail, logLabel, logLevel, logArgs...);
    }

    /* TO-DO: Later, discuss with homo team to change REPORT_LOG2AGENT macro to add new param for data size */
    if (m_dataSize != 0) {
        subJobDetails.__set_dataSize(m_dataSize);
    }

    /* Reporting error codes */
    AddErrCode(logDetail, errCode);

    REPORT_LOG2AGENT(subJobDetails, result, logDetailList, logDetail, jobProgress, jobSpeed, jobStatus);
    if (result.code != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Report job details to agent failed: " << result.code <<  HCPENDLOG;
        return false;
    }

    HCP_Log(INFO, MODULE) << "Exit ReportJobDetailsWithErrorCode: "
        << "jobId: " << m_parentJobId
        << ", subJobId: " << m_subJobId
        << ", jobStatus: " << jobStatus
        << ", ErrorCode: " << errCode
        << ", jobProgress: " << jobProgress
        << ", logLabel: " << logLabel
        << ", logLevel: " << logLevel
        << HCPENDLOG;
    return true;
}

void HetroBackupJob::ReportJobDetailsWithLabelAndErrcode(SubJobStatus::type jobStatus,
    const std::string& logLabel, const JobLogLevel::type &logLevel, const int64_t errCode,
    int32_t jobProgress, const std::string& message)
{
    // One-shot report carrying a label, an error code and one extra
    // description line; the result of the report is not checked.
    SubJobDetails jobDetails;
    ActionResult reportResult;
    std::vector<LogDetail> detailList;
    LogDetail detail {};
    detail.__set_additionalDesc(vector<string>{message});
    AddLogDetail(detail, logLabel, logLevel);
    AddErrCode(detail, errCode);
    REPORT_LOG2AGENT(jobDetails, reportResult, detailList, detail, jobProgress, 0, jobStatus);
}

void HetroBackupJob::FillScanConfigBasedOnEnviroment(ScanConfig &scanConfig)
{
    // Scan tuning parameters differ only by hardware model: X6000 when the
    // deploy type says so, otherwise the X8000 defaults. The two original
    // branches were 14 near-identical ConfigReader calls differing only in
    // the key's model prefix — dedupe with a key-builder helper.
    std::string deployType;
    if (PluginConfig::GetInstance().m_scene != PluginUsageScene::EXTERNAL) {
        deployType = PluginUtils::GetDeployType();
    }
    const std::string model = (deployType == X6000_DEPLOY_TYPE) ? "X6000" : "X8000";
    auto readCfg = [&model](const std::string& keySuffix) {
        // Keys have the form DME_NAS_SCAN_<MODEL><suffix>, e.g.
        // DME_NAS_SCAN_X6000_OPENDIR_REQ_CNT.
        return Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "DME_NAS_SCAN_" + model + keySuffix);
    };
    scanConfig.maxOpendirReqCount = readCfg("_OPENDIR_REQ_CNT");
    scanConfig.maxWriteQueueSize = readCfg("_MAX_WRITE_QUEUE_SIZE");
    scanConfig.maxScanQueueSize = readCfg("_MAX_SCAN_QUEUE_SIZE");
    scanConfig.minScanQueueSize = readCfg("_MIN_SCAN_QUEUE_SIZE");
    scanConfig.writeQueueSize = readCfg("_WRITE_QUEUE_SIZE");
    scanConfig.dirEntryReadCount = readCfg("_DIR_ENTRY_READ_COUNT");
    scanConfig.scanMetaFileSize = readCfg("_DEFAULT_META_FILE_SIZE");
}

void HetroBackupJob::FillScanConfigBasedOnProtocol(ScanConfig &scanConfig)
{
    // Select the scanner IO engine and connection parameters by share protocol:
    // libnfs for NFS shares, libsmb2 for everything else (CIFS/SMB).
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        scanConfig.scanIO = IOEngine::LIBNFS;
        scanConfig.nfs.m_serverIp = StripSqrBracketsFromIpAddress(m_nasShare.nasShareExt.m_serviceIP);
        scanConfig.nfs.m_serverPath = m_generalInfo.m_remoteNasShareSnapshotPath;
        if (NeedMountUseNfs4()) {
            // NFSv4 scans are done through the local temp mount.
            scanConfig.nfs.m_mountPath = m_tempLocalMountPath;
        }
        scanConfig.nfs.m_nasServerCheckSleepTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_BACKUP_NAS_SERVER_CHECK_SLEEP_TIME");
        scanConfig.nfs.maxOpendirReqCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_LIBNFS_OPENDIR_REQ_CNT");
        return;
    }
    // CIFS/SMB share.
    scanConfig.scanIO = IOEngine::LIBSMB2;
    scanConfig.smb.server = m_nasShare.nasShareExt.m_serviceIP;
    scanConfig.smb.domain = m_nasShare.nasShareExt.m_domainName;
    scanConfig.smb.share = m_generalInfo.m_remoteNasShareSnapshotPath;
    scanConfig.smb.version = HetroCommonService::ConvertStringToSmbVersion(m_generalInfo.m_protocolVersion);
    scanConfig.smb.user = m_nasShare.auth.authkey;
    scanConfig.smb.password = m_nasShare.auth.authPwd;
    // Idiom: assign booleans/selections directly instead of if/else blocks.
    scanConfig.smb.encryption = (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION);
    scanConfig.smb.authType = (m_nasShare.auth.authType == AuthType::type::KERBEROS) ?
        Module::SmbAuthType::KRB5 : Module::SmbAuthType::NTLMSSP;
    // Per-job kerberos cache/config files under the agent home directory.
    std::string agentHomePath = Module::EnvVarManager::GetInstance()->GetAgentHomePath();
    scanConfig.smb.krb5CcacheFile = agentHomePath + KRB5CCNAMEPREFIX + m_jobId;
    scanConfig.smb.krb5ConfigFile = agentHomePath + KRB5CONFIGPREFIX + m_jobId + KRB5CONFIGPOSTFIX;
}

void HetroBackupJob::FillScanConfig(ScanConfig &scanConfig)
{
    // Assemble the full scanner configuration for this backup job: identity,
    // protocol/environment settings, incremental baseline, filters, control-file
    // paths, callbacks, and hot/cold tiering windows.
    HCP_Log(INFO, MODULE) << " Enter FillScanConfig" << HCPENDLOG;
    scanConfig.jobId = m_jobId;
    // NOTE(review): subJobId is filled with the main job id — presumably because the
    // scan runs before any sub job exists; confirm against ScanMgr expectations.
    scanConfig.subJobId = m_jobId;
    scanConfig.reqID = m_mainJobRequestId;
    scanConfig.failureRecordRootPath = m_cacheFsPath;

    FillScanConfigBasedOnEnviroment(scanConfig);
    FillScanConfigBasedOnProtocol(scanConfig);
    scanConfig.scanType = IsFullBackup() ? (ScanJobType::FULL) : (ScanJobType::INC);
    scanConfig.usrData = (void *)this;
    // Incremental backups compare against the previous copy's timestamp; full backups scan everything.
    scanConfig.lastBackupTime = IsFullBackup() ? 0 : m_prevBackupCopyInfo.m_lastBackupTime;
    scanConfig.useLastBackupTime = true;
    HCP_Log(DEBUG, MODULE) << "lastBackupTime: " << ConvertToReadableTime(scanConfig.lastBackupTime) << HCPENDLOG;
    FillScanConfigForFilter(m_backupJobPtr->jobParam.filters, scanConfig);

    // BUGFIX: these two flags were cross-assigned — the ACL switch was driven by the
    // hardlink-protection setting and vice versa. Pair each flag with its own setting.
    scanConfig.disableSmbAcl = m_smbProtectionConfig.m_smbAclProtection == "false";
    scanConfig.disableSmbNlink = m_smbProtectionConfig.m_smbHardlinkProtection == "false";
    /* Path */
    scanConfig.metaPath =  m_cacheFsPath + "/backup-job/scan/meta";
    scanConfig.metaPathForCtrlFiles = m_cacheFsPath + "/backup-job/scan/ctrl";
    /* Callbacks Regiter */
    scanConfig.scanResultCb = ScannerCtrlFileCallBack;
    scanConfig.scanHardlinkResultCb = ScannerHardLinkCallBack;
    scanConfig.mtimeCtrlCb = BackupDirMTimeCallBack;
    scanConfig.deleteCtrlCb = BackupDelCtrlCallBack;

    scanConfig.maxCommonServiceInstance = 1;

    scanConfig.scanCtrlFileTimeSec = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_TIME_SEC");
    scanConfig.scanCheckPointEnable = false; // discard DME_NAS_SCAN_CHECKPOINT_ENABLED
    scanConfig.triggerTime = GetCurrentTimeInSeconds();
    int backupHotData = Module::SafeStoi(m_smbProtectionConfig.m_backupHotData);
    int backupColdData = Module::SafeStoi(m_smbProtectionConfig.m_backupColdData);
    INFOLOG("triggerTime backupHotData:%d, backupColdData:%d", backupHotData, backupColdData);
    // 0 disables the tiering filter; otherwise the cutoff is "now minus the window".
    scanConfig.expiredSkipTime = (backupHotData == 0) ? 0 : scanConfig.triggerTime - (time_t)backupHotData;
    scanConfig.expiredColdSkipTime = (backupColdData == 0) ? 0 : scanConfig.triggerTime - (time_t)backupColdData;
    INFOLOG("triggerTime is %llu, expiredSkipTime is %llu, backupHotData is %d",
            scanConfig.triggerTime, scanConfig.expiredSkipTime, (time_t)backupHotData);
    INFOLOG("triggerTime is %llu, expiredColdSkipTime is %llu, backupColdData is %d",
            scanConfig.triggerTime, scanConfig.expiredColdSkipTime, (time_t)backupColdData);

    // Entry-count / data-size thresholds differ between aggregate and native formats.
    if (IsAggregate()) {
        FillScanConfigForAggr(scanConfig);
    } else {
        FillScanConfigForNative(scanConfig);
    }
    scanConfig.skipDirs = SCAN_SKIP_DIRS;
    /* rollback snapdiff code here */
}

void HetroBackupJob::FillScanConfigForNative(ScanConfig &scanConfig)
{
    // Control-file sizing for non-aggregated (native format) backups: values come
    // from the job's advanced parameters when supplied, otherwise from the plugin
    // configuration file; minimums are clamped so they never exceed the maximums.
    auto cfgInt = [](const std::string &key) {
        return Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, key);
    };
    auto cfgStr = [](const std::string &key) {
        return Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION, key);
    };

    HetroProtectAdvParms advanced {};
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->extendInfo, advanced)) {
        HCP_Log(ERR, MODULE) << "Failed to parse protctEnv extendInfo json to struct" << HCPENDLOG;
    }

    if (advanced.m_fileCountThreshold.empty()) {
        scanConfig.scanCtrlMaxEntriesFullBkup = cfgInt("DME_NAS_SCAN_CTRL_MAX_ENTRIES_FULLBKUP");
        scanConfig.scanCtrlMaxEntriesIncBkup = cfgInt("DME_NAS_SCAN_CTRL_MAX_ENTRIES_INCRBKUP");
    } else {
        INFOLOG("FileCountThreshold: %s", advanced.m_fileCountThreshold.c_str());
        scanConfig.scanCtrlMaxEntriesFullBkup = PluginUtils::SafeStou32(advanced.m_fileCountThreshold);
        scanConfig.scanCtrlMaxEntriesIncBkup = PluginUtils::SafeStou32(advanced.m_fileCountThreshold);
    }

    scanConfig.scanCtrlMinEntriesFullBkup = cfgInt("DME_NAS_SCAN_CTRL_MIN_ENTRIES_FULLBKUP");
    if (scanConfig.scanCtrlMinEntriesFullBkup > scanConfig.scanCtrlMaxEntriesFullBkup) {
        scanConfig.scanCtrlMinEntriesFullBkup = scanConfig.scanCtrlMaxEntriesFullBkup;
    }
    scanConfig.scanCtrlMinEntriesIncBkup = cfgInt("DME_NAS_SCAN_CTRL_MIN_ENTRIES_INCRBKUP");
    if (scanConfig.scanCtrlMinEntriesIncBkup > scanConfig.scanCtrlMaxEntriesIncBkup) {
        scanConfig.scanCtrlMinEntriesIncBkup = scanConfig.scanCtrlMaxEntriesIncBkup;
    }

    if (advanced.m_fileSizeThreshold.empty()) {
        scanConfig.scanCtrlMaxDataSize = cfgStr("DME_NAS_SCAN_CTRL_MAX_DATASIZE");
    } else {
        INFOLOG("FileSizeThreshold: %s", advanced.m_fileSizeThreshold.c_str());
        scanConfig.scanCtrlMaxDataSize = PluginUtils::ConvertGBToBytes(advanced.m_fileSizeThreshold);
    }

    scanConfig.scanCtrlMinDataSize = cfgStr("DME_NAS_SCAN_CTRL_MIN_DATASIZE");
    if (PluginUtils::SafeStou64(scanConfig.scanCtrlMinDataSize)
        > PluginUtils::SafeStou64(scanConfig.scanCtrlMaxDataSize)) {
        INFOLOG("scanCtrlMinDataSize is greater than scanCtrlMaxDataSize, set them equal");
        scanConfig.scanCtrlMinDataSize = scanConfig.scanCtrlMaxDataSize;
    }

    scanConfig.scanCopyCtrlFileSize = cfgInt("DME_NAS_SCAN_CTRL_FILE_SIZE");
}
 
void HetroBackupJob::FillScanConfigForAggr(ScanConfig &scanConfig)
{
    // Control-file sizing for aggregate-format backups (the *_AGGR variants of the
    // native-format configuration keys in FillScanConfigForNative).
    HetroProtectAdvParms advParms {};
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->extendInfo, advParms)) {
        HCP_Log(ERR, MODULE) << "Failed to parse protctEnv extendInfo json to struct" << HCPENDLOG;
    }

    if (!advParms.m_fileCountThreshold.empty()) {
        INFOLOG("FileCountThreshold: %s", advParms.m_fileCountThreshold.c_str());
        // CONSISTENCY FIX: use the unsigned parser here, as FillScanConfigForNative
        // does — Module::SafeStoi could yield a negative value for large thresholds.
        scanConfig.scanCtrlMaxEntriesFullBkup = PluginUtils::SafeStou32(advParms.m_fileCountThreshold);
        scanConfig.scanCtrlMaxEntriesIncBkup = PluginUtils::SafeStou32(advParms.m_fileCountThreshold);
    } else {
        scanConfig.scanCtrlMaxEntriesFullBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_ENTRIES_FULLBKUP_AGGR");
        scanConfig.scanCtrlMaxEntriesIncBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_ENTRIES_INCRBKUP_AGGR");
    }
    scanConfig.scanCtrlMinEntriesFullBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_ENTRIES_FULLBKUP_AGGR");
    scanConfig.scanCtrlMinEntriesIncBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_ENTRIES_INCRBKUP_AGGR");
    // Clamp minimums so they never exceed the corresponding maximums.
    if (scanConfig.scanCtrlMinEntriesFullBkup > scanConfig.scanCtrlMaxEntriesFullBkup) {
        scanConfig.scanCtrlMinEntriesFullBkup = scanConfig.scanCtrlMaxEntriesFullBkup;
    }

    if (scanConfig.scanCtrlMinEntriesIncBkup > scanConfig.scanCtrlMaxEntriesIncBkup) {
        scanConfig.scanCtrlMinEntriesIncBkup = scanConfig.scanCtrlMaxEntriesIncBkup;
    }
    if (!advParms.m_fileSizeThreshold.empty()) {
        INFOLOG("FileSizeThreshold: %s", advParms.m_fileSizeThreshold.c_str());
        scanConfig.scanCtrlMaxDataSize = PluginUtils::ConvertGBToBytes(advParms.m_fileSizeThreshold);
    } else {
        scanConfig.scanCtrlMaxDataSize = Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_DATASIZE_AGGR");
    }
    scanConfig.scanCtrlMinDataSize = Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_DATASIZE_AGGR");
    if (PluginUtils::SafeStou64(scanConfig.scanCtrlMinDataSize)
        > PluginUtils::SafeStou64(scanConfig.scanCtrlMaxDataSize)) {
        // Log the clamp for supportability, as the native-format path does.
        INFOLOG("scanCtrlMinDataSize is greater than scanCtrlMaxDataSize, set them equal");
        scanConfig.scanCtrlMinDataSize = scanConfig.scanCtrlMaxDataSize;
    }
    scanConfig.scanCopyCtrlFileSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_SIZE_AGGR");
}

void HetroBackupJob::FillSnapDiffScanConfig(ScanConfig &scanConfig)
{
    // Configure the scanner to generate control files from an OceanStor V5
    // snapshot diff instead of walking the whole share.
    HCP_Log(INFO, MODULE) << "start fill scan config for V5 Snapdiff" << HCPENDLOG;

    scanConfig.scanType = ScanJobType::SNAPDIFFNAS_GEN;
    scanConfig.scanIO = IOEngine::SNAPDIFFNAS;
    scanConfig.scanCheckPointEnable = false;
    scanConfig.nasSnapdiffProtocol = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS)
        ? NAS_PROTOCOL::NFS : NAS_PROTOCOL::SMB;

    // The diff is computed between the previous copy's snapshot and the new one.
    scanConfig.baseSnapshotName = m_prevBackupCopyInfo.m_oceanStorV5lastSnapshotName;
    scanConfig.incSnapshotName = m_generalInfo.m_remoteNasShareSnapshotName;

    // Management-plane connection settings of the storage device.
    scanConfig.deviceResourceName = m_nasShare.sharePath;
    scanConfig.deviceUrl = m_nasHost.mgrIp;
    scanConfig.devicePort = std::to_string(m_nasHost.port);
    scanConfig.deviceUsername = m_nasHost.auth.authkey;
    scanConfig.devicePassword = m_nasHost.auth.authPwd;
    scanConfig.deviceCert = "";
    scanConfig.devicePoolID = 0;

    // Wipe the in-memory password once it has been handed over to the scan config.
    Module::CleanMemoryPwd(m_nasHost.auth.authPwd);

    INFOLOG("FillSnapDiffScanConfig device info, resource =  %s, url = %s, port = %s, username = %s",
        scanConfig.deviceResourceName.c_str(), scanConfig.deviceUrl.c_str(), scanConfig.devicePort.c_str(),
        scanConfig.deviceUsername.c_str());
}

bool HetroBackupJob::GenerateSubJobList(std::vector<SubJob> &subJobList, std::vector<std::string> &ctrlFileList,
    const std::string &dstCtrlFileRelPath, const std::string &ctrlFileFullPath)
{
    SubJob subJob {};
    uint32_t subTaskType {};

    if (!InitSubTask(subJob, m_jobId, dstCtrlFileRelPath, subTaskType, "")) {
        HCP_Log(ERR, MODULE) << "Init subtask failed" << HCPENDLOG;
        return false;
    }

    subJobList.push_back(subJob);
    ctrlFileList.push_back(ctrlFileFullPath);

    return true;
}

bool HetroBackupJob::CreateSubTasksFromCtrlFile(std::string srcDir, std::string dstDir,
    uint32_t subTaskType, bool isFinal)
{
    std::vector<std::string> srcFileList {};
    // Back-off marker shared across invocations: after a sub-task creation error,
    // non-final rounds skip work for NUMBER180 seconds before retrying.
    // NOTE(review): function-local static — not thread-safe if multiple jobs run
    // this concurrently in one process; confirm the framework serializes calls.
    static int64_t lastCreateJobErrTime = 0;

    if (!isFinal && lastCreateJobErrTime != 0 &&
        ((GetCurrentTimeInSeconds() - lastCreateJobErrTime) < NUMBER180)) {
        HCP_Log(WARN, MODULE) << "lastCreateJobErrTime 180 sec check failed, try later" << HCPENDLOG;
        return true;
    }
    lastCreateJobErrTime = 0;

    // The final copy-phase round also persists the copy metadata.
    if ((subTaskType == SUBJOB_TYPE_DATACOPY_COPY_PHASE) && isFinal) {
        if (!CreateBackupCopyMetaTask()) {
            HCP_Log(ERR, MODULE) << "Create Backup Copy Meta Failed!" << HCPENDLOG;
            return false;
        }
    }

    if (!checkFilePathAndGetSrcFileList(srcDir, dstDir, srcFileList)) {
        return false;
    }

    // BUGFIX: srcFileList.size() is size_t — passing it to %d is undefined behavior
    // in a printf-style varargs call; use %zu and pass the bool as an int.
    INFOLOG("NumOfCtrlFiles: %zu isFinal: %d", srcFileList.size(), static_cast<int>(isFinal));
    return CreateSubTasks(srcFileList, dstDir, subTaskType, lastCreateJobErrTime, isFinal);
}

bool HetroBackupJob::CreateSubTasks(const std::vector<std::string>& srcFileList, const std::string& dstDir,
    uint32_t subTaskType, int64_t& lastCreateJobErrTime, bool isFinal)
{
    std::vector<SubJob> subJobList {};
    std::vector<std::string> ctrlFileList {};
    uint32_t validCtrlFileCntr = 0;
    for (const std::string& ctrlFileFullPath : srcFileList) {
        if (IsAbortJob()) {
            HCP_Log(INFO, MODULE) << "Exit Abort for taskid: " << m_jobId << ", subtaskid: " << m_subJobId << HCPENDLOG;
            return true;
        }

        if (!IsValidCtrlFile(subTaskType, ctrlFileFullPath))
            continue;
        if (!isFinal && validCtrlFileCntr++ >= NUMBER100)
            break;

        std::string ctrlFile = GetFileName(ctrlFileFullPath);

        std::string dstCtrlFileFullPath = dstDir + "/" + ctrlFile;
        std::string dstCtrlFileRelPath = dstCtrlFileFullPath.substr(m_cacheFsPath.length(), string::npos);

        CopyFile(ctrlFileFullPath, dstDir);

        if (!GenerateSubJobList(subJobList, ctrlFileList, dstCtrlFileRelPath, ctrlFileFullPath)) {
            HCP_Log(ERR, MODULE) << "Exit CreateSubTasksFromCtrlFile, GenerateSubJobList failed" << HCPENDLOG;
            return false;
        }

        // We create 10 Jobs at a time. If 10 is not accumulated, continue
        if (subJobList.size() % NUMBER10 != 0)
            continue;

        if (!CheckAndRetryCreateSubTask(subJobList, ctrlFileList, lastCreateJobErrTime, isFinal))
            return false;
    }

    if (!CheckAndRetryCreateSubTask(subJobList, ctrlFileList, lastCreateJobErrTime, isFinal))
        return false;

    return true;
}

bool HetroBackupJob::UpdateCopyPhaseStartTimeInGenRsc()
{
    // Record the copy-phase start time into the shared general resource exactly
    // once; a non-zero value means an earlier sub task already recorded it.
    if (m_generalInfo.m_backupCopyPhaseStartTime != 0) {
        return true;
    }

    HCP_Log(INFO, MODULE) << "Enter UpdateCopyPhaseStartTimeInGenRsc" << HCPENDLOG;

    const auto startTime = GetCurrentTimeFromRemoteServer(m_metaFsPath);
    if (startTime == 0) {
        HCP_Log(ERR, MODULE) << "Get current time of first backup copy subtask failed" << HCPENDLOG;
        return false;
    }
    m_generalInfo.m_backupCopyPhaseStartTime = startTime;

    if (!UpdateGeneralResource(m_jobId, m_generalInfo)) {
        HCP_Log(ERR, MODULE) << "UpdateGeneralResource failed" << HCPENDLOG;
        return false;
    }

    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
        "nas_plugin_hetro_backup_data_start_label", JobLogLevel::TASK_LOG_INFO);

    HCP_Log(INFO, MODULE) << "Exit UpdateCopyPhaseStartTimeInGenRsc" << HCPENDLOG;
    return true;
}

bool HetroBackupJob::HandleMonitorScannerCompletion(SubJobStatus::type &jobStatus, std::string &jobLogLabel,
    const std::string &scanCtrFilePath, const std::string &scanMetaFilePath, const std::string &bkupCtrFilePath)
{
    // Final round of sub-task creation after the scanner finished. The two
    // formats run different phase sets (aggregate backups have no delete/mtime
    // replay); the previously duplicated branches are deduplicated into one
    // ordered phase list — creation order and fail-fast behavior are unchanged.
    const std::vector<uint32_t> phases = IsAggregate()
        ? std::vector<uint32_t> { SUBJOB_TYPE_DATACOPY_COPY_PHASE, SUBJOB_TYPE_DATACOPY_HARDLINK_PHASE }
        : std::vector<uint32_t> { SUBJOB_TYPE_DATACOPY_COPY_PHASE, SUBJOB_TYPE_DATACOPY_DELETE_PHASE,
              SUBJOB_TYPE_DATACOPY_HARDLINK_PHASE, SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE };
    for (uint32_t phase : phases) {
        if (!CreateSubTasksFromCtrlFile(scanCtrFilePath, bkupCtrFilePath, phase, true)) {
            HCP_Log(ERR, MODULE) << "Create subtask failed" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            return false;
        }
    }

    if (!CreateBackupJobTeardownTask() || !CreateCheckSubTask()) {
        HCP_Log(ERR, MODULE) << "Create Teardown subtask or check subtask failed" << HCPENDLOG;
        jobStatus = SubJobStatus::FAILED;
        jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
        return false;
    }

    return true;
}

bool HetroBackupJob::MonitorScanner(HetroNativeScanStatistics &scanStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Poll the scanner until it leaves the in-progress state. Each round:
    // refresh statistics, translate the scanner status into job status/label/
    // progress, forward abort requests, create copy-phase sub tasks from the
    // control files produced so far, and report progress.
    HCP_Log(INFO, MODULE) << "Enter Monitor Scanner" << HCPENDLOG;
    std::string scanCtrlFilePath = m_cacheFsPath + "/backup-job/scan/ctrl";
    std::string backupCtrlFilePath = m_cacheFsPath + "/backup-job/backup/ctrl";
    std::string scanMetaFilePath = m_cacheFsPath + "/backup-job/scan/meta/latest";
    SCANNER_TASK_STATUS scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS;
    jobStatus = SubJobStatus::RUNNING;
    jobProgress = 0;
    jobLogLabel = "";

    do {
        m_scanStatus = m_scanner->GetStatus();
        /* Ensure scanner is ready and start to scan */
        if (m_scanStatus == SCANNER_STATUS::INIT) {
            sleep(SUBTASK_WAIT_FOR_SCANNER_READY_IN_SEC);
            continue;
        }
        UpdateScannerStatistics(scanStatistics, m_scanStats, m_isScannerRestarted, m_jobId);
        FillMonitorScannerVarDetails(scanTaskStatus, jobStatus, jobLogLabel, jobProgress);
        // Any status other than in-progress (success/failed/aborted) ends the loop.
        if (scanTaskStatus != SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS) {
            break;
        }

        if (IsAbortJob()) {
            HCP_Log(INFO, MODULE) << "Scanner - Abort is invocked for" << " taskid: " << m_jobId
                << ", subtaskid: " << m_subJobId << HCPENDLOG;
            if (SCANNER_STATUS::SUCCESS != m_scanner->Abort()) {
                HCP_Log(ERR, MODULE) << "scanner Abort is failed" << HCPENDLOG;
            }
        }
        // Incrementally create copy sub tasks from the control files available so
        // far; if creation fails, abort the scan rather than leaving it running.
        if (!CreateSubTasksFromCtrlFile(scanCtrlFilePath, backupCtrlFilePath, SUBJOB_TYPE_DATACOPY_COPY_PHASE)) {
            HCP_Log(ERR, MODULE) << "Create subtask failed, abort scan" << HCPENDLOG;
            m_scanner->Abort();
        }
        ReportScannerRunningStatus(scanStatistics);
        sleep(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC);
    } while (true);
    m_scanner->Destroy();
    // Queue a follow-up job that creates the remaining sub tasks after scanning ends.
    if (!CreateBackupJobTaskToCreateFurtherSubTasks()) {
        return false;
    }
    HCP_Log(INFO, MODULE) << "Exit Monitor Scanner" << HCPENDLOG;
    return true;
}

bool HetroBackupJob::ReportScannerRunningStatus(HetroNativeScanStatistics &scanStatistics)
{
    // Throttle the detailed progress label to once per SCANNER_REPORT_CIRCLE_TIME;
    // in between, send an empty-label heartbeat so the job stays marked RUNNING.
    const bool detailDue =
        (GetCurrentTimeInSeconds() - m_lastScannerReportTime) > SCANNER_REPORT_CIRCLE_TIME;
    if (!detailDue) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return true;
    }

    INFOLOG("nas_plugin_hetro_backup_scan_inprogress_label, totalDirs:%llu, totalFiles:%llu, totalSizes:%s",
        scanStatistics.m_totDirs, scanStatistics.m_totFiles, FormatCapacity(scanStatistics.m_totalSize).c_str());
    ReportJobDetails(SubJobStatus::RUNNING,
                     PROGRESS0,
                     "nas_plugin_hetro_backup_scan_inprogress_label",
                     JobLogLevel::TASK_LOG_INFO,
                     std::to_string(scanStatistics.m_totDirs),
                     std::to_string(scanStatistics.m_totFiles),
                     FormatCapacity(scanStatistics.m_totalSize),
                     std::to_string(scanStatistics.m_totDirsToBackup),
                     std::to_string(scanStatistics.m_totFilesToBackup),
                     FormatCapacity(scanStatistics.m_totalSizeToBackup));
    m_lastScannerReportTime = GetCurrentTimeInSeconds();
    return true;
}

bool HetroBackupJob::ReportScannerCompleteStatus(HetroNativeScanStatistics &scanStatistics)
{
    // Report the final scan summary; directories that failed to scan downgrade
    // the label to a warning and add the failure count to the message arguments.
    const std::string totalDirs = std::to_string(scanStatistics.m_totDirs);
    const std::string totalFiles = std::to_string(scanStatistics.m_totFiles);
    const std::string totalSize = FormatCapacity(scanStatistics.m_totalSize);
    const std::string dirsToBackup = std::to_string(scanStatistics.m_totDirsToBackup);
    const std::string filesToBackup = std::to_string(scanStatistics.m_totFilesToBackup);
    const std::string sizeToBackup = FormatCapacity(scanStatistics.m_totalSizeToBackup);

    if (scanStatistics.m_totFailedDirs == 0) {
        ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100,
                         "nas_plugin_hetro_backup_scan_completed_label", JobLogLevel::TASK_LOG_INFO,
                         totalDirs, totalFiles, totalSize, dirsToBackup, filesToBackup, sizeToBackup);
    } else {
        ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100,
                         "nas_plugin_hetro_backup_scan_completed_with_warn_label", JobLogLevel::TASK_LOG_WARNING,
                         totalDirs, totalFiles, totalSize,
                         std::to_string(scanStatistics.m_totFailedDirs),
                         dirsToBackup, filesToBackup, sizeToBackup);
    }
    return true;
}

void HetroBackupJob::FillMonitorScannerVarDetails(SCANNER_TASK_STATUS &scanTaskStatus,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Translate the scanner engine status (m_scanStatus) into the plugin's
    // job status / log label / progress triple. Statuses not listed here leave
    // all outputs untouched, exactly as the previous if/else chain did.
    switch (m_scanStatus) {
        case SCANNER_STATUS::COMPLETED:
            HCP_Log(INFO, MODULE) << "Scan completed" << HCPENDLOG;
            jobProgress = PROGRESS100;
            jobStatus = SubJobStatus::COMPLETED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_completed_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_SUCCESS;
            break;
        case SCANNER_STATUS::FAILED:
            HCP_Log(ERR, MODULE) << "Scan failed" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ABORT_IN_PROGRESS:
            HCP_Log(ERR, MODULE) << "Scan abort in progress" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTING;
            jobLogLabel = "";
            break;
        case SCANNER_STATUS::ABORTED:
            HCP_Log(ERR, MODULE) << "Scan aborted" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTED;
            jobLogLabel = "";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_ABORTED;
            break;
        case SCANNER_STATUS::SCAN_READ_COMPLETED:
        case SCANNER_STATUS::CTRL_DIFF_IN_PROGRESS:
            // Scanner is still producing control files; keep the sub job running.
            jobStatus = SubJobStatus::RUNNING;
            jobLogLabel = "";
            break;
        case SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as sec nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as protected nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ERROR_INC_TO_FULL:
            HCP_Log(ERR, MODULE) << "Scan failed as to change INC to FULL Backup" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::INCOMPLETE_SCAN_REACH_LIMIT:
            HCP_Log(ERR, MODULE) << "Scan failed as too many files stat failed" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        default:
            break;
    }
}

void HetroBackupJob::FillBackupConfigPhase(BackupParams &backupParams, HetroBackupSubJob &backupSubJob)
{
    // Map the sub-task type carried by the sub job onto the backup engine phase.
    // Unknown task types leave backupParams.phase untouched, as before.
    const uint32_t taskType = backupSubJob.m_SubTaskType;
    if (taskType == SUBJOB_TYPE_DATACOPY_COPY_PHASE) {
        backupParams.phase = BackupPhase::COPY_STAGE;
    } else if (taskType == SUBJOB_TYPE_DATACOPY_HARDLINK_PHASE) {
        backupParams.phase = BackupPhase::HARDLINK_STAGE;
    } else if (taskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE) {
        backupParams.phase = BackupPhase::DELETE_STAGE;
    } else if (taskType == SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE) {
        backupParams.phase = BackupPhase::DIR_STAGE;
    }
}

void HetroBackupJob::FillBackupNFSConfig(BackupParams &backupParams)
{
    // Build libnfs advance parameters for both sides of the copy: the protected
    // share (source) and the backup storage file system (destination).
    auto cfgInt = [](const std::string &key) {
        return Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, key);
    };

    LibnfsBackupAdvanceParams srcParams {};
    srcParams.maxPendingAsyncReqCnt = cfgInt("DME_NAS_BACKUP_MAX_PENDING_ASYNC_REQ_CNT");
    srcParams.minPendingAsyncReqCnt = cfgInt("DME_NAS_BACKUP_MIN_PENDING_ASYNC_REQ_CNT");
    srcParams.maxPendingWriteReqCnt = cfgInt("DME_NAS_BACKUP_MAX_PENDING_WRITE_REQ_CNT");
    srcParams.minPendingWriteReqCnt = cfgInt("DME_NAS_BACKUP_MIN_PENDING_WRITE_REQ_CNT");
    srcParams.maxPendingReadReqCnt = cfgInt("DME_NAS_BACKUP_MAX_PENDING_READ_REQ_CNT");
    srcParams.minPendingReadReqCnt = cfgInt("DME_NAS_BACKUP_MIN_PENDING_READ_REQ_CNT");
    srcParams.serverCheckMaxCount = cfgInt("DME_NAS_BACKUP_NAS_SERVER_CHECK_MAX_ERR_COUNT");
    srcParams.serverCheckSleepTime = cfgInt("DME_NAS_BACKUP_NAS_SERVER_CHECK_SLEEP_TIME");
    srcParams.serverCheckRetry = cfgInt("DME_NAS_BACKUP_NAS_SERVER_CHECK_RETRY_CNT");
    srcParams.jobStartTime = m_generalInfo.m_backupCopyPhaseStartTime;
    srcParams.deleteJobStartTime = m_generalInfo.m_backupDelPhaseStartTime;
    srcParams.protocolVersion = m_generalInfo.m_protocolVersion;

    // The destination inherits every shared knob before the side-specific fields.
    LibnfsBackupAdvanceParams dstParams = srcParams;

    srcParams.ip = StripSqrBracketsFromIpAddress(m_nasShare.nasShareExt.m_serviceIP);
    srcParams.sharePath = m_generalInfo.m_remoteNasShareSnapshotPath;

    // Round-robin over the backup storage hosts to spread sub tasks across IPs.
    std::string backupIp =
        m_dataFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size()].ip;
    HCP_Log(INFO, MODULE) << "Loadbalance selected, backupParams.backupIp: " << backupIp << HCPENDLOG;
    dstParams.ip = StripSqrBracketsFromIpAddress(backupIp);
    // Aggregated copies are written under a per-copy sub directory.
    dstParams.sharePath = IsAggregate() ? m_dataFs.remotePath + "/" + m_backupJobPtr->copy.id :
        m_dataFs.remotePath;
    dstParams.dataPath = m_dataFsPath;

    backupParams.srcAdvParams = make_shared<LibnfsBackupAdvanceParams>(srcParams);
    backupParams.dstAdvParams = make_shared<LibnfsBackupAdvanceParams>(dstParams);
}

void HetroBackupJob::FillBackupCIFSConfig(BackupParams &backupParams)
{
    // Build libsmb advance parameters for the protected share (source) and the
    // backup storage share (destination).
    std::shared_ptr<LibsmbBackupAdvanceParams> srcAdvParams = make_shared<LibsmbBackupAdvanceParams>();
    std::shared_ptr<LibsmbBackupAdvanceParams> dstAdvParams = make_shared<LibsmbBackupAdvanceParams>();
    backupParams.srcAdvParams = srcAdvParams;
    backupParams.dstAdvParams = dstAdvParams;

    // source device, backup is protected side
    srcAdvParams->server = m_nasShare.nasShareExt.m_serviceIP;
    srcAdvParams->share = m_generalInfo.m_remoteNasShareSnapshotPath;
    srcAdvParams->version = m_generalInfo.m_protocolVersion;

    // Non-zero config value enables alternate-data-stream backup.
    int backupAdsFlag = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "DME_CIFS_BACKUP_ADS");
    srcAdvParams->backupAds = (backupAdsFlag != 0);
    HCP_Log(DEBUG, MODULE) << "backupAds: " << srcAdvParams->backupAds << HCPENDLOG;

    srcAdvParams->maxPendingAsyncReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_MAX_PENDING_ASYNC_REQ_CNT");
    srcAdvParams->serverCheckMaxCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_SERVER_CHECK_MAX_ERR_COUNT");
    srcAdvParams->maxOpenedFilehandleCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_MAX_OPENED_FILEHANDLE_COUNT");
    srcAdvParams->pollExpiredTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_POLL_EXPIRED_TIME");
    if (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION) {
        HCP_Log(DEBUG, MODULE) << "nas share encryption is true." << HCPENDLOG;
        srcAdvParams->encryption = true;
    } else {
        srcAdvParams->encryption = false;
    }
    srcAdvParams->sign = false;
    srcAdvParams->timeout = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_PROTECTED_CONNECTION_TIMEOUT");
    if (m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        srcAdvParams->authType = "krb5";
    } else {
        srcAdvParams->authType = "ntlmssp";
    }
    HCP_Log(DEBUG, MODULE) << "src params auth type:"<< srcAdvParams->authType << HCPENDLOG;

    // destination device, backup is backup side
    dstAdvParams->server = m_dataFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size()].ip;
    dstAdvParams->share = "/" + m_dataFs.remoteName;
    dstAdvParams->version = m_generalInfo.m_protocolVersion;
    dstAdvParams->encryption = false;
    dstAdvParams->sign = false;

    dstAdvParams->timeout = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_STORAGE_CONNECTION_TIMEOUT");

    INFOLOG("adv params: %d %d %d %d %d %d", srcAdvParams->maxPendingAsyncReqCnt,
        srcAdvParams->serverCheckMaxCount, srcAdvParams->maxOpenedFilehandleCount,
        srcAdvParams->pollExpiredTime, srcAdvParams->timeout, dstAdvParams->timeout);
    dstAdvParams->authType = "ntlmssp";
    // aggragate backup need copyID path prefix
    // BUGFIX: the old code used m_dataFs.subDirPath.erase(0, 1), which permanently
    // strips one character from the member on every call — running this function
    // twice would corrupt the path. Build the value from a non-mutating copy
    // instead (guarding the empty-string case, which substr(1) would throw on).
    const std::string subDirNoLeadingChar =
        m_dataFs.subDirPath.empty() ? std::string() : m_dataFs.subDirPath.substr(1);
    dstAdvParams->rootPath = IsAggregate() ? m_backupJobPtr->copy.id : subDirNoLeadingChar;
    HCP_Log(DEBUG, MODULE) << "dst params auth type:"<< dstAdvParams->authType << HCPENDLOG;
}

bool HetroBackupJob::IsAggregate()
{
    // Whether this copy uses the aggregate (small-file packing) format.
    return m_aggrInfo.m_isAggregate == "2"; // "2" = aggregated, "1" = not aggregated
}

bool HetroBackupJob::GetAggCopyExtendInfo(std::string& jsonString)
{
    AggCopyExtendInfo aggCopyExtendInfo;
    if (IsAggregate()) {
        aggCopyExtendInfo.isAggregation = "true";
        aggCopyExtendInfo.dataPathSuffix = m_backupJobPtr->copy.id;
        aggCopyExtendInfo.metaPathSuffix = m_backupJobPtr->copy.id;
        aggCopyExtendInfo.maxSizeToAggregate = m_aggrInfo.m_maxSizeToAggregate;
        aggCopyExtendInfo.maxSizeAfterAggregate = m_aggrInfo.m_maxSizeAfterAggregate;
    } else {
        aggCopyExtendInfo.isAggregation = "false";
        aggCopyExtendInfo.dataPathSuffix = "";
        aggCopyExtendInfo.metaPathSuffix = "";
        aggCopyExtendInfo.maxSizeToAggregate = "0";
        aggCopyExtendInfo.maxSizeAfterAggregate = "0";
    }
    if (!Module::JsonHelper::StructToJsonString(aggCopyExtendInfo, jsonString)) {
        ERRLOG("Exit ReportCopyAdditionalInfo Failed,aggCopyExtendInfo json trans failed");
        return false;
    }
    return true;
}

void HetroBackupJob::FillAggregateBackupCommonParams(BackupParams &backupParams)
{
    // Aggregate-format copies pack small files together, so per-file metadata is
    // not written by the backup engine.
    backupParams.commonParams.writeMeta = false;
    backupParams.commonParams.backupDataFormat = BackupDataFormat::AGGREGATE;
    // BUGFIX: std::stoul throws std::invalid_argument / std::out_of_range on a
    // malformed or empty parameter string; use the project's safe parser instead
    // (consistent with the scan-config code above) so bad input cannot crash the job.
    backupParams.commonParams.maxAggregateFileSize = PluginUtils::SafeStou64(m_aggrInfo.m_maxSizeAfterAggregate);
    backupParams.commonParams.maxFileSizeToAggregate = PluginUtils::SafeStou64(m_aggrInfo.m_maxSizeToAggregate);
    int aggregateThreadNum = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "DME_NAS_AGGR_THREAD_POOL_CNT");
    HCP_Log(INFO, MODULE) <<" FillBackupCommonParams Aggregate Info, aggregateThreadNum: " << aggregateThreadNum <<
        " maxAggregateFileSize: " << backupParams.commonParams.maxAggregateFileSize <<
        " maxFileSizeToAggregate: " << backupParams.commonParams.maxFileSizeToAggregate << HCPENDLOG;
    // Fall back to the default when the configured thread count is out of range.
    if (aggregateThreadNum > MAX_AGGREGATE_NUM || aggregateThreadNum <= 0) {
        aggregateThreadNum = DEFAULT_AGGREGATE_NUM;
    }
    backupParams.commonParams.aggregateThreadNum = aggregateThreadNum;
    // ACLs are not written for CIFS shares in aggregate format.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        backupParams.commonParams.writeAcl = false;
    }

    if (IsSubTaskStatsFileExists()) {
        backupParams.commonParams.isReExecutedTask = true;
        HCP_Log(INFO, MODULE) << "Previous Job subTask has been restarted." << HCPENDLOG;
    }
}

void HetroBackupJob::FillBackupCommonParams(BackupParams &backupParams)
{
    // Fill the format-independent backup parameters: job identifiers, buffer
    // limits, transfer block size, failure-record settings and data format.
    backupParams.commonParams.jobId = m_jobId;
    backupParams.commonParams.subJobId = m_subJobId;
    backupParams.commonParams.reqID = m_subJobRequestId;
    backupParams.commonParams.maxErrorFiles = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_MAX_ERR_COUNT");
    backupParams.commonParams.maxBufferCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_TOTAL_BLOCK_BUFFER_CNT");
    backupParams.commonParams.maxBufferSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_TOTAL_BLOCK_BUFFER_SIZE");
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        backupParams.commonParams.blockSize = ONE_MB; // ONE MB
    } else if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        int64_t errCode = 0;
        // Connect to both the source and destination SMB servers and use the
        // minimum of their read/write block sizes (cached in m_maxSmbBlockSize).
        bool ret = CheckSmbConnectVersion(
            HetroCommonService::ConvertStringToSmbVersion(m_generalInfo.m_protocolVersion), errCode);
        if (ret == MP_TRUE) {
            backupParams.commonParams.blockSize = m_maxSmbBlockSize;
        } else {
            backupParams.commonParams.blockSize = DEFAULT_SMB_BLOCK_SIZE;
        }
    }
    backupParams.commonParams.restoreReplacePolicy = RestoreReplacePolicy::NONE;
    backupParams.commonParams.metaPath = m_metaFsPath;
    backupParams.commonParams.writeDisable = false;
    backupParams.commonParams.writeSparseFile = (m_smbProtectionConfig.m_isSparseFileDetection == "true");

    backupParams.commonParams.failureRecordRootPath = m_cacheFsPath;
    // BUGFIX: log the path actually assigned above (m_cacheFsPath); the old
    // code printed the unrelated m_failureRecordRoot member.
    INFOLOG("fill failure recorder params, jobID %s, subJobId %s, failureRecordRootPath %s",
        m_jobId.c_str(), m_subJobId.c_str(), backupParams.commonParams.failureRecordRootPath.c_str());
    /* recorder quota ran out */
    HetroNativeBackupStats backupStatistics {};
    std::string filePath = m_metaFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);
    if (backupStatistics.m_noOfFailureRecordsWritten >= m_maxFailureRecordsNum) {
        backupParams.commonParams.maxFailureRecordsNum = 0;
    }

    if (IsAggregate()) {
        FillAggregateBackupCommonParams(backupParams);
    } else {
        backupParams.commonParams.writeMeta = true;
        backupParams.commonParams.backupDataFormat = BackupDataFormat::NATIVE;
        if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
            backupParams.commonParams.writeAcl = true;
        }
    }
}

void HetroBackupJob::FillBackupConfig(BackupParams &backupParams, HetroBackupSubJob &backupSubJob)
{
    // Select the IO engines by share protocol, then fill the phase-specific
    // and the common engine parameters.
    const bool isNfsShare = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS);
    const bool isCifsShare = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS);
    if (isNfsShare) {
        backupParams.srcEngine = BackupIOEngine::LIBNFS;
        backupParams.dstEngine = BackupIOEngine::LIBNFS;
        FillBackupNFSConfig(backupParams);
    } else if (isCifsShare) {
        backupParams.srcEngine = BackupIOEngine::LIBSMB;
        backupParams.dstEngine = BackupIOEngine::LIBSMB;
        FillBackupCIFSConfig(backupParams);
        FillBackupSmbConfigAuthInfo(backupParams, m_jobId, m_dataFs, m_nasShare, BackupType::BACKUP_FULL);
    }

    FillBackupConfigPhase(backupParams, backupSubJob);
    FillBackupCommonParams(backupParams);
    backupParams.backupType = IsFullBackup() ? BackupType::BACKUP_FULL : BackupType::BACKUP_INC;
    backupParams.scanAdvParams.metaFilePath = m_cacheFsPath + "/backup-job/scan/meta/latest/";
    backupParams.scanAdvParams.useXmetaFileHandle = true;
}

bool HetroBackupJob::StartBackup(HetroBackupSubJob backupSubJob)
{
    // Create a backup engine instance for this subjob, queue its control
    // file, and start it. Returns false on any setup failure.
    BackupParams backupParams {};
    FillBackupConfig(backupParams, backupSubJob);

    m_backup = FS_Backup::BackupMgr::CreateBackupInst(backupParams);
    if (m_backup == nullptr) {
        HCP_Log(ERR, MODULE) << "Create backup instance failed" << HCPENDLOG;
        return false;
    }
    if (m_backup->Enqueue(backupSubJob.m_ControlFile) != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "enqueue backup instance failed" << HCPENDLOG;
        return false;
    }
    if (m_backup->Start() != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start backup instance failed" << HCPENDLOG;
        return false;
    }
    return true;
}

HetroBackupJob::MONITOR_BACKUP_RES_TYPE HetroBackupJob::MonitorBackup(BackupStats &backupStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Poll the running backup instance until it reaches a terminal state
    // (completed / failed / aborted) or is detected as stuck.
    // Out-params: jobStatus, jobLogLabel and jobProgress carry the final
    // state to the caller; backupStatistics is refreshed with the latest
    // engine counters on every poll.
    HCP_Log(INFO, MODULE) << "Enter Monitor Backup" << HCPENDLOG;
    jobStatus = SubJobStatus::RUNNING;
    jobProgress = 0;
    jobLogLabel = "";
    BackupStats tmpStats;
    time_t statLastUpdateTime = PluginUtils::GetCurrentTimeInSeconds();

    do {
        m_backupStatus = m_backup->GetStatus();
        HCP_Log(INFO, MODULE) << "m_backupStatus:" << static_cast<int>(m_backupStatus) << HCPENDLOG;

        tmpStats = m_backup->GetStats();
        /* If all files have already been written but the task is still in progress, the aggregate SQL task
           has not finished yet; that is not a stall, so no re-backup is needed. */
        if (backupStatistics != tmpStats) {
            // Counters changed: remember when progress was last observed.
            statLastUpdateTime = PluginUtils::GetCurrentTimeInSeconds();
            INFOLOG("backup statistics last update time: %ld", statLastUpdateTime);
            backupStatistics = tmpStats;
        } else if (m_backupStatus == BackupPhaseStatus::INPROGRESS &&
            (tmpStats.noOfFilesCopied + tmpStats.noOfFilesFailed != tmpStats.noOfFilesToBackup) &&
            PluginUtils::GetCurrentTimeInSeconds() - statLastUpdateTime >
            Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "BACKUP_STUCK_TIME")) {
            // No progress beyond the configured stuck window while files are
            // still pending: abort the engine and ask the caller to retry.
            HandleMonitorStuck(backupStatistics, jobStatus, jobLogLabel, jobProgress);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY;
        }
        UpdateBackupStatistics(backupStatistics);

        if (m_backupStatus == BackupPhaseStatus::COMPLETED) {
            HandleMonitorComplete(jobStatus, jobLogLabel, jobProgress);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_SUCCESS;
        } else if (m_backupStatus == BackupPhaseStatus::FAILED ||
            m_backupStatus == BackupPhaseStatus::FAILED_NOACCESS ||
            m_backupStatus == BackupPhaseStatus::FAILED_NOSPACE ||
            m_backupStatus == BackupPhaseStatus::FAILED_SEC_SERVER_NOTREACHABLE ||
            m_backupStatus == BackupPhaseStatus::FAILED_PROT_SERVER_NOTREACHABLE) {
            // Any of the failure variants ends the monitor loop as FAILED.
            HandleMonitorFailed(jobStatus, jobLogLabel);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_FAILED;
        } else if (m_backupStatus == BackupPhaseStatus::ABORTED) {
            HandleMonitorAborted(jobStatus, jobLogLabel);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_ABORTED;
        } else if (m_backupStatus == BackupPhaseStatus::ABORT_INPROGRESS) {
            // Abort has been requested but not finished; keep polling.
            jobStatus = SubJobStatus::ABORTING;
            jobLogLabel = "";
        }

        // Forward an external abort request to the backup engine.
        if (IsAbortJob()) {
            INFOLOG("Abort Backup taskId: %s, subtask: %s", m_jobId.c_str(), m_subJobId.c_str());
            m_backup->Abort();
        }
        sleep(EXECUTE_SUBTASK_MONITOR_DUR_IN_SEC);
    } while (true);

    // Unreachable: the loop above only exits through the return statements.
    HCP_Log(INFO, MODULE) << "Exit Monitor Backup" << HCPENDLOG;
    return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_SUCCESS;
}

void HetroBackupJob::HandleMonitorStuck(BackupStats &backupStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // The backup made no progress within the configured stuck window: abort
    // the engine, count every pending entry as failed and mark the subjob
    // as completed so the framework can move on (caller requests a retry).
    WARNLOG("backup statistic has not been update for %ds",
        Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "BACKUP_STUCK_TIME"));
    if (m_backup->Abort() != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "backup Abort is failed" << HCPENDLOG;
    }
    const auto dirsPending = backupStatistics.noOfDirToBackup - backupStatistics.noOfDirCopied;
    const auto filesPending = backupStatistics.noOfFilesToBackup - backupStatistics.noOfFilesCopied;
    backupStatistics.noOfDirFailed += dirsPending;
    backupStatistics.noOfFilesFailed += filesPending;
    UpdateBackupStatistics(backupStatistics);
    jobStatus = SubJobStatus::COMPLETED;
    jobProgress = PROGRESS100;
    jobLogLabel = "";
}

void HetroBackupJob::HandleMonitorComplete(SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Backup finished cleanly: report full progress with no extra label.
    HCP_Log(INFO, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_COMPLETED " << HCPENDLOG;
    jobStatus = SubJobStatus::COMPLETED;
    jobProgress = PROGRESS100;
    jobLogLabel = "";
}

void HetroBackupJob::HandleMonitorFailed(SubJobStatus::type &jobStatus, std::string &jobLogLabel)
{
    // Backup ended in one of the failure states: mark the subjob failed and
    // attach the data-failure label for the job report.
    HCP_Log(ERR, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_FAILED " << HCPENDLOG;
    jobLogLabel = "nas_plugin_hetro_backup_data_fail_label";
    jobStatus = SubJobStatus::FAILED;
}

void HetroBackupJob::HandleMonitorAborted(SubJobStatus::type &jobStatus, std::string &jobLogLabel)
{
    // Backup engine confirmed the abort: report the subjob as aborted.
    HCP_Log(ERR, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_ABORTED " << HCPENDLOG;
    jobLogLabel = "";
    jobStatus = SubJobStatus::ABORTED;
}

bool HetroBackupJob::ReportBackupRunningStatus(uint64_t curSubJobDataSz)
{
    // Aggregate all per-subjob statistics files under the meta repository and
    // report overall running progress to the framework.
    // The per-subjob byte count is not needed here (the aggregated files are
    // authoritative); keep the parameter for interface compatibility.
    static_cast<void>(curSubJobDataSz);

    HetroNativeBackupStats mainBackupJobStatistics {};
    if (m_subTaskType == SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE) {
        // Dir-mtime subjobs carry no copy statistics; just send a heartbeat.
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return true;
    }

    std::string dir = m_metaFsPath + "/statistics_" + m_jobId;
    std::vector<std::string> fileList {};
    if (!GetFileListInDirectory(dir, fileList)) {
        HCP_Log(ERR, MODULE) << "Get filelist for dir failed: " << dir << HCPENDLOG;
        return false;
    }

    // Sum the counters of every readable statistics file.
    uint32_t noOfFiles = 0;
    for (const std::string &statFile : fileList) {
        if (IsAbortJob()) {
            HCP_Log(WARN, MODULE) << "Abort invoked for the job" << HCPENDLOG;
            return true;
        }
        HetroNativeBackupStats subBackupJobStatistics {};
        if (!ReadBackupStatsFromFile(statFile, subBackupJobStatistics)) {
            continue;
        }
        mainBackupJobStatistics.m_noOfDirCopied += subBackupJobStatistics.m_noOfDirCopied;
        mainBackupJobStatistics.m_noOfFilesCopied += subBackupJobStatistics.m_noOfFilesCopied;
        mainBackupJobStatistics.m_noOfBytesCopied += subBackupJobStatistics.m_noOfBytesCopied;

        /* Since this loop may take too much time, report progress to framework, every 10 files */
        if (noOfFiles++ % NUMBER10 == 0) {
            ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        }
    }
    // Cast explicitly so %llu matches regardless of the counters' exact width.
    INFOLOG("nas_plugin_hetro_backup_data_inprogress_label, DirCopied:%llu, FilesCopied:%llu, BytesCopied:%s",
        static_cast<unsigned long long>(mainBackupJobStatistics.m_noOfDirCopied),
        static_cast<unsigned long long>(mainBackupJobStatistics.m_noOfFilesCopied),
        FormatCapacity(mainBackupJobStatistics.m_noOfBytesCopied).c_str());
    if (mainBackupJobStatistics.m_noOfBytesCopied != 0) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
                         "nas_plugin_hetro_backup_data_inprogress_label", JobLogLevel::TASK_LOG_INFO,
                         std::to_string(mainBackupJobStatistics.m_noOfDirCopied),
                         std::to_string(mainBackupJobStatistics.m_noOfFilesCopied),
                         FormatCapacity(mainBackupJobStatistics.m_noOfBytesCopied));
    } else {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    }

    return true;
}

bool HetroBackupJob::ReportBackupCompletionStatus()
{
    HetroNativeBackupStats backupStatistics {};
    std::string filePath = m_metaFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);

    /* As we report this from teardown-subjob or postjob, set datasize to 0. SO that UBC do not consider this size
      for speed calc */
    m_dataSize = 0;
    if (backupStatistics.m_noOfDirFailed != 0 || backupStatistics.m_noOfFilesFailed != 0) {
        ReportJobDetails(SubJobStatus::COMPLETED,
                         PROGRESS100,
                         "nas_plugin_hetro_backup_data_completed_with_warn_label",
                         JobLogLevel::TASK_LOG_WARNING,
                         std::to_string(backupStatistics.m_noOfDirCopied),
                         std::to_string(backupStatistics.m_noOfFilesCopied),
                         FormatCapacity(backupStatistics.m_noOfBytesCopied),
                         std::to_string(backupStatistics.m_noOfDirFailed),
                         std::to_string(backupStatistics.m_noOfFilesFailed));
    } else {
        ReportJobDetails(SubJobStatus::COMPLETED,
                         PROGRESS100,
                         "nas_plugin_hetro_backup_data_completed_label",
                         JobLogLevel::TASK_LOG_INFO,
                         std::to_string(backupStatistics.m_noOfDirCopied),
                         std::to_string(backupStatistics.m_noOfFilesCopied),
                         FormatCapacity(backupStatistics.m_noOfBytesCopied));
    }
    return true;
}

bool HetroBackupJob::UpdateBackupStatistics(BackupStats &backupStatistics)
{
    // Persist the latest counters, print them, and report progress to PM
    // (rate-limited via ShareResourceManager).
    m_dataSize = backupStatistics.noOfBytesCopied / NUMBER1024;
    std::string subJobTag;
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        subJobTag = "[" + std::to_string(m_subJobRequestId) + "]";
    }
    UpdateBackupSubTaskStatistics(backupStatistics, m_subTaskType, m_metaFsPath, m_subJobInfo);
    PrintBackupStatistics(backupStatistics, m_jobId, m_backupStatus, subJobTag);
    if (ShareResourceManager::GetInstance().CanReportStatToPM(m_jobId + "_backup")) {
        ReportBackupRunningStatus(backupStatistics.noOfBytesCopied);
    } else {
        // Too soon since the last full report: just send a heartbeat.
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    }
    return true;
}

std::string HetroBackupJob::GetProtocolName(std::string protocol)
{
    // Map the internal protocol constant to its lowercase mount name;
    // anything other than NFS is treated as CIFS.
    if (protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        return "nfs";
    }
    return "cifs";
}

bool HetroBackupJob::UpdateBackupStartTimeInSharedResource(HetroBackupSubJob& backupSubJob)
{
    // Record, once per phase, the start time of the first copy/delete subjob
    // in the shared general resource. The whole read-modify-write is guarded
    // by the general-resource lock; the error label releases it on any path.
    // (Removed an unused local that fetched the protocol name.)
    HetroNativeGeneral generalInfo {};

    if (!IsUpdateBackupStartTimeRequired(backupSubJob, m_generalInfo))
        return true;

    if (!LockGeneralResource(m_jobId)) {
        HCP_Log(ERR, MODULE) << "LockGeneralResource failed in UpdateBackupStartTimeInSharedResource" << HCPENDLOG;
        return false;
    }

    if (!GetGeneralResource(m_jobId, generalInfo))
        goto error;

    // First copy-phase subjob: stamp the copy-phase start time and announce
    // the start of the data-copy phase.
    if ((backupSubJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_COPY_PHASE) &&
        (generalInfo.m_backupCopyPhaseStartTime == 0)) {
        m_generalInfo.m_backupCopyPhaseStartTime =
            GetCurrentTimeFromRemoteServer(m_metaFsPath);
        if (m_generalInfo.m_backupCopyPhaseStartTime == 0) {
            HCP_Log(ERR, MODULE) << "Get current time of first backup copy subtask failed" << HCPENDLOG;
            goto error;
        }
        generalInfo.m_backupCopyPhaseStartTime = m_generalInfo.m_backupCopyPhaseStartTime;
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
            "nas_plugin_hetro_backup_data_start_label", JobLogLevel::TASK_LOG_INFO);
    }

    // First delete-phase subjob: stamp the delete-phase start time.
    if ((backupSubJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE) &&
        (generalInfo.m_backupDelPhaseStartTime == 0)) {
        m_generalInfo.m_backupDelPhaseStartTime =
            GetCurrentTimeFromRemoteServer(m_metaFsPath);
        if (m_generalInfo.m_backupDelPhaseStartTime == 0) {
            HCP_Log(ERR, MODULE) << "Get current time of first backup del subtask failed" << HCPENDLOG;
            goto error;
        }
        generalInfo.m_backupDelPhaseStartTime = m_generalInfo.m_backupDelPhaseStartTime;
    }

    if (!UpdateGeneralResource(m_jobId, generalInfo))
        goto error;
    if (!UnlockGeneralResource(m_jobId))
        return false;

    return true;

error:
    // Failure path: always release the lock before reporting failure.
    UnlockGeneralResource(m_jobId);
    return false;
}

bool HetroBackupJob::CreateBackupJobTeardownTask()
{
    // Create the teardown subjob on the framework. Retries forever on
    // retriable framework errors, stops early if the job was aborted.
    // (Removed an unused HetroNativeGeneral local.)
    std::string backupSubJobStr {};
    HetroBackupSubJob backupSubJob {};
    SubJob subJob {};
    backupSubJob.m_SubTaskType = SUBJOB_TYPE_TEARDOWN_PHASE;

    if (!Module::JsonHelper::StructToJsonString(backupSubJob, backupSubJobStr)) {
        HCP_Log(ERR, MODULE) << "Exit CreateBackupJobTeardownTask failed" << HCPENDLOG;
        return false;
    }
    subJob.__set_jobId(m_jobId);
    subJob.__set_jobName(SUBJOB_TYPE_TEARDOWN_JOBNAME);
    subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subJob.__set_policy(ExecutePolicy::ANY_NODE);
    subJob.__set_jobInfo(backupSubJobStr);
    subJob.__set_jobPriority(SUBJOB_TYPE_TEARDOWN_PHASE_PRIO);
    subJob.__set_ignoreFailed(false);

    do {
        if (IsAbortJob()) {
            HCP_Log(WARN, MODULE) << "Exit received Abort for taskid: " << m_jobId << ", subtaskid: "
                << m_subJobId << HCPENDLOG;
            break;
        }
        int ret = CreateSubTask(subJob);
        if (ret == Module::SUCCESS) {
            break;
        } else if (ret == Module::RETRY) {
            // Transient framework error: back off and try again.
            HCP_Log(WARN, MODULE) << "Create subtask failed with retriable error"<< HCPENDLOG;
            SleepForCreateSubTaskError();
            continue;
        } else {
            HCP_Log(ERR, MODULE) << "Exit CreateSubTasksFromCtrlFile, Create subtask failed" << HCPENDLOG;
            return false;
        }
    } while (true);
    return true;
}

bool HetroBackupJob::CreateBackupCopyMetaTask()
{
    // Create the copy-meta subjob on the framework. Retries forever on
    // retriable framework errors, stops early if the job was aborted.
    // (Removed an unused HetroNativeGeneral local; aligned the retry
    // branching with CreateBackupJobTeardownTask.)
    std::string backupSubJobStr;
    HetroBackupSubJob backupSubJob {};
    SubJob subJob {};
    backupSubJob.m_SubTaskType = SUBJOB_TYPE_COPYMETA_PHASE;
    if (!Module::JsonHelper::StructToJsonString(backupSubJob, backupSubJobStr)) {
        HCP_Log(ERR, MODULE) << "Exit CreateBackupJobCopyMetaTask failed!" << HCPENDLOG;
        return false;
    }
    subJob.__set_jobId(m_jobId);
    subJob.__set_jobName(SUBJOB_TYPE_COPYMETA_JOBNAME);
    subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subJob.__set_policy(ExecutePolicy::ANY_NODE);
    subJob.__set_jobInfo(backupSubJobStr);
    subJob.__set_jobPriority(SUBJOB_TYPE_COPYMETA_PHASE_PRIO);
    subJob.__set_ignoreFailed(false);
    do {
        if (IsAbortJob()) {
            HCP_Log(WARN, MODULE) << "Exit received Abort for taskid: " << m_jobId << ", subtaskid: "
                << m_subJobId << HCPENDLOG;
            break;
        }
        int ret = CreateSubTask(subJob);
        if (ret == Module::SUCCESS) {
            break;
        } else if (ret == Module::RETRY) {
            // Transient framework error: back off and try again.
            HCP_Log(WARN, MODULE) << "Create subtask failed with retriable error"<< HCPENDLOG;
            SleepForCreateSubTaskError();
            continue;
        } else {
            HCP_Log(ERR, MODULE) << "Exit CreateSubTasksFromCtrlFile, Create subtask failed" << HCPENDLOG;
            return false;
        }
    } while (true);
    return true;
}

void HetroBackupJob::ScannerCtrlFileCallBack(void *usrData, const string &controlFilePath)
{
    // Scanner control-file completion callback.
    // The user-data pointer is unused; discard it explicitly instead of the
    // old self-assignment (which modern compilers flag).
    static_cast<void>(usrData);
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

void HetroBackupJob::ScannerHardLinkCallBack(void *usrData, const string &controlFilePath)
{
    // Scanner hard-link control-file callback.
    // The user-data pointer is unused; discard it explicitly instead of the
    // old self-assignment (which modern compilers flag).
    static_cast<void>(usrData);
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

void HetroBackupJob::BackupDirMTimeCallBack(void *usrData, const string &controlFilePath)
{
    // Directory-mtime control-file callback.
    // The user-data pointer is unused; discard it explicitly instead of the
    // old self-assignment (which modern compilers flag).
    static_cast<void>(usrData);
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

void HetroBackupJob::BackupDelCtrlCallBack(void *usrData, const string &controlFilePath)
{
    // Delete-phase control-file callback.
    // The user-data pointer is unused; discard it explicitly instead of the
    // old self-assignment (which modern compilers flag).
    static_cast<void>(usrData);
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

bool HetroBackupJob::IdentifyNasProtoVersionToUse(int64_t &errCode)
{
    // Decide which NFS/SMB protocol version this job will use:
    //  - already decided earlier: keep it;
    //  - incremental backup: re-validate and reuse the previous copy's version;
    //  - otherwise probe, preferring the newest supported version.
    // errCode is set to a reachability error when validation fails.
    if (!m_generalInfo.m_protocolVersion.empty()) {
        return true;
    }
    std::string proto = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) ? "nfs" : "cifs";
    std::string protoVersion;

    if (m_backupJobPtr->jobParam.backupType == AppProtect::BackupJobType::INCREMENT_BACKUP) {
        Module::SmbVersion prevSmbVersion =
            HetroCommonService::ConvertStringToSmbVersion(m_prevBackupCopyInfo.m_protocolVersion);
        if ((proto == "nfs" && !CheckMountVersion(proto, m_prevBackupCopyInfo.m_protocolVersion, errCode))
            || (proto == "cifs" && !CheckSmbConnectVersion(prevSmbVersion, errCode))) {
            return false;
        }
        m_generalInfo.m_protocolVersion = m_prevBackupCopyInfo.m_protocolVersion;
        return true;
    }

    if (proto == "nfs") {
#ifndef DEVSTUB
        // Prefer NFSv4 and fall back to NFSv3.
        if (CheckMountVersion("nfs", NFS_VERSION_4, errCode)) {
            protoVersion = NFS_VERSION_4;
        } else if (CheckMountVersion("nfs", NFS_VERSION_3, errCode)) {
            protoVersion = NFS_VERSION_3;
        }
#else
        if (CheckMountVersion("nfs", NFS_VERSION_3, errCode)) {
            protoVersion = NFS_VERSION_3;
        }
#endif
    } else {
        protoVersion = GetSmbVersion(errCode);
    }

    if (protoVersion.empty()) {
        // BUGFIX: this log line was missing HCPENDLOG and was never terminated.
        HCP_Log(ERR, MODULE) << "Select version for Proto=" << proto << " failed" << HCPENDLOG;
        return false;
    }

    HCP_Log(INFO, MODULE) << "Selected Proto=" << proto << ", version=" << protoVersion << HCPENDLOG;
    m_generalInfo.m_protocolVersion = protoVersion;
    return true;
}

std::string HetroBackupJob::GetSmbVersion(int64_t& errCode)
{
    std::vector<Module::SmbVersion> cifsVersion {
        Module::SmbVersion::VERSION0302,
        Module::SmbVersion::VERSION0300,
        Module::SmbVersion::VERSION0210,
        Module::SmbVersion::VERSION0202
    };
    for (auto version : cifsVersion) {
        if (CheckSmbConnectVersion(version, errCode)) {
            return HetroCommonService::ConvertSmbVersionToString(version);
        }
    }
    return "";
}

bool HetroBackupJob::CheckSmbConnectVersion(Module::SmbVersion version, int64_t& errCode)
{
    // Verify that both the protected (remote) SMB share and the backup data
    // filesystem can be connected using the given SMB dialect. On success the
    // usable block size (min of remote max-read and dataFs max-write) is
    // cached in m_maxSmbBlockSize. Returns MP_TRUE/MP_FALSE through a bool;
    // errCode is set only when a connect (not init) step fails.
    std::string agentHomePath = Module::EnvVarManager::GetInstance()->GetAgentHomePath();
    std::string krb5CcacheFile = agentHomePath + KRB5CCNAMEPREFIX + m_jobId;
    std::string krb5ConfigFile = agentHomePath + KRB5CONFIGPREFIX + m_jobId + KRB5CONFIGPOSTFIX;

    // Map the share's auth type to the SMB authentication mechanism.
    Module::SmbAuthType smbRmtAuthType;
    if (m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        smbRmtAuthType = Module::SmbAuthType::KRB5;
    } else if (m_nasShare.auth.authType == AuthType::type::NO_AUTHENTICATION ||
        m_nasShare.auth.authType == AuthType::type::OS_PASSWORD ||
        m_nasShare.auth.authType == AuthType::type::APP_PASSWORD) {
        smbRmtAuthType = Module::SmbAuthType::NTLMSSP;
    } else {
        HCP_Log(INFO, MODULE) << "Wrong authType for cifs share: " << m_nasShare.auth.authType << HCPENDLOG;
        return MP_FALSE;
    }
    bool smbEncryption = (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION) ? true : false;

    // NOTE(review): the braced initializers below depend on the declaration
    // order of Module::SmbContextArgs members — confirm against the header.
    Module::SmbContextArgs rmtPars = {
        m_nasShare.nasShareExt.m_domainName, m_nasShare.nasShareExt.m_serviceIP, m_nasShare.sharePath,
        m_nasShare.auth.authkey, m_nasShare.auth.authPwd, krb5CcacheFile,
        krb5ConfigFile, smbEncryption, false, ONE_MINUTE, smbRmtAuthType, version
    };
    Module::SmbContextWrapper rmtSmb(rmtPars);
    if (!rmtSmb.Init()) {
        return MP_FALSE;
    }

    if (!rmtSmb.SmbConnect()) {
        // Protected (source) server unreachable with this dialect.
        errCode = HomoErrorCode::ERROR_NAS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
        return MP_FALSE;
    }

    // The destination data filesystem is always connected via NTLMSSP,
    // without kerberos files or encryption.
    Module::SmbContextArgs dtFsPars = {
        std::string(), m_dataFsSvcIp, "/" + m_dataFs.remoteName, m_dataFs.auth.authkey,
        m_dataFs.auth.authPwd, std::string(), std::string(), false,
        false, ONE_MINUTE, Module::SmbAuthType::NTLMSSP, version
    };
    Module::SmbContextWrapper dtFsSmb(dtFsPars);
    if (!dtFsSmb.Init()) {
        return MP_FALSE;
    }

    if (!dtFsSmb.SmbConnect()) {
        // Secondary (backup target) server unreachable with this dialect.
        errCode = HomoErrorCode::ERROR_NAS_SCAN_SECONDARY_SERVER_NOT_REACHABLE;
        return MP_FALSE;
    }
    // Use the smaller of the two negotiated transfer sizes as the block size.
    m_maxSmbBlockSize = rmtSmb.SmbGetMaxReadSize() < dtFsSmb.SmbGetMaxWriteSize() ?
        rmtSmb.SmbGetMaxReadSize() : dtFsSmb.SmbGetMaxWriteSize();
    HCP_Log(INFO, MODULE) << "maxSmbBlockSize: " << m_maxSmbBlockSize <<
        " readMaxSize:" << rmtSmb.SmbGetMaxReadSize() << " writeMaxSize:" << dtFsSmb.SmbGetMaxWriteSize() << HCPENDLOG;
    return MP_TRUE;
}

bool HetroBackupJob::CheckMountVersion(std::string proto, std::string protoVersion, int64_t &errCode)
{
    // Verify that both ends are mountable with the given protocol version:
    // first the protected (remote) share, then the backup data filesystem.
    // errCode identifies which side was unreachable.
    const bool remoteOk = CheckRemoteNasMount(proto, protoVersion, m_nasShare, m_tempLocalMountPath);
    if (!remoteOk) {
        HCP_Log(ERR, MODULE) << "CheckRemoteNasMount failed" << HCPENDLOG;
        errCode = HomoErrorCode::ERROR_NAS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
        return false;
    }
    const bool dataFsOk = CheckDataFsMount(proto, protoVersion, m_dataFs, m_dataFsSvcIp, m_nasShare);
    if (!dataFsOk) {
        HCP_Log(ERR, MODULE) << "CheckDataFsMount failed" << HCPENDLOG;
        errCode = HomoErrorCode::ERROR_NAS_SCAN_SECONDARY_SERVER_NOT_REACHABLE;
        return false;
    }
    return true;
}

bool HetroBackupJob::InitBackupCopyMetaInfo()
{
    // Full backups have no previous copy to load.
    if (IsFullBackup()) {
        return true;
    }
    // Incremental backup: load the previous copy's metadata and adopt its
    // protocol version.
    if (ReadBackupCopyFromFile(m_backupCopyMetaFile, m_prevBackupCopyInfo)) {
        m_generalInfo.m_protocolVersion = m_prevBackupCopyInfo.m_protocolVersion;
        return true;
    }
    // A missing/unreadable meta file is tolerated only during the
    // CheckBackupJobType phase.
    if (m_jobCtrlPhase == JOB_CTRL_PHASE_CHECKBACKUPJOBTYPE) {
        return true;
    }
    HCP_Log(ERR, MODULE) << "Read Backup Copy meta info from file failed" << HCPENDLOG;
    return false;
}

bool HetroBackupJob::IsFullBackup()
{
    // True when the framework requested a full (non-incremental) backup.
    const auto requestedType = m_backupJobPtr->jobParam.backupType;
    return requestedType == AppProtect::BackupJobType::FULL_BACKUP;
}

bool HetroBackupJob::IsSnapshotEnabled() const
{
    // The host extension flags snapshot usage with the literal string "1".
    const std::string &useSnapshot = m_nasHost.nasHostExt.m_useSnapshot;
    return useSnapshot == "1";
}

bool HetroBackupJob::SaveScannerMeta()
{
    // Preserve the previous scan metadata by renaming "latest" to "previous"
    // before a new scan writes fresh metadata. A missing "latest" directory
    // is not an error (first scan).
    // (Removed an unused `struct stat` local; fixed "metada" log typo.)
    std::string prevScanDir = m_cacheFsPath + "/backup-job/scan/meta/previous";
    std::string currScanDir = m_cacheFsPath + "/backup-job/scan/meta/latest";
    if (PathExistMandate(currScanDir)) {
        INFOLOG("rename dir %s => %s", currScanDir.c_str(), prevScanDir.c_str());
        if (!RenameDir(currScanDir, prevScanDir)) {
            HCP_Log(ERR, MODULE) << "Scanner metadata save rename failed" << HCPENDLOG;
            return false;
        }
    }
    return true;
}

void HetroBackupJob::RemoveCacheDirectories()
{
    // Clean up the temporary mount and statistics directories, then archive
    // the control/metadata directories of the finished backup.
    INFOLOG("RemoveCacheDirectories, tempLocalMountPath: %s, metaFsPath: %s, cacheFsPath: %s",
        m_tempLocalMountPath.c_str(), m_metaFsPath.c_str(), m_cacheFsPath.c_str());
    std::string prevBackDir = m_cacheFsPath + "/backup-job/backup/prevctrl";
    std::string currBackDir = m_cacheFsPath + "/backup-job/backup/ctrl";
    std::string prevMetaDir = m_cacheFsPath + "/backup-job/scan/meta/prevmeta";
    std::string currMetaDir = m_cacheFsPath + "/backup-job/scan/meta/latest";

    RemoveDirectory(m_tempLocalMountPath);
    RemoveDirectory(m_metaFsPath + "/statistics_" + m_jobId);
    /* Rename the control files at the end of backup */
    RenameDir(currBackDir, prevBackDir);
    if (PathExistMandate(currMetaDir)) {
        RenameDir(currMetaDir, prevMetaDir);
    }
    // Remove whatever remains (same paths as currBackDir/currMetaDir) in
    // case a rename above did not move the directory.
    RemoveDirectory(m_cacheFsPath + "/backup-job/scan/ctrl");
    RemoveDirectory(currBackDir);
    RemoveDirectory(currMetaDir);
}

void HetroBackupJob::PrintBackupCopyInfo(HetroNativeBackupStats &backupStatistics)
{
    // Print the final backup job report followed by the scanner and backup
    // statistics. On success also computes and stores the total job duration.
    if (m_jobResult != AppProtect::JobResult::type::SUCCESS) {
        HCP_Log(INFO, MODULE)
            << "\n\n----------------"
            << "\nBackup Job Report"
            << "\n------------------"
            << "\nStatus                : Backup failed"
            << "\nBackup Type           : " << (IsFullBackup() ? "FULL" : "INC")
            << "\nJob Id                : " << m_jobId
            << "\n\n" << HCPENDLOG;
        PrintFinalScannerStats(m_scanStats);
        PrintFinalBackupStats(backupStatistics);
        return;
    }

    time_t jobEndTime = GetCurrentTimeInSeconds();
    std::string jobStartTimeStr = FormatTimeToStr(m_generalInfo.m_jobStartTime);
    std::string jobEndTimeStr = FormatTimeToStr(jobEndTime);
    // BUGFIX: dropped the pointless double() round-trip on an integral
    // duration that was immediately stored into an int64_t.
    int64_t jobDuration = jobEndTime - m_generalInfo.m_jobStartTime;
    m_totalJobDuration = jobDuration;

    HCP_Log(INFO, MODULE)
        << "\n\n----------------"
        << "\nBackup Job Report"
        << "\n------------------"
        // typo fix: was "Backup succesfull"
        << "\nStatus                    : Backup successful"
        << "\nBackup Type               : " << (IsFullBackup() ? "FULL" : "INC")
        << "\nJob Start time            : " << jobStartTimeStr
        << "\nJob End time              : " << jobEndTimeStr
        << "\nJob Duration (seconds)    : " << jobDuration
        << "\nJob Id                    : " << m_jobId
        << "\n\n" << HCPENDLOG;

    PrintFinalScannerStats(m_scanStats);
    PrintFinalBackupStats(backupStatistics);
}

bool HetroBackupJob::KinitTGT()
{
    // Obtain a Kerberos TGT for CIFS shares that authenticate via KERBEROS.
    // Skipped (returns true) for the post-job phase, non-CIFS shares, and
    // non-Kerberos auth types.
    if (m_jobCtrlPhase == JOB_CTRL_PHASE_POSTJOB ||
        m_nasShare.nasShareExt.m_protocol != NAS_PROTOCOL_TYPE_E_CIFS ||
        m_nasShare.auth.authType != AuthType::type::KERBEROS) {
        HCP_Log(DEBUG, MODULE) << "Don't need kerberos ticket." << HCPENDLOG;
        return true;
    }
    // Parse the protect-object auth extension (secret / keytab / krb5.conf).
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->protectObject.auth.extendInfo,
        m_nasShare.nasShareAuthExt)) {
        HCP_Log(ERR, MODULE) << "JsonStringToStruct failed." << HCPENDLOG;
        return false;
    }
    /* set krb5 environment variable for krb5.conf */
    HCP_Log(INFO, MODULE) << "authkey: " << WIPE_SENSITIVE(m_nasShare.auth.authkey) << HCPENDLOG;

    // Run the actual kinit with either the secret or the keytab; the jobId
    // scopes the per-job credential cache file.
    if (KinitTGTInner(m_nasShare.auth.authkey, m_nasShare.nasShareAuthExt.secret,
        m_nasShare.nasShareAuthExt.keytab,
        m_nasShare.nasShareAuthExt.krb5Conf, m_jobId) != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Kinit TGT ticket FAILED." << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "Kinit TGT ticket SUCCESS." << HCPENDLOG;

    return true;
}

void HetroBackupJob::CheckAndDeleteMetaFiles(const std::string& fileName)
{
    // Remove a stale metadata archive left over from a previous
    // (possibly failed) run, if it is still present.
    HCP_Log(INFO, MODULE) << "check if metafile exists: " << fileName << HCPENDLOG;
    if (!PathExistMandate(fileName)) {
        return;
    }
    HCP_Log(INFO, MODULE) << "metafile exists, remove" << HCPENDLOG;
    RemoveFile(fileName);
}

std::vector<Module::CmdParam> HetroBackupJob::FillCopyMetaCmd(const std::string& scanMetaFilePath,
    const std::string& dirCacheZipFileName, const std::string& fCacheZipFileName,
    const std::string& metaZipFilename, const std::string& xmetaZipFileName)
{
    // Build the shell pipeline that archives the scanner metadata into four
    // compressed tarballs in the meta repository:
    //   cd <scanMetaFilePath>;
    //   tar -cf - dircache* metafile_count.txt scanner_status.txt | pigz -1k > <dirCacheZip> &&
    //   tar -cf - filecache_* | pigz -1k > <fCacheZip> &&
    //   tar -cf - meta_file_* | pigz -1k > <metaZip> &&
    //   tar -cf - xmeta_file_* | pigz -1k > <xmetaZip>
    // pigz -1 favours speed over compression ratio; -k keeps the input.
    std::vector<Module::CmdParam> cmd { CmdParam(COMMON_CMD_NAME, "cd"),
        CmdParam(PATH_PARAM, scanMetaFilePath), CmdParam(CONTINUOUS_PARAM, ";"),
        // dircache + counter/status files -> dirCacheZipFileName
        CmdParam(COMMON_CMD_NAME, "tar"), CmdParam(CMD_OPTION_PARAM, "-cf"),
        CmdParam(CMD_OPTION_PARAM, "-"), CmdParam(COMMON_PARAM, "dircache"),
        CmdParam(WILDCARD_CHAR_PARAM, "*", false), CmdParam(COMMON_PARAM, "metafile_count.txt"),
        CmdParam(COMMON_PARAM, "scanner_status.txt"), CmdParam(PIPELINE_PARAM, "|"),
        CmdParam(COMMON_CMD_NAME, "pigz"),
        CmdParam(CMD_OPTION_PARAM, "-1k"),
        CmdParam(REDIRECTOUT_PARAM, ">"),
        CmdParam(PATH_PARAM, dirCacheZipFileName),
        CmdParam(LOGICAND_PARAM, "&&"),
        // filecache_* -> fCacheZipFileName
        CmdParam(COMMON_CMD_NAME, "tar"),
        CmdParam(CMD_OPTION_PARAM, "-cf"),
        CmdParam(CMD_OPTION_PARAM, "-"),
        CmdParam(COMMON_PARAM, "filecache_"),
        CmdParam(WILDCARD_CHAR_PARAM, "*", false),
        CmdParam(PIPELINE_PARAM, "|"),
        CmdParam(COMMON_CMD_NAME, "pigz"),
        CmdParam(CMD_OPTION_PARAM, "-1k"),
        CmdParam(REDIRECTOUT_PARAM, ">"),
        CmdParam(PATH_PARAM, fCacheZipFileName),
        CmdParam(LOGICAND_PARAM, "&&"),
        // meta_file_* -> metaZipFilename
        CmdParam(COMMON_CMD_NAME, "tar"),
        CmdParam(CMD_OPTION_PARAM, "-cf"),
        CmdParam(CMD_OPTION_PARAM, "-"),
        CmdParam(COMMON_PARAM, "meta_file_"),
        CmdParam(WILDCARD_CHAR_PARAM, "*", false),
        CmdParam(PIPELINE_PARAM, "|"),
        CmdParam(COMMON_CMD_NAME, "pigz"),
        CmdParam(CMD_OPTION_PARAM, "-1k"),
        CmdParam(REDIRECTOUT_PARAM, ">"),
        CmdParam(PATH_PARAM, metaZipFilename),
        CmdParam(LOGICAND_PARAM, "&&"),
        // xmeta_file_* -> xmetaZipFileName
        CmdParam(COMMON_CMD_NAME, "tar"),
        CmdParam(CMD_OPTION_PARAM, "-cf"),
        CmdParam(CMD_OPTION_PARAM, "-"),
        CmdParam(COMMON_PARAM, "xmeta_file_"),
        CmdParam(WILDCARD_CHAR_PARAM, "*", false),
        CmdParam(PIPELINE_PARAM, "|"),
        CmdParam(COMMON_CMD_NAME, "pigz"),
        CmdParam(CMD_OPTION_PARAM, "-1k"),
        CmdParam(REDIRECTOUT_PARAM, ">"),
        CmdParam(PATH_PARAM, xmetaZipFileName),
    };
    return cmd;
}

/**
 * Archive the scanner meta files (dircache / filecache / meta / xmeta) from the
 * scan output directory into .gz bundles under <metaFs>/filemeta via a
 * "tar | pigz" pipeline, retrying up to NUM_10 times on failure.
 *
 * @param scanMetaFilePath  directory holding the scanner-produced meta files
 * @return Module::SUCCESS on success, Module::FAILED after all retries fail
 *         (also sets m_isZipSuccess = false). Always clears m_isCopying on exit.
 */
int HetroBackupJob::CopyMetaFileToMetaRepo(const std::string& scanMetaFilePath)
{
    CopyMetaFileToMetaRepoMkdir(scanMetaFilePath);
    string dirCacheZipFileName = m_metaFsPath + "/filemeta/metafile_DIRCACHE.gz";
    string fCacheZipFileName = m_metaFsPath + "/filemeta/metafile_FILECACHE.gz";
    string metaZipFilename = m_metaFsPath + "/filemeta/metafile_META.gz";
    string xmetaZipFileName = m_metaFsPath + "/filemeta/metafile_XMETA.gz";
    string oldVersionScannerMetaZipFileName  = m_metaFsPath + "/filemeta/metafile.zip";
    // Directories the command runner is allowed to touch.
    // (Renamed from the typo "pathWhileList".)
    std::unordered_set<std::string> pathWhiteList = {
        m_metaFsPath + "/filemeta",
        scanMetaFilePath
    };
    // Remove any stale/partial archives so the pipeline always starts clean;
    // reused before every retry (originally duplicated inline).
    auto removeArchives = [&]() {
        CheckAndDeleteMetaFiles(dirCacheZipFileName);
        CheckAndDeleteMetaFiles(fCacheZipFileName);
        CheckAndDeleteMetaFiles(metaZipFilename);
        CheckAndDeleteMetaFiles(xmetaZipFileName);
    };
    removeArchives();
    // check and remove if old file format file exist
    CheckAndDeleteMetaFiles(oldVersionScannerMetaZipFileName);
    std::vector<Module::CmdParam> cmd = FillCopyMetaCmd(scanMetaFilePath, dirCacheZipFileName, fCacheZipFileName,
        metaZipFilename, xmetaZipFileName);
    std::vector<std::string> result;
    int ret = 0;
    int retryCnt = 0;
    do {
        // Clear per attempt so a retry does not re-log output from a previous run.
        result.clear();
        ret = Module::RunCommand("cd", cmd, result, pathWhiteList);
        if (ret != 0) {
            WARNLOG("Exec cmd failed! %d, remove file and retry: %d", ret, retryCnt);
            for (const std::string& str : result) {
                WARNLOG("result str: %s", str.c_str());
            }
            removeArchives();
        }
    } while (ret != 0 && ++retryCnt <= NUM_10);
    if (ret != 0) {
        ERRLOG("Exec cmd failed! %d", ret);
        m_isZipSuccess = false;
        m_isCopying = false;
        return Module::FAILED;
    }
    HCP_Log(INFO, MODULE) << "Copy meta file finish , set isCopying to false." << HCPENDLOG;
    m_isCopying = false;
    return Module::SUCCESS;
}

void HetroBackupJob::CopyMetaFileToMetaRepoMkdir(const std::string& scanMetaFilePath)
{
    HCP_Log(INFO, MODULE) << "Enter CopyMetaFileToMetaRepo : " << scanMetaFilePath << " , "
        << m_metaFsPath << HCPENDLOG;
    std::vector<Module::CmdParam> mkdirCmd {
        CmdParam(COMMON_CMD_NAME, "mkdir"),
        CmdParam(CMD_OPTION_PARAM, "-p"),
        CmdParam(PATH_PARAM, m_metaFsPath + "/filemeta")
    };
    std::unordered_set<std::string> pathWhite = {
        m_metaFsPath + "/filemeta",
        scanMetaFilePath
    };
    std::vector<std::string> result;
    (void)Module::RunCommand("mkdir", mkdirCmd, result, pathWhite);
}

void HetroBackupJob::KeepPluginAlive()
{
    HCP_Log(INFO, MODULE) << "Enter KeepPluginAlive" << HCPENDLOG;
    ActionResult result;
    SubJobDetails subJobDtls;
    LogDetail logDetail{};
    std::vector<LogDetail> logDetails;
    uint32_t reportCnt = 0;
    while (!m_isAbort && !m_generateSubjobFinish) {
        // 10s 检查一次退出条件
        std::this_thread::sleep_for(std::chrono::seconds(REPORT_RUNNING_INTERVAL));
        // 60s 上报一次
        if (reportCnt % REPORT_RUNNING_TIMES == 0) {
            REPORT_LOG2AGENT(subJobDtls, result, logDetails, logDetail, 0, 0, SubJobStatus::RUNNING);
        }
        reportCnt++;
    }
}

/**
 * Cleanup path for a failed post-job: tears down Kerberos artifacts (CIFS +
 * KERBEROS auth only), certificates, cache directories and shared resources,
 * then reports FAILED with an internal-error code to the agent.
 *
 * @return always Module::FAILED, so callers can `return HandlePostJobFailed();`
 */
int HetroBackupJob::HandlePostJobFailed()
{
    // Kerberos files are only created for CIFS shares authenticated via KERBEROS.
    const bool cifsWithKrb =
        (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) &&
        (m_nasShare.auth.authType == AuthType::type::KERBEROS);
    if (cifsWithKrb) {
        CleanKrbFilesForCifs(m_jobId);
    }
    RemoveCertification(m_backupJobPtr->protectEnv);
    RemoveCacheDirectories();
    DeleteSharedResources(m_jobId);
    ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0, "", JobLogLevel::TASK_LOG_ERROR,
        HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
    return Module::FAILED;
}