/*
* This file is a part of the open-eBackup project.
* This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
* If a copy of the MPL was not distributed with this file, You can obtain one at
* http://mozilla.org/MPL/2.0/.
*
* Copyright (c) [2024] Huawei Technologies Co.,Ltd.
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*/
#include "nas_share/restore/HetroRestoreJob.h"
#include "config_reader/ConfigIniReader.h"
#include "common/CleanMemPwd.h"
#include "common/EnvVarManager.h"
#include "common/Utils.h"
#include "client/ClientInvoke.h"
#include "constant/ErrorCodes.h"
#include "client/ClientInvoke.h"
#include "ScanMgr.h"
#include "security/cmd/CmdParam.h"
#include "security/cmd/CmdExecutor.h"
#include "utils/PluginConfig.h"

using namespace PluginUtils;
using namespace std;
using namespace Module;

// Expanded as a statement block at the start of each job-phase entry point:
// derives the main-job request id from the job id hash, rebinds the trace
// context (HCPTSP) to it, and logs entry into the current control phase.
// NOTE: only block comments may be placed inside the macro body; a '//'
// comment would swallow the trailing line-continuation backslash.
#define ENTER                                                                                                   \
{                                                                                                               \
    m_mainJobRequestId = GenerateHash(m_jobId);                                                                 \
    HCPTSP::getInstance().reset(m_mainJobRequestId);                                                            \
    HCP_Log(INFO, MODULE) << "Enter " << m_jobCtrlPhase                                                         \
                            << ", jobId: " << m_jobId                                                           \
                            << ", subjobId:" << ((m_subJobInfo == nullptr) ? "" : m_subJobId)                   \
                            << HCPENDLOG;                                                                       \
}

// Counterpart of ENTER: logs exit from the current job control phase with the
// job id and (when a sub job exists) the sub-job id. Expanded as a statement
// block just before each phase entry point returns.
#define EXIT                                                                                                    \
{                                                                                                               \
    HCP_Log(INFO, MODULE) << "Exit " << m_jobCtrlPhase                                                          \
                            << ", jobId: " << m_jobId                                                           \
                            << ", subjobId:" << ((m_subJobInfo == nullptr) ? "" : m_subJobId)                   \
                            << HCPENDLOG;                                                                       \
}

namespace {
    // File-local constants for the heterogeneous NAS restore job.
    constexpr auto MODULE = "HetroRestoreJob";
    constexpr uint32_t SCANNER_REPORT_CIRCLE_TIME = 60;  /* seconds */
    // Aggregate-restore tuning: default/upper bound for files per aggregate.
    constexpr uint32_t DEFAULT_AGGREGATE_NUM = 32;
    constexpr uint32_t MAX_AGGREGATE_NUM = 1000;
    constexpr uint32_t DEFAULT_SMB_BLOCK_SIZE = 128 * 1024; // 128k
    // Restore-type markers carried in the job's extendInfo.
    constexpr auto FLR_RESTORE_TYPE_ORIGIN_VALUE = "original";
    constexpr auto FLR_RESTORE_TYPE_NEW_VALUE = "newHomo";
    constexpr auto FLR_RESTORE_TYPE_NEW_VALUE2 = "new";
    constexpr auto FLR_RESTORE_TYPE_NATIVE_VALUE = "native";
    constexpr auto RESTORE_TYPE_HETER_NEW_VALUE = "newHeter";
    constexpr int FIRST_GENERATE_CONTROL_FILE = 0;
    // Cap on concurrent open-directory requests issued by the scanner.
    constexpr int MAX_OPEN_DIR_REQ_4000 = 4000;
    constexpr uint32_t NUM_10 = 10;  // backoff step, seconds
    constexpr uint8_t INIT_INT_VALUE = 0;
    constexpr uint8_t RETRY_CNT = 3;  // max retries for transient failures
    // Keep-alive reporting: report RUNNING every 10s, up to 6 times per cycle.
    constexpr uint32_t REPORT_RUNNING_INTERVAL = 10;
    constexpr uint32_t REPORT_RUNNING_TIMES = 6;
    // Protocol selectors of the protected host.
    constexpr int HOST_PROTOCOL_TYPE_NFS = 1;
    constexpr int HOST_PROTOCOL_TYPE_CIFS = 2;
    constexpr int HOST_PROTOCOL_TYPE_NFS_CIFS = 3;
    constexpr int HOST_PROTOCOL_TYPE_ALL = 999; // for E series
    const int DEFAULT_SUB_JOB_CNT = 25;
    // X3000 Internal Agent Max Channel number
    const int INTERNAL_MAX_CHANNEL_NUM = 15;
    const int FAILURE_OUTPUT_LINE = 100;
}

// Fetches the RestoreJob description from the framework and caches it in
// m_restoreJobPtr, then derives restore type flags and job/sub-job ids.
// Reports a FAILED job detail and returns false when the job info is missing
// or not a RestoreJob.
bool HetroRestoreJob::GetRestoreJobInfo()
{
    if (GetJobInfo() != nullptr) {
        m_restoreJobPtr = dynamic_pointer_cast<AppProtect::RestoreJob>(GetJobInfo()->GetJobInfo());
    }
    if (m_restoreJobPtr == nullptr) {
        // Fix: the original message said "backupJobPtr"; this is the restore job pointer.
        HCP_Log(ERR, MODULE) << "Failed to get restoreJobPtr." << HCPENDLOG;
        ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0,
            "nas_plugin_hetro_restore_data_fail_label", JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        return false;
    }
    GetRestoreType();
    SetMainJobId(m_restoreJobPtr->jobId);
    SetSubJobId();
    InitJobInfo();
    return true;
}

// Destructor: scrub credential material (passwords and auth extendInfo, which
// may embed secrets) from every repository/host/share descriptor the job
// held, so secrets do not linger in freed heap memory.
HetroRestoreJob::~HetroRestoreJob()
{
    Module::CleanMemoryPwd(m_dataFs.auth.authPwd);
    Module::CleanMemoryPwd(m_dataFs.auth.extendInfo);
    Module::CleanMemoryPwd(m_metaFs.auth.authPwd);
    Module::CleanMemoryPwd(m_metaFs.auth.extendInfo);
    Module::CleanMemoryPwd(m_cacheFs.auth.authPwd);
    Module::CleanMemoryPwd(m_cacheFs.auth.extendInfo);
    Module::CleanMemoryPwd(m_nasHost.auth.authPwd);
    Module::CleanMemoryPwd(m_nasHost.auth.extendInfo);
    Module::CleanMemoryPwd(m_nasShare.auth.authPwd);
    Module::CleanMemoryPwd(m_nasShare.auth.extendInfo);
}

// Prerequisite phase entry point: validates job info and the pigz binary,
// then runs PrerequisiteJobInner between ENTER/EXIT trace logging.
// Always notifies the framework the phase is finished before returning.
int HetroRestoreJob::PrerequisiteJob()
{
    SetJobCtrlPhase(JOB_CTRL_PHASE_PREJOB);
    if (!GetRestoreJobInfo()) {
        SetJobToFinish();
        return MP_FAILED;
    }
    if (!IsPigzExist()) {
        ReportJobDetails(SubJobStatus::FAILED, PROGRESS0,
            "plugin_check_pigz_failed_label", JobLogLevel::TASK_LOG_ERROR);
        // Fix: mark the job finished on this early-exit path too, consistent
        // with every other failure return in the phase entry points.
        SetJobToFinish();
        return Module::FAILED;
    }
    ENTER
    int ret = PrerequisiteJobInner();
    EXIT
    SetJobToFinish();
    return ret;
}

// Generate-sub-job phase entry point: runs the scanner (aggregate, file-system
// or native variant) under a keep-alive reporter thread, then reports the
// overall scan result to the framework.
int HetroRestoreJob::GenerateSubJob()
{
    SetJobCtrlPhase(JOB_CTRL_PHASE_GENSUBJOB);
    if (!GetRestoreJobInfo()) {
        SetJobToFinish();
        return MP_FAILED;
    }
    // Fix: 'ret' was uninitialized and was returned as-is when IsAbortJob()
    // skipped the scanner, which is undefined behavior. An aborted generation
    // phase now deterministically reports failure.
    int ret = MP_FAILED;
    ENTER
    m_generateSubjobFinish = false;
    // keep alive thread , used for report main job
    std::thread keepAlive = std::thread(&HetroRestoreJob::KeepPluginAlive, this);
    if (IsAbortJob()) {
        ERRLOG("Job aborted, skip scanner.");
    } else {
        RegisterScanTask();
        // Deleter-only shared_ptr acts as a scope guard: release the scan task
        // however this branch exits.
        std::shared_ptr<void> defer(nullptr, [&](...) { ReleaseScanTask(); });
        if (m_aggregateRestore) {
            ret = GenerateAggregateSubJobInner();
        } else if (m_justRestoreByScan) {
            ret = GenerateSubJobInnerForFileSystem();
        } else {
            ret = GenerateSubJobInner();
        }
    }
    // Stop the keep-alive loop and wait for the reporter thread to drain.
    m_generateSubjobFinish = true;
    if (keepAlive.joinable()) {
        keepAlive.join();
        INFOLOG("keep alive thread join!");
    }
    EXIT
    ClearGenerateSubJobEnv();
    ReportJobCompleteStatus(m_restoreScanStatistics, m_restoreJobStatus, m_restoreJobLogLabel, m_restoreJobProgress);

    SetJobToFinish();
    return ret;
}

// Execute-sub-job phase entry point. Registers the sub job with the channel
// manager (released via the deleter-only shared_ptr guard), parses the
// sub-job payload, then dispatches on sub-task type: CREATE runs under a
// keep-alive reporter thread, CHECK verifies statistics, anything else runs
// the actual restore copy.
int HetroRestoreJob::ExecuteSubJob()
{
    AddSubJobToChannelMgr();
    std::shared_ptr<void> delSubJob(nullptr, [&](...) {
        RemoveSubJobFromChannelMgr();
    });
    HetroBackupSubJob backupSubJob {};
    SetJobCtrlPhase(JOB_CTRL_PHASE_EXECSUBJOB);
    if (!GetRestoreJobInfo() || GetExecuteSubJobType() != Module::SUCCESS) {
        SetJobToFinish();
        return Module::FAILED;
    }
    ENTER
    PrintSubJobInfo(m_subJobInfo);
    Json::Value js;
    // Both the sub-job payload and the main job's extendInfo must parse;
    // otherwise fail this sub job with an internal error.
    if (!Module::JsonHelper::JsonStringToStruct(m_subJobInfo->jobInfo, backupSubJob) ||
    !Module::JsonHelper::JsonStringToJsonValue(m_restoreJobPtr->extendInfo, js)) {
        HCP_Log(ERR, MODULE) << "Get restore subjob info failed" << HCPENDLOG;
        ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0,
            "nas_plugin_hetro_restore_data_fail_label", JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        EXIT
        SetJobToFinish();
        return MP_FAILED;
    }
    // Missing "orderOfRestore" defaults to "0" (first/only restore iteration).
    backupSubJob.m_orderOfRestore = (js["orderOfRestore"].asString()).empty() ? "0" : js["orderOfRestore"].asString();
    m_subTaskType = backupSubJob.m_SubTaskType;
    m_subJobRequestId = GenerateHash(m_jobId + m_subJobId);
    m_copyId = backupSubJob.copyId;
    // Log the sub-job request id as a zero-padded 8-digit hex value.
    HCP_Log(INFO, MODULE)  << "mainJob ID: " << m_jobId << ", subJob ID: "
        << m_subJobId << ", subJobRequestId: 0x" << std::setw(NUMBER8) << std::setfill('0') << std::hex
        << (m_subJobRequestId & 0xFFFFFFFF) << std::dec << HCPENDLOG;
    HCPTSP::getInstance().reset(m_subJobRequestId);

    int ret = MP_FAILED;
    if (m_subTaskType == SUBJOB_TYPE_CREATE_SUBJOB_PHASE) {
        // The create phase can take long; run the keep-alive reporter so the
        // framework sees the main job as alive until the inner call returns.
        m_generateSubjobFinish = false;
        std::thread keepAlive = std::thread(&HetroRestoreJob::KeepPluginAlive, this);
        ret = ExecuteCreateSubJobInner(backupSubJob);
        m_generateSubjobFinish = true;
        keepAlive.join();
    } else if (m_subTaskType == SUBJOB_TYPE_CHECK_SUBJOB_PHASE) {
        ret = ExecuteCheckSubJobInner();
    } else {
        ret = ExecuteRestoreSubJobInner(backupSubJob);
    }
    EXIT
    SetJobToFinish();
    return ret;
}

int HetroRestoreJob::PostJob()
{
    SetJobCtrlPhase(JOB_CTRL_PHASE_POSTJOB);
    if (!GetRestoreJobInfo()) {
        SetJobToFinish();
        return MP_FAILED;
    }
    AddSubJobToChannelMgr();
    std::shared_ptr<void> delSubJob(nullptr, [&](...) {
        RemoveSubJobFromChannelMgr();
    });
    ENTER
    int ret = PostJobInner();
    EXIT
    SetJobToFinish();
    return ret;
}

// Core of the prerequisite phase: validates the target environment (including
// optional certificate thumbprint verification), initializes job info, adds
// IP routes, creates shared resources, and prepares the cache file system.
// Any failure is funneled through HandlePrerequisiteJobFailed with an
// agent-internal (or more specific) error code.
int HetroRestoreJob::PrerequisiteJobInner()
{
    int64_t errCode = HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    ApplicationEnvExtent applicationEnvExtentInfo {};
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->targetEnv.extendInfo, applicationEnvExtentInfo)) {
        HCP_Log(ERR, MODULE) << "ApplicationEnvExtentInfo is invaild." << HCPENDLOG;
        return HandlePrerequisiteJobFailed(errCode);
    }
    // Fix: std::stoi throws std::invalid_argument/std::out_of_range on an
    // empty or non-numeric verifyStatus, which previously crashed the phase.
    int verifyStatus = 0;
    try {
        verifyStatus = std::stoi(applicationEnvExtentInfo.m_verifyStatus);
    } catch (...) {
        HCP_Log(ERR, MODULE) << "verifyStatus is not a valid number" << HCPENDLOG;
        return HandlePrerequisiteJobFailed(errCode);
    }
    if (verifyStatus == MP_TRUE) {
        if (callCheckCertThumbPrint(m_restoreJobPtr->targetEnv) != MP_SUCCESS) {
            HCP_Log(ERR, MODULE) << "CheckCert thumbprint failed" << HCPENDLOG;
            return HandlePrerequisiteJobFailed(errCode);
        }
    }
    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return HandlePrerequisiteJobFailed(errCode);
    }
    PrintJobInfo();
    // Add routes so the agent can reach the NAS service IPs.
    if (!OperateIpsRule(m_IpRuleList, "ADD") ||
        !OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "ADD", m_nasShare.nasShareExt.m_serviceIP)) {
        ERRLOG("add ip rule failed");
        return HandlePrerequisiteJobFailed(errCode);
    }
    PluginUtils::CreateDirectory(m_failureRecordRoot);
    // CheckNasSharesReachable may refine errCode to a more specific value.
    if (!CheckNasSharesReachable(errCode) ||
        !CreateSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "CreateSharedResources failed" << HCPENDLOG;
        return HandlePrerequisiteJobFailed(errCode);
    }
    m_generalInfo.m_jobStartTime = GetCurrentTimeInSeconds();
    if (!SetupCacheFsForRestoreJob()) {
        HCP_Log(ERR, MODULE) << "SetupCacheFsForRestoreJob failed" << HCPENDLOG;
        return HandlePrerequisiteJobFailed(errCode);
    }
    if (!UpdateGeneralResource(m_jobId, m_generalInfo)) {
        HCP_Log(ERR, MODULE) << "UpdateGeneralResource failed" << HCPENDLOG;
        return HandlePrerequisiteJobFailed(errCode);
    }
    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
    return MP_SUCCESS;
}

// Verifies the target environment's certificate thumbprint through the
// security service. Returns MP_SUCCESS when the fingerprint matches,
// MP_FAILED when the auth extendInfo cannot be parsed or the check fails.
int HetroRestoreJob::callCheckCertThumbPrint(const AppProtect::ApplicationEnvironment& targetEnv)
{
    // The fingerprint lives inside the auth extendInfo JSON blob.
    EnvAuthExtendInfo authExtendInfo {};
    if (!Module::JsonHelper::JsonStringToStruct(targetEnv.auth.extendInfo, authExtendInfo)) {
        HCP_Log(ERR, MODULE) << "NasAuthExtendInfo is invaild." << HCPENDLOG;
        return MP_FAILED;
    }
    const std::string& thumbPrint = authExtendInfo.m_fingerPrint;
    HCP_Log(INFO, MODULE) << "CheckCertThumbprint param"
        << ", endpoint: " << targetEnv.endpoint
        << ", port: " << targetEnv.port
        << ", thumbprint: " << WIPE_SENSITIVE(thumbPrint) << HCPENDLOG;
    // Delegate the actual verification to the agent's security service.
    ActionResult checkResult;
    SecurityService::CheckCertThumbPrint(checkResult, targetEnv.endpoint, targetEnv.port, thumbPrint);
    if (checkResult.code == MP_SUCCESS) {
        return MP_SUCCESS;
    }
    HCP_Log(ERR, MODULE) << "CheckCertThumbprint failed"
        << ", endpoint: " << targetEnv.endpoint
        << ", port: " << targetEnv.port
        << ", thumbprint: " << WIPE_SENSITIVE(thumbPrint) << HCPENDLOG;
    return MP_FAILED;
}

// Reports the final status of the scan-driven generation phase. A completed
// job reports the accumulated scanner statistics; a failed job maps the
// scanner's status to a specific "server not reachable" error code when
// possible, falling back to the generic agent-internal error.
void HetroRestoreJob::ReportJobCompleteStatus(HetroNativeScanStatistics &scanStatistics, SubJobStatus::type &jobStatus,
    std::string &jobLogLabel, int &jobProgress)
{
    if (jobStatus == SubJobStatus::COMPLETED) {
        ReportScannerCompleteStatus(m_scanStatistics);
        return;
    }
    int64_t errCode = HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    const int scanStatus = scanStatistics.m_scanStatus;
    if (scanStatus == static_cast<int>(SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE)) {
        errCode = HomoErrorCode::ERROR_NAS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
    } else if (scanStatus == static_cast<int>(SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE)) {
        errCode = HomoErrorCode::ERROR_NAS_SCAN_SECONDARY_SERVER_NOT_REACHABLE;
    }
    ReportJobDetailsWithErrorCode(jobStatus, jobProgress, jobLogLabel, JobLogLevel::TASK_LOG_ERROR, errCode);
}

// Native (non-aggregate) generation path: restores dcache/fcache metadata
// from the copy, starts the scanner over it, and monitors the scan until it
// finishes, folding the scanner's statistics into the job totals.
int HetroRestoreJob::GenerateSubJobInner()
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
        "nas_plugin_hetro_restore_scan_start_label", JobLogLevel::TASK_LOG_INFO);
    m_lastScannerReportTime = GetCurrentTimeInSeconds();

    m_restoreScanStatistics.m_scanStartTime = GetCurrentTimeInSeconds();
    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }
    PrintJobInfo();
    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        return Module::FAILED;
    }
    if (!InitPathForRestore()) {
        HCP_Log(ERR, MODULE) << "InitPathForRestore failed" << HCPENDLOG;
        return Module::FAILED;
    }
    // The copy ships dcache/fcache as compressed archives; unpack them before scanning.
    if (!UnzipDcachefiles()) {
        HCP_Log(ERR, MODULE) << "unzip dcache and fcache failed" << HCPENDLOG;
        return Module::FAILED;
    }

    if (!StartScanner()) {
        HCP_Log(ERR, MODULE) << "Start Scanner Failed" << HCPENDLOG;
        return Module::FAILED;
    }

    // Blocks until the scanner finishes, periodically reporting progress and
    // updating jobStatus/label/progress out-parameters.
    MonitorScanner(m_restoreScanStatistics, m_restoreJobStatus, m_restoreJobLogLabel, m_restoreJobProgress);

    // Merge this scan's counters into the accumulated job statistics.
    ScanStatistics statistic = m_scanner->GetStatistics();
    m_scanStatistics = AddScanStatistics(statistic, m_scanStatistics);

    return Module::SUCCESS;
}

// Cleans up after the generation phase: removes staged Kerberos files for
// Kerberos-authenticated CIFS shares and tears down the scanner instance.
void HetroRestoreJob::ClearGenerateSubJobEnv()
{
    HCP_Log(INFO, MODULE) << "Enter ClearGenerateSubJobEnv" << HCPENDLOG;
    const bool kerberosCifs = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) &&
        (m_nasShare.auth.authType == AuthType::type::KERBEROS);
    if (kerberosCifs) {
        CleanKrbFilesForCifs(m_jobId);
    }
    if (m_scanner) {
        m_scanner->Destroy();
    }
}

// Generation path for copies produced by the homogeneous (file-system) flow:
// restores are driven by a live scan of the share rather than by dcache
// metadata. Starts the file-system scanner and monitors it to completion.
int HetroRestoreJob::GenerateSubJobInnerForFileSystem()
{
    // Fix: the entry log previously named a nonexistent function
    // ("GenerateSubJobByScannerInner"), which misleads log analysis.
    HCP_Log(INFO, MODULE) << "Enter HetroRestoreJob::GenerateSubJobInnerForFileSystem" << HCPENDLOG;
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
        "nas_plugin_hetro_restore_scan_start_label", JobLogLevel::TASK_LOG_INFO);
    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }

    if (!InitPathForRestore()) {
        HCP_Log(ERR, MODULE) << "Init path for restore failed" << HCPENDLOG;
        return Module::FAILED;
    }

    if (StartScannerForFileSystem() != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start Scanner Failed" << HCPENDLOG;
        return Module::FAILED;
    }

    // Blocks until the scan finishes, updating the restore job status fields.
    MonitorScanner(m_restoreScanStatistics, m_restoreJobStatus, m_restoreJobLogLabel, m_restoreJobProgress);

    // Merge this scan's counters into the accumulated job statistics.
    ScanStatistics statistic = m_scanner->GetStatistics();
    m_scanStatistics = AddScanStatistics(statistic, m_scanStatistics);

    return Module::SUCCESS;
}


// Builds the scan configuration (including include-filters derived from the
// restore sub-objects), creates the scanner instance, seeds its queue, and
// starts it. Returns Module::SUCCESS / Module::FAILED.
int HetroRestoreJob::StartScannerForFileSystem()
{
    HCP_Log(DEBUG, MODULE) << "Enter StartScannerForFileSystem" << HCPENDLOG;
    ScanConfig scanConfig {};
    FillScanConfig(scanConfig);
    SpecialDealScanConfig(scanConfig);
    if (AddFilterRuleForFileSystem(scanConfig) != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Add filter rule for file system Failed" << HCPENDLOG;
        return Module::FAILED;
    }

    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (m_scanner == nullptr) {
        ERRLOG("Start Scanner failed!");
        // Fix: this returned 'false' (0) from an int-returning function, which
        // callers comparing against Module::SUCCESS would read as success.
        return Module::FAILED;
    }

    // Seed the scan queue with the protocol-appropriate root path.
    SpecialDealScanner();

    if (m_scanner->Start() != SCANNER_STATUS::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start scanner instance failed" << HCPENDLOG;
        m_scanner->Destroy();
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

// Converts the restore sub-objects into scanner include-filters: paths ending
// with '/' are directories, everything else is a file. Always returns
// Module::SUCCESS (signature kept for symmetry with other config steps).
int HetroRestoreJob::AddFilterRuleForFileSystem(ScanConfig& scanConfig)
{
    HCP_Log(DEBUG, MODULE) << "Enter AddFilterRuleForFileSystem" << HCPENDLOG;
    vector<string> dirFilterRule;
    vector<string> fileFilterRule;
    for (size_t i = 0; i < m_restoreJobPtr->restoreSubObjects.size(); ++i) {
        string path = m_restoreJobPtr->restoreSubObjects[i].name;
        // is dir or file , the last char is not '/' is file
        if (path.back() != '/') { // file
            HCP_Log(INFO, MODULE) << "filter[" << i << "], file filter" << path << HCPENDLOG;
            fileFilterRule.push_back(path);
        } else { // dir
            // Fix: this branch logged "file filter" (copy-paste) for directories.
            HCP_Log(INFO, MODULE) << "filter[" << i << "], dir filter" << path << HCPENDLOG;
            dirFilterRule.push_back(path);
        }
    }
    ScanDirectoryFilter scanDirectoryFilter;
    scanDirectoryFilter.dirList = dirFilterRule;
    scanDirectoryFilter.type = INCLUDE;

    ScanFileFilter scanFileFilter;
    scanFileFilter.fileList = fileFilterRule;
    scanFileFilter.type = INCLUDE;

    scanConfig.dFilter = scanDirectoryFilter;
    scanConfig.fFilter = scanFileFilter;
    return Module::SUCCESS;
}

// Adjusts the scan configuration for the scan-driven restore path: forces a
// FULL scan over the dcache/fcache metadata directory and selects the IO
// engine (libnfs vs libsmb2) from the share protocol. No-op when the restore
// is not scan-driven. Always returns Module::SUCCESS.
int HetroRestoreJob::SpecialDealScanConfig(ScanConfig& scanConfig)
{
    HCP_Log(INFO, MODULE) << "Enter SpecialDealScanConfig" << HCPENDLOG;
    if (m_justRestoreByScan) {
        scanConfig.scanType = ScanJobType::FULL;
        scanConfig.metaPath = m_dcaheAndFcachePath;
        m_dcaheAndFcachePath = m_dcaheAndFcachePath + "/latest"; // the scanner creates a "latest" directory under this path to store the metafile files
        if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
            scanConfig.scanIO = IOEngine::LIBNFS;
        } else {
            scanConfig.scanIO = IOEngine::LIBSMB2;
        }
    }
    return Module::SUCCESS;
}

// Seeds the scanner's work queue with the protocol-specific root path:
// NFS scans start from "." while SMB scans start from the empty root path.
void HetroRestoreJob::SpecialDealScanner()
{
    HCP_Log(INFO, MODULE) << "Enter SpecialDealScanner" << HCPENDLOG;
    const bool isNfsShare = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS);
    m_scanner->Enqueue(isNfsShare ? "." : "");
}

void HetroRestoreJob::GetRestoreType()
{
    HCP_Log(INFO, MODULE) << "Enter GetRestoreType" << HCPENDLOG;
    if (m_restoreJobPtr->jobParam.restoreType == AppProtect::RestoreJobType::type::FINE_GRAINED_RESTORE) {
        m_fineGrainedRestore = true;
        HCP_Log(INFO, MODULE) << "=====This is FINE_GRAINED_RESTORE=====" << HCPENDLOG;
    }
    if (m_restoreJobPtr->copies[0].formatType == CopyFormatType::type::INNER_DIRECTORY) {
        m_aggregateRestore = true;
        HCP_Log(INFO, MODULE) << "=====This is AGGREGATE_RESTORE=====" << HCPENDLOG;
    }

    // 副本为同构生成的，走扫描
    if (m_restoreJobPtr->copies[0].protectObject.subType == "NasFileSystem") {
        m_justRestoreByScan = true;
        HCP_Log(INFO, MODULE) << "=====Just Restore By Scan=====" << HCPENDLOG;
    }
    if (m_restoreJobPtr->copies[0].dataType == AppProtect::CopyDataType::TAPE_STORAGE_COPY) {
        m_tapeCopy = true;
        HCP_Log(INFO, MODULE) << "=====This is TYPE STORAGE COPY=====" << HCPENDLOG;
    }
}

// Reports sub-job progress to the framework. Completion reports 100% with no
// label; any other status is reported as an error, mapping the backup
// engine's failure status to the matching homologous error code (falling back
// to the generic agent-internal error).
void HetroRestoreJob::ReportJobProgress(SubJobStatus::type &jobStatus, std::string &jobLogLabel)
{
    if (jobStatus == SubJobStatus::COMPLETED) {
        ReportJobDetails(jobStatus, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);
        return;
    }
    int64_t errCode = HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    if (m_backupStatus == BackupPhaseStatus::FAILED_NOACCESS) {
        errCode = HomoErrorCode::ERROR_BACKUP_FAILED_NOACCESS_ERROR;
    } else if (m_backupStatus == BackupPhaseStatus::FAILED_NOSPACE) {
        errCode = HomoErrorCode::ERROR_BACKUP_FAILED_NOSPACE_ERROR;
    } else if (m_backupStatus == BackupPhaseStatus::FAILED_PROT_SERVER_NOTREACHABLE) {
        errCode = HomoErrorCode::ERROR_NAS_BACKUP_PROTECTED_SERVER_NOT_REACHABLE;
    } else if (m_backupStatus == BackupPhaseStatus::FAILED_SEC_SERVER_NOTREACHABLE) {
        errCode = HomoErrorCode::ERROR_NAS_BACKUP_SECONDARY_SERVER_NOT_REACHABLE;
    }
    ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR, errCode);
}

// Updates the backup start time in the shared resource, retrying up to
// RETRY_CNT times with linearly growing backoff (10s, 20s, 30s) between
// attempts. Returns the result of the final attempt.
bool HetroRestoreJob::RetryUpdateBackupStartTime(HetroBackupSubJob& backupSubJob, const string& mountPath)
{
    int attempts = INIT_INT_VALUE;
    for (;;) {
        if (UpdateBackupStartTimeInSharedResource(backupSubJob, mountPath)) {
            return true;
        }
        if (attempts >= RETRY_CNT) {
            return false;
        }
        sleep(NUM_10 * ++attempts);
    }
}

// CHECK sub job: aggregates per-sub-job statistics into the main-job stats
// file and reports COMPLETED only when no directories or files failed.
// Deliberately returns Module::SUCCESS on every path — the actual outcome is
// reported to the framework via jobStatus inside the deferred report.
int HetroRestoreJob::ExecuteCheckSubJobInner()
{
    INFOLOG("Enter Check SubJob statistics");
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    // Deleter-only shared_ptr: report whatever jobStatus ends up being, on any exit.
    std::shared_ptr<void> defer(nullptr, [&](...) {
        ReportJobDetails(jobStatus, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    });
    if (!InitJobInfo()) {
        return Module::SUCCESS;
    }
    HetroNativeBackupStats backupStatistics {};
    // Fold all per-sub-job stats files into the main-job statistics file.
    CalculateJobStats();
    std::string filePath = m_cacheFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);
    if (backupStatistics.m_noOfDirFailed == 0 && backupStatistics.m_noOfFilesFailed == 0) {
        INFOLOG("subjob statistics check success, main job report success");
        jobStatus = SubJobStatus::COMPLETED;
    }
    return Module::SUCCESS;
}

// Runs one restore (data copy) sub job: sets up IP routes, resolves the
// control file, optionally mounts the remote share (delete phase, NFS only),
// then runs the backup engine with up to RETRY_CNT retries when the monitor
// signals a retryable stall. All exits funnel through
// CleanAndReportDetailedStatus, which also tears down the mount/routes.
int HetroRestoreJob::ExecuteRestoreSubJobInner(HetroBackupSubJob backupSubJob)
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    BackupStats backupStatistics {};
    int jobProgress = 0, retryCnt = 0;
    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    std::string jobLogLabel = "nas_plugin_hetro_restore_data_fail_label";
    // Per-sub-job temporary mount point, used by the delete phase below.
    std::string mountPath = "/mnt/DataBackup/" + m_subJobId;
    g_nodeLevelTaskInfo.Insert(m_jobId);
    g_nodeLevelTaskInfo.IncrSubTasksCount();
    // Deleter-only shared_ptr: remove the routes added below on any exit path.
    std::shared_ptr<void> defer(nullptr, [&](...) {
        OperateIpsRule(m_IpRuleList, "DELETE");
        OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "DELETE", m_nasShare.nasShareExt.m_serviceIP);
    });
    if (!InitJobInfo() || !OperateIpsRule(m_IpRuleList, "ADD") ||
        !OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "ADD", m_nasShare.nasShareExt.m_serviceIP)) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return CleanAndReportDetailedStatus(backupSubJob, jobStatus, jobLogLabel, mountPath, backupStatistics);
    }

    if (!PrintJobInfo() || !GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        return CleanAndReportDetailedStatus(backupSubJob, jobStatus, jobLogLabel, mountPath, backupStatistics);
    }
    /* update the control file path */
    backupSubJob.m_ControlFile = m_cacheFsPath + backupSubJob.m_ControlFile;
    INFOLOG("control file is: %s, jobId: %s, subJobId: %s", backupSubJob.m_ControlFile.c_str(), m_jobId.c_str(), m_subJobId.c_str());
    // Delete-phase sub jobs need the share mounted locally (NFS only; CIFS is a no-op).
    if (backupSubJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE && !MountRemoteShare(mountPath))
        return CleanAndReportDetailedStatus(backupSubJob, jobStatus, jobLogLabel, mountPath, backupStatistics);

    if (!RetryUpdateBackupStartTime(backupSubJob, mountPath)) {
        HCP_Log(ERR, MODULE) << "UpdateBackupStartTimeInSharedResource failed" << HCPENDLOG;
        m_backupStatus = BackupPhaseStatus::FAILED_NOACCESS;
        return CleanAndReportDetailedStatus(backupSubJob, jobStatus, jobLogLabel, mountPath, backupStatistics);
    }

    // Run the backup engine; retry the whole start/monitor/destroy cycle when
    // the monitor reports a retryable condition, up to RETRY_CNT attempts.
    MONITOR_BACKUP_RES_TYPE monitorRet;
    do {
        if (!StartBackup(backupSubJob)) {
            return CleanAndReportDetailedStatus(backupSubJob, jobStatus, jobLogLabel, mountPath, backupStatistics);
        }
        monitorRet = MonitorBackup(backupStatistics, jobStatus, jobLogLabel, jobProgress);
        DestroyBackup();
    } while (monitorRet == MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY && ++retryCnt < RETRY_CNT);

    if (retryCnt >= RETRY_CNT && monitorRet == MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY) {
        // seems this sub job is stuck for some reason , copy this control file to meta repo for further check
        INFOLOG("subjob is stuck, %s, copy controlFile: %s", m_subJobId.c_str(), backupSubJob.m_ControlFile.c_str());
        CopyFile(backupSubJob.m_ControlFile, m_metaFsPath);
    }
    return CleanAndReportDetailedStatus(backupSubJob, jobStatus, jobLogLabel, mountPath, backupStatistics);
}

// Tears down the active backup engine instance (if any) and releases the
// owning pointer so a subsequent retry starts from a clean state.
void HetroRestoreJob::DestroyBackup()
{
    if (!m_backup) {
        return;
    }
    m_backup->Destroy();
    m_backup.reset();
}

// Error-path cleanup for sub-job execution: removes the Kerberos files staged
// for this job (CIFS + Kerberos only) and balances the node-level sub-task
// counter that was incremented when the sub job started.
void HetroRestoreJob::HandleExecuteInnerError()
{
    const bool kerberosCifs = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) &&
        (m_nasShare.auth.authType == AuthType::type::KERBEROS);
    if (kerberosCifs) {
        CleanKrbFilesForCifs(m_jobId);
    }
    g_nodeLevelTaskInfo.DecrSubTasksCount();
}

// CREATE sub job: prepares the control-file paths (aggregate vs native vs
// scan-driven layouts), then converts the scanner's control files into
// restore sub jobs via HandleMonitorScannerCompletion. Failures report an
// agent-internal error and return Module::FAILED.
int HetroRestoreJob::ExecuteCreateSubJobInner(HetroBackupSubJob backupSubJob)
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    std::string jobLogLabel {};

    HCP_Log(INFO, MODULE) << "Enter ExecuteCreateSubJobInner" << HCPENDLOG;
    if (IsAggregate()) {
        m_aggregateRestore = true;
        m_orderNumberForAggregate = backupSubJob.orderNumberForAggregate;
        if (InitAggregateGenerateJobInfo() != Module::SUCCESS) {
            HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
            ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR,
                HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
            return Module::FAILED;
        }
        // update the scan control path with individual restore iteration
        if (!m_fineGrainedRestore) {
            m_scanControlFilePath += "/" + to_string(m_orderNumberForAggregate);
        }
    } else {
        if (!InitPathForRestore()) {
            HCP_Log(ERR, MODULE) << "InitPathInfo failed" << HCPENDLOG;
            ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR,
                HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
            return Module::FAILED;
        }

        // Scan-driven restores keep their metadata under the scanner-created
        // "latest" subdirectory (see SpecialDealScanConfig).
        if (m_justRestoreByScan) {
            m_dcaheAndFcachePath = m_dcaheAndFcachePath + "/latest";
        }
    }
    PrintJobInfo();

    if (!HandleMonitorScannerCompletion(jobStatus, jobLogLabel, m_scanControlFilePath, m_restoreControlFilePath)) {
        ReportJobDetailsWithErrorCode(jobStatus, PROGRESS0, jobLogLabel, JobLogLevel::TASK_LOG_ERROR,
            HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        return Module::FAILED;
    }

    ReportJobDetails(SubJobStatus::COMPLETED, PROGRESS100, "", JobLogLevel::TASK_LOG_INFO);

    return Module::SUCCESS;
}

// Mounts the remote NFS share (NFSv3) at mountPath, creating the mount point
// first. CIFS shares are accessed without a local mount and return true
// immediately. Returns false when the directory cannot be created or the
// mount fails.
bool HetroRestoreJob::MountRemoteShare(std::string mountPath)
{
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        DBGLOG("CIFS do not use mount, return true directly!");
        return true;
    }

    if (!CreateDirectory(mountPath)) {
        HCP_Log(ERR, MODULE) << "Failed to create temp local mount path" << HCPENDLOG;
        return false;
    }

    // Fix: removed an unreachable branch that stripped a leading '/' from the
    // share path for CIFS — CIFS already returned true above, so the
    // condition could never hold (dead code).
    std::string nfsShareName = m_nasShare.sharePath;
    std::string nasProto = "nfs";
    std::string nasProtoVers = NFS_VERSION_3;
    std::string nasMountOptions = NFS_MOUNT_OPTION;
    std::vector<std::string> ipList;
    ipList.push_back(m_nasShare.nasShareExt.m_serviceIP);
    HCP_Log(DEBUG, MODULE) << "m_nasShare.nasShareExt.m_serviceIP: " << m_nasShare.nasShareExt.m_serviceIP << HCPENDLOG;
    std::string outSelectSvcIp;
    NasMountParams mountParams = {nasProto, nasProtoVers, nfsShareName, mountPath, nasMountOptions, "", "", ipList};
    if (MountNFS(mountParams, outSelectSvcIp) != MP_SUCCESS) {
        HCP_Log(ERR, MODULE) << "Mount remote nas failed for share: " << nfsShareName << HCPENDLOG;
        return false;
    }
    HCP_Log(DEBUG, MODULE) << "Mount remote nas successful" << HCPENDLOG;
    return true;
}

// Unmounts the temporary NFS mount at mountPath and removes the mount-point
// directory on success. CIFS shares were never mounted, so nothing is done.
void HetroRestoreJob::UnmountRemoteShare(std::string mountPath)
{
    const bool isCifs = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS);
    if (isCifs) {
        DBGLOG("CIFS do not use mount, return directly!");
        return;
    }

    // Only remove the temporary directory after a clean unmount.
    if (UnmountNas(mountPath) == MP_SUCCESS) {
        RemoveDirectory(mountPath);
        return;
    }
    HCP_Log(ERR, MODULE) << "Unmount remote nas failed for tmp path: "
        << WIPE_SENSITIVE(mountPath) << HCPENDLOG;
}

// Removes the per-job working directories: the temporary local mount point
// and the statistics/restore-job directories on the cache file system.
void HetroRestoreJob::RemoveCacheDirectories()
{
    const std::string statsDir = m_cacheFsPath + "/statistics_" + m_jobId;
    const std::string restoreJobDir = m_cacheFsPath + "/restore-job/" + m_jobId;
    RemoveDirectory(m_tempLocalMountPath);
    RemoveDirectory(statsDir);
    RemoveDirectory(restoreJobDir);
}

// Aggregates every per-sub-job statistics file under the job's statistics
// directory into a single main-job stats file. Directory counters are taken
// exclusively from the "dirmtime" phase files (each directory is touched once
// there); all other counters accumulate across the remaining files. Each
// consumed file is deleted. Returns false if the directory cannot be listed.
bool HetroRestoreJob::CalculateJobStats()
{
    uint64_t noOfDirCopiedFromDirMtimePhase = 0;
    uint64_t noOfDirFailedFromDirMtimePhase = 0;
    uint32_t noOfFiles = 0;
    HetroNativeBackupStats mainJobStats {};

    std::string dir = m_cacheFsPath + "/statistics_" + m_jobId;
    std::vector<std::string> fileList {};
    if (!GetFileListInDirectory(dir, fileList)) {
        HCP_Log(ERR, MODULE) << "Get filelist for dir failed: " << dir << HCPENDLOG;
        return false;
    }
    // Fix: removed the unused local 'uint32_t i = 0;'.
    for (const std::string& path : fileList) {
        if (noOfFiles++ % NUMBER50 == 0) {
            /* Since this loop may take too much time, report progress to framework, every 50 files */
            ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        }
        HetroNativeBackupStats subJobStats {};
        ReadBackupStatsFromFile(path, subJobStats);
        if (path.find("dirmtime") != std::string::npos) {
            noOfDirCopiedFromDirMtimePhase += subJobStats.m_noOfDirCopied;
            noOfDirFailedFromDirMtimePhase += subJobStats.m_noOfDirFailed;
        } else {
            mainJobStats += subJobStats;
        }
        RemoveFile(path);
    }
    // restore —— even if aggregate restore, also have mtime stage (to differ from backup)
    mainJobStats.m_noOfDirCopied = noOfDirCopiedFromDirMtimePhase;
    mainJobStats.m_noOfDirFailed = noOfDirFailedFromDirMtimePhase;

    std::string filePath = m_cacheFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    WriteBackupStatsToFile(filePath, mainJobStats);
    return true;
}

int HetroRestoreJob::PostJobInner()
{
    HetroNativeBackupStats backupStatistics {};
    std::string filePath {};

    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    if (!InitJobInfo()) {
        goto error;
    }
    PrintJobInfo();
    // 添加路由
    if (!OperateIpsRule(m_IpRuleList, "DELETE") ||
        !OperateIpsRuleForRemoteIp(m_remoteIpRuleList, "DELETE", m_nasShare.nasShareExt.m_serviceIP)) {
        ERRLOG("add ip rule failed");
        goto error;
    }
    MergeBackupFailureRecords(m_cacheFsPath);

    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        goto error;
    }

    if (!RemoveCertification(m_restoreJobPtr->targetEnv)) {
        goto error;
    }
     // Delete all the shared reources which are created in the PrerequisiteJob
    DeleteSharedResources(m_jobId);

    filePath = m_cacheFsPath + "/statistics_" + m_jobId + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);
    RemoveCacheDirectories();
    PrintBackupCopyInfo(backupStatistics);
    ReportBackupCompletionStatus(backupStatistics);
    return MP_SUCCESS;

error:
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS
        && m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        CleanKrbFilesForCifs(m_jobId);
    }
    RemoveCertification(m_restoreJobPtr->targetEnv);
    DeleteSharedResources(m_jobId);
    ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0, "", JobLogLevel::TASK_LOG_ERROR,
        HomoErrorCode::ERROR_AGENT_INTERNAL_ERROR);
    g_nodeLevelTaskInfo.Erase(m_jobId);
    return MP_FAILED;
}

bool HetroRestoreJob::GenerateSubJobList(std::vector<SubJob> &subJobList, std::vector<std::string> &ctrlFileList,
    const std::string &srcCtrlFileFullPath, const std::string &dstCtrlFileFullPath)
{
    SubJob subJob {};
    uint32_t subTaskType {};

    string dstCtrlFileRelPath = dstCtrlFileFullPath.substr(m_cacheFsPath.length(), string::npos);
    if (!InitSubJobInfo(subJob, m_jobId, dstCtrlFileRelPath, subTaskType)) {
        HCP_Log(ERR, MODULE) << "Init subtask failed" << HCPENDLOG;
        return false;
    }

    subJobList.push_back(subJob);
    ctrlFileList.push_back(srcCtrlFileFullPath);

    return true;
}

bool HetroRestoreJob::CreateSubTasksFromCtrlFile(std::string srcDir, std::string dstDir,
    uint32_t subTaskType, bool isFinal)
{
    /* Walk the scanner-produced control files in srcDir, copy each valid one
     * to dstDir and create a framework subjob for it (batched 10 at a time).
     * When isFinal is false, back off for 180s after a create failure and cap
     * a single pass at 100 control files.
     * Fixes: INFOLOG passed size_t to %d (undefined behavior) -> %zu;
     * removed unused local `ctrlFileRelPath`. */
    std::vector<std::string> srcFileList {};
    std::vector<SubJob> subJobList {};
    std::vector<std::string> ctrlFileList {};
    uint32_t validCtrlFileCntr = 0;
    // Shared across calls: remembers when CreateSubTask last failed.
    static int64_t lastCreateJobErrTime = 0;

    if (!isFinal && lastCreateJobErrTime != 0 &&
        ((GetCurrentTimeInSeconds() - lastCreateJobErrTime) < NUMBER180)) {
        HCP_Log(WARN, MODULE) << "lastCreateJobErrTime 180 sec check failed, try later" << HCPENDLOG;
        return true;
    }
    lastCreateJobErrTime = 0;

    if (!checkFilePathAndGetSrcFileList(srcDir, dstDir, srcFileList)) {
        return false;
    }
    INFOLOG("Enter CreateSubTasksFromCtrlFile, NumOfCtrlFiles: %zu isFinal: %d", srcFileList.size(), isFinal);

    for (uint32_t i = 0; i < srcFileList.size(); ++i) {
        if (IsAbortJob()) {
            INFOLOG("Exit Abort for taskid: %s, subtaskid: %s", m_jobId.c_str(), m_subJobId.c_str());
            return true;
        }

        std::string srcCtrlFileFullPath = srcFileList[i];
        // Skip files that do not belong to the requested phase.
        if (!IsValidCtrlFile(subTaskType, srcCtrlFileFullPath)) {
            continue;
        }
        if (!isFinal && validCtrlFileCntr++ >= NUMBER100) {
            break;
        }

        std::string ctrlFileParentPath = GetPathName(srcCtrlFileFullPath);
        // Bare control file name (strip the parent directory and '/').
        std::string ctrlFile = srcCtrlFileFullPath.substr(ctrlFileParentPath.length() + NUMBER1,
            srcCtrlFileFullPath.length() - ctrlFileParentPath.length() - NUMBER1);

        std::string dstCtrlFileFullPath = dstDir + "/" + ctrlFile;

        CopyFile(srcCtrlFileFullPath, dstDir);

        if (!GenerateSubJobList(subJobList, ctrlFileList, srcCtrlFileFullPath, dstCtrlFileFullPath)) {
            HCP_Log(ERR, MODULE) << "Exit CreateSubTasksFromCtrlFile, GenerateSubJobList failed" << HCPENDLOG;
            return false;
        }

        // We create 10 Jobs at a time. If 10 is not accumulated, continue
        if (subJobList.size() % NUMBER10 != 0) {
            continue;
        }

        if (!CheckAndRetryCreateSubTask(subJobList, ctrlFileList, lastCreateJobErrTime, isFinal)) {
            return false;
        }
    }

    // Flush any remaining (<10) accumulated subjobs.
    if (!CheckAndRetryCreateSubTask(subJobList, ctrlFileList, lastCreateJobErrTime, isFinal)) {
        return false;
    }

    HCP_Log(INFO, MODULE) << "Exit CreateSubTasksFromCtrlFile" << HCPENDLOG;
    return true;
}

bool HetroRestoreJob::UpdateCopyPhaseStartTimeInGenRsc()
{
    // Record the copy-phase start time (remote-server clock) in the shared
    // general resource, once; later calls are no-ops.
    if (m_generalInfo.m_backupCopyPhaseStartTime != 0) {
        return true;  // already stamped by an earlier subtask
    }

    HCP_Log(INFO, MODULE) << "Enter UpdateCopyPhaseStartTimeInGenRsc" << HCPENDLOG;

    m_generalInfo.m_backupCopyPhaseStartTime = GetCurrentTimeFromRemoteServer(m_metaFsPath);
    if (m_generalInfo.m_backupCopyPhaseStartTime == 0) {
        HCP_Log(ERR, MODULE) << "Get current time of first backup copy subtask failed" << HCPENDLOG;
        return false;
    }

    // Persist to the shared resource so other subtasks observe the stamp.
    if (!UpdateGeneralResource(m_jobId, m_generalInfo)) {
        HCP_Log(ERR, MODULE) << "UpdateGeneralResource failed" << HCPENDLOG;
        return false;
    }

    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
        "nas_plugin_hetro_restore_data_start_label", JobLogLevel::TASK_LOG_INFO);

    HCP_Log(INFO, MODULE) << "Exit UpdateCopyPhaseStartTimeInGenRsc" << HCPENDLOG;
    return true;
}

bool HetroRestoreJob::InitSubJobInfo(SubJob &subJob, const std::string jobId,
    const std::string ctrlFile, uint32_t &subTaskType)
{
    // Populate the thrift SubJob descriptor for one control file: the name,
    // type and priority are derived from the file name, and the restore
    // context (cache paths + aggregate copy ordering) is serialized to JSON
    // as the subjob payload.
    std::string jobName;
    uint32_t jobPrio {0};
    HetroBackupSubJob subJobCtx;
    std::string subJobCtxStr;

    GetSubJobTypeByFileName(ctrlFile, jobName, subTaskType, jobPrio);
    subJobCtx.m_ControlFile = ctrlFile;
    subJobCtx.m_SubTaskType = subTaskType;
    if (!m_aggregateRestore) {
        subJobCtx.dataCachePath = "";
        subJobCtx.metaFilePath = "";
        subJobCtx.dcacheAndFcachePath = m_dcaheAndFcachePath;
    } else {
        // Aggregate restore: select the paths belonging to the current copy.
        subJobCtx.dataCachePath = m_dataFsPathList[m_orderNumberForAggregate];
        subJobCtx.metaFilePath = m_metaFsPathList[m_orderNumberForAggregate];
        subJobCtx.dcacheAndFcachePath = m_dcaheAndFcachePathList[m_orderNumberForAggregate];
        subJobCtx.copyId = m_copyIdList[m_orderNumberForAggregate];
    }
    // Store the cache path relative to the cache-filesystem root.
    subJobCtx.dcacheAndFcachePath = subJobCtx.dcacheAndFcachePath.substr(m_cacheFsPath.length());
    subJobCtx.orderNumberForAggregate = m_orderNumberForAggregate;

    if (!Module::JsonHelper::StructToJsonString(subJobCtx, subJobCtxStr)) {
        HCP_Log(ERR, MODULE) << "Convert to json failed for subJob info: " << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "restoreSubJobInfoStr is : "  << subJobCtxStr << HCPENDLOG;
    subJob.__set_jobId(jobId);
    subJob.__set_jobName(jobName);
    subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subJob.__set_policy(ExecutePolicy::ANY_NODE);
    subJob.__set_jobInfo(subJobCtxStr);
    subJob.__set_jobPriority(jobPrio);
    subJob.__set_ignoreFailed(false);
    return true;
}

bool HetroRestoreJob::HandleMonitorScannerCompletion(SubJobStatus::type &jobStatus, std::string &jobLogLabel,
    const std::string &scanCtrlFilePath, const std::string &backupCtrlFilePath)
{
    // Final flush after the scanner finishes: create subtasks for every data
    // phase (isFinal = true), then the check subtask. Stops at the first
    // failure, exactly like the original short-circuit chain.
    const uint32_t phases[] = {
        SUBJOB_TYPE_DATACOPY_COPY_PHASE,
        SUBJOB_TYPE_DATACOPY_DELETE_PHASE,
        SUBJOB_TYPE_DATACOPY_HARDLINK_PHASE,
        SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE
    };
    bool ok = true;
    for (uint32_t phase : phases) {
        if (!CreateSubTasksFromCtrlFile(scanCtrlFilePath, backupCtrlFilePath, phase, true)) {
            ok = false;
            break;
        }
    }
    if (ok) {
        ok = CreateCheckSubTask();
    }
    if (!ok) {
        HCP_Log(ERR, MODULE) << "Create subtask failed" << HCPENDLOG;
        jobStatus = SubJobStatus::FAILED;
        jobLogLabel = "nas_plugin_hetro_restore_scan_fail_label";
        return false;
    }
    return true;
}

bool HetroRestoreJob::MonitorScanner(HetroNativeScanStatistics &scanStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Poll the scanner engine until it leaves the in-progress state, creating
    // copy-phase subtasks from the control files it emits along the way and
    // periodically reporting scan statistics to the framework.
    // Outputs: jobStatus / jobLogLabel / jobProgress are filled from the final
    // scanner state by FillMonitorScannerVarDetails().
    // NOTE(review): the scanStatistics parameter is never read or written here;
    // statistics come from m_scanner / m_scanStatistics — confirm with callers.
    HCP_Log(INFO, MODULE) << "Enter Monitor Scanner" << HCPENDLOG;
    SCANNER_TASK_STATUS scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS;
    std::string scanCtrlFilePath = m_scanControlFilePath;
    std::string backupCtrlFilePath = m_restoreControlFilePath;
    jobStatus = SubJobStatus::RUNNING;
    jobProgress = 0;
    jobLogLabel = "";
    ScanStatistics tempStatistic{};
    do {
        m_scanStatus = m_scanner->GetStatus();
        /* Ensure scanner is ready and start to scan */
        if (m_scanStatus == SCANNER_STATUS::INIT) {
            sleep(SUBTASK_WAIT_FOR_SCANNER_READY_IN_SEC);
            continue;
        }
        FillMonitorScannerVarDetails(scanTaskStatus, jobStatus, jobLogLabel, jobProgress);
        // Any terminal scanner state (success/failed/aborted) ends the loop.
        if (scanTaskStatus != SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS) {
            break;
        }

        // Forward a framework abort request to the scanner engine; the loop
        // keeps running until the engine reports an aborted/terminal state.
        if (IsAbortJob()) {
            HCP_Log(INFO, MODULE) << "Scanner - Abort is invocked for"
                << " taskid: " << m_jobId << ", subtaskid: " << m_subJobId << HCPENDLOG;
            if (SCANNER_STATUS::SUCCESS != m_scanner->Abort()) {
                HCP_Log(ERR, MODULE) << "scanner Abort is failed" << HCPENDLOG;
            }
        }

        // Create copy-phase subtasks incrementally while the scan is running;
        // on failure, abort the scanner and let the loop exit via its state.
        if (!CreateSubTasksFromCtrlFile(scanCtrlFilePath, backupCtrlFilePath, SUBJOB_TYPE_DATACOPY_COPY_PHASE)) {
            HCP_Log(ERR, MODULE) << "Create subtask failed, abort scan" << HCPENDLOG;
            m_scanner->Abort();
        }
        HCP_Log(INFO, MODULE) << "scanner status:" << (int)scanTaskStatus << HCPENDLOG;

        // Combine the engine's live counters with the previously accumulated
        // ones before reporting running progress.
        ScanStatistics statistic = m_scanner->GetStatistics();
        tempStatistic = AddScanStatistics(statistic, m_scanStatistics);
        ReportScannerRunningStatus(tempStatistic);
        sleep(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC);
    } while (true);

    if (!CreateBackupJobTaskToCreateFurtherSubTasks()) {
        return false;
    }

    HCP_Log(INFO, MODULE) << "Exit Monitor Scanner" << HCPENDLOG;
    return true;
}

bool HetroRestoreJob::ReportScannerRunningStatus(ScanStatistics &scanStatistics)
{
    // Throttle the detailed (labelled) progress report to once per
    // SCANNER_REPORT_CIRCLE_TIME; between cycles send a bare keep-alive so
    // the framework knows the job is still running.
    const bool detailedReportDue =
        (GetCurrentTimeInSeconds() - m_lastScannerReportTime) > SCANNER_REPORT_CIRCLE_TIME;
    if (!detailedReportDue) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return true;
    }
    ReportJobDetails(SubJobStatus::RUNNING,
                     PROGRESS0,
                     "nas_plugin_hetro_restore_scan_inprogress_label",
                     JobLogLevel::TASK_LOG_INFO,
                     std::to_string(scanStatistics.mTotDirsToBackup),
                     std::to_string(scanStatistics.mTotFilesToBackup),
                     FormatCapacity(scanStatistics.mTotalSizeToBackup));
    m_lastScannerReportTime = GetCurrentTimeInSeconds();
    return true;
}

bool HetroRestoreJob::ReportScannerCompleteStatus(ScanStatistics &scanStatistics)
{
    // Report scan completion: completed-with-warning when any directory
    // failed to scan, plain success otherwise. Single call site, label and
    // level selected up front.
    const bool hasFailedDirs = (scanStatistics.mTotFailedDirs != 0);
    const std::string label = hasFailedDirs
        ? "nas_plugin_hetro_restore_scan_completed_with_warn_label"
        : "nas_plugin_hetro_restore_scan_completed_label";
    const JobLogLevel::type logLevel = hasFailedDirs
        ? JobLogLevel::TASK_LOG_WARNING
        : JobLogLevel::TASK_LOG_INFO;
    ReportJobDetails(SubJobStatus::COMPLETED,
                     PROGRESS100,
                     label,
                     logLevel,
                     std::to_string(scanStatistics.mTotDirsToBackup),
                     std::to_string(scanStatistics.mTotFilesToBackup),
                     FormatCapacity(scanStatistics.mTotalSizeToBackup));
    return true;
}

void HetroRestoreJob::FillMonitorScannerVarDetails(SCANNER_TASK_STATUS &scanTaskStatus,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Translate the scanner engine status into framework job status, log
    // label, progress and the coarse scan-task result. Branches that leave a
    // parameter untouched deliberately keep the caller-provided value
    // (e.g. ABORT_IN_PROGRESS does not change scanTaskStatus).
    switch (m_scanStatus) {
        case SCANNER_STATUS::COMPLETED:
            HCP_Log(INFO, MODULE) << "Scan completed" << HCPENDLOG;
            jobProgress = PROGRESS100;
            jobStatus = SubJobStatus::COMPLETED;
            jobLogLabel = "nas_plugin_hetro_restore_scan_completed_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_SUCCESS;
            break;
        case SCANNER_STATUS::FAILED:
            HCP_Log(ERR, MODULE) << "Scan failed" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_restore_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ABORT_IN_PROGRESS:
            HCP_Log(ERR, MODULE) << "Scan abort in progress" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTING;
            jobLogLabel = "";
            break;
        case SCANNER_STATUS::ABORTED:
            HCP_Log(ERR, MODULE) << "Scan aborted" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTED;
            jobLogLabel = "";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_ABORTED;
            break;
        case SCANNER_STATUS::SCAN_READ_COMPLETED:
        case SCANNER_STATUS::CTRL_DIFF_IN_PROGRESS:
            // Intermediate states: still running, no label.
            jobStatus = SubJobStatus::RUNNING;
            jobLogLabel = "";
            break;
        case SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as sec nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_restore_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as protected nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "nas_plugin_hetro_restore_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ERROR_INC_TO_FULL:
            HCP_Log(ERR, MODULE) << "Scan failed as to change INC to FULL Backup" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            // NOTE(review): label says "backup" unlike the other restore
            // failure branches — confirm whether this is intentional.
            jobLogLabel = "nas_plugin_hetro_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        default:
            // Any other state: leave all outputs unchanged (as before).
            break;
    }
}

HetroRestoreJob::MONITOR_BACKUP_RES_TYPE HetroRestoreJob::MonitorBackup(BackupStats &backupStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Poll the backup engine until it reaches a terminal state, mirroring its
    // status into jobStatus / jobLogLabel / jobProgress and keeping the shared
    // statistics up to date. Returns the coarse monitor result: success,
    // failed, aborted, or need-retry when the engine appears stuck.
    HCP_Log(INFO, MODULE) << "Enter Monitor Backup" << HCPENDLOG;
    jobStatus = SubJobStatus::RUNNING;
    jobProgress = 0;
    jobLogLabel = "";
    BackupStats tmpStats;
    time_t statLastUpdateTime = PluginUtils::GetCurrentTimeInSeconds();

    do {
        m_backupStatus = m_backup->GetStatus();
        HCP_Log(INFO, MODULE) << "backupStatus:" << static_cast<int>(m_backupStatus) << HCPENDLOG;
        tmpStats = m_backup->GetStats();
        // Track when the counters last changed; used for stuck detection below.
        if (backupStatistics != tmpStats) {
            statLastUpdateTime = PluginUtils::GetCurrentTimeInSeconds();
            INFOLOG("backup statistics last update time: %ld", statLastUpdateTime);
            backupStatistics = tmpStats;
        }
        // Still in progress but the counters have not moved for more than the
        // configured BACKUP_STUCK_TIME: treat as stuck and ask for a retry.
        if (m_backupStatus == BackupPhaseStatus::INPROGRESS &&
            PluginUtils::GetCurrentTimeInSeconds() - statLastUpdateTime >
            Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "BACKUP_STUCK_TIME")) {
            HandleMonitorStuck(backupStatistics, jobStatus, jobLogLabel, jobProgress);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY;
        }
        UpdateBackupStatistics(backupStatistics);
        if (m_backupStatus == BackupPhaseStatus::COMPLETED) {
            HandleMonitorComplete(jobStatus, jobLogLabel, jobProgress);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_SUCCESS;
        } else if (m_backupStatus == BackupPhaseStatus::FAILED ||
            m_backupStatus == BackupPhaseStatus::FAILED_NOACCESS ||
            m_backupStatus == BackupPhaseStatus::FAILED_NOSPACE ||
            m_backupStatus == BackupPhaseStatus::FAILED_SEC_SERVER_NOTREACHABLE ||
            m_backupStatus == BackupPhaseStatus::FAILED_PROT_SERVER_NOTREACHABLE) {
            // All failure variants map to the same failed result/label.
            HandleMonitorFailed(jobStatus, jobLogLabel);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_FAILED;
        } else if (m_backupStatus == BackupPhaseStatus::ABORTED) {
            HCP_Log(ERR, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_ABORTED " << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTED;
            jobLogLabel = "";
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_ABORTED;
        } else if (m_backupStatus == BackupPhaseStatus::ABORT_INPROGRESS) {
            jobStatus = SubJobStatus::ABORTING;
            jobLogLabel = "";
        }

        // Forward an external abort request to the engine; the loop keeps
        // polling until the engine reports ABORTED.
        if (IsAbortJob()) {
            HandleMonitorAbort();
        }
        sleep(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC);
    } while (true);
    HCP_Log(INFO, MODULE) << "Exit Monitor Backup" << HCPENDLOG;
    return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_SUCCESS;
}

void HetroRestoreJob::HandleMonitorAbort()
{
    /* Forward a framework abort request to the backup engine.
     * Fix: log message typos — "invocked" -> "invoked",
     * "restore sub task  Abort is failed" -> "restore sub task Abort failed". */
    HCP_Log(INFO, MODULE) << "Restore - Abort is invoked for"
        << " taskid: " << m_jobId << ", subtaskid: " << m_subJobId << HCPENDLOG;
    if (BackupRetCode::SUCCESS != m_backup->Abort()) {
        HCP_Log(ERR, MODULE) << "restore sub task Abort failed" << HCPENDLOG;
    }
}

void HetroRestoreJob::HandleMonitorComplete(SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Backup engine reported completion: mark the subjob finished at 100%.
    HCP_Log(INFO, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_COMPLETED " << HCPENDLOG;
    jobStatus = SubJobStatus::COMPLETED;
    jobProgress = PROGRESS100;
    jobLogLabel = "";
}

void HetroRestoreJob::HandleMonitorFailed(SubJobStatus::type &jobStatus, std::string &jobLogLabel)
{
    // Backup engine reported failure: mark the subjob failed with the
    // restore-data failure label.
    HCP_Log(ERR, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_FAILED " << HCPENDLOG;
    jobLogLabel = "nas_plugin_hetro_restore_data_fail_label";
    jobStatus = SubJobStatus::FAILED;
}

void HetroRestoreJob::HandleMonitorStuck(BackupStats &backupStatistics,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    // Statistics stopped moving for longer than the configured stuck window:
    // abort the engine, count every outstanding item as failed and finish
    // the subjob so the caller can retry.
    WARNLOG("backup statistic has not been update for 300s");
    if (m_backup->Abort() != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "backup Abort is failed" << HCPENDLOG;
    }
    // Everything not yet copied is treated as failed.
    // NOTE(review): assumes *ToBackup >= *Copied — confirm the counters
    // cannot cross (unsigned subtraction would wrap).
    backupStatistics.noOfDirFailed += backupStatistics.noOfDirToBackup - backupStatistics.noOfDirCopied;
    backupStatistics.noOfFilesFailed += backupStatistics.noOfFilesToBackup - backupStatistics.noOfFilesCopied;
    UpdateBackupStatistics(backupStatistics);
    jobStatus = SubJobStatus::COMPLETED;
    jobProgress = PROGRESS100;
    jobLogLabel = "";
}

bool HetroRestoreJob::ReportBackupRunningStatus(uint64_t curSubJobDataSz)
{
    HetroNativeBackupStats mainBackupJobStatistics {};

    if (m_subTaskType == SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return true;
    }
    std::string dir = m_cacheFsPath + "/statistics_" + m_jobId;
    std::vector<std::string> fileList {};
    if (!GetFileListInDirectory(dir, fileList)) {
        HCP_Log(ERR, MODULE) << "Get filelist for dir failed: " << dir << HCPENDLOG;
        return false;
    }
    uint32_t i = 0;
    uint32_t noOfFiles = 0;
    for (i = 0; i < fileList.size(); ++i) {
        HetroNativeBackupStats subBackupJobStatistics {};
        if (!ReadBackupStatsFromFile(fileList[i], subBackupJobStatistics))
            continue;
        mainBackupJobStatistics.m_noOfDirCopied += subBackupJobStatistics.m_noOfDirCopied;
        mainBackupJobStatistics.m_noOfFilesCopied += subBackupJobStatistics.m_noOfFilesCopied;
        mainBackupJobStatistics.m_noOfBytesCopied += subBackupJobStatistics.m_noOfBytesCopied;

        if (noOfFiles++ % NUMBER10 == 0) {
            /* Since this loop may take too much time, report progress to framework, every 10 files */
            ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        }
    }

    if (mainBackupJobStatistics.m_noOfBytesCopied != 0)
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
                         "nas_plugin_hetro_restore_data_inprogress_label", JobLogLevel::TASK_LOG_INFO,
                         std::to_string(mainBackupJobStatistics.m_noOfDirCopied),
                         std::to_string(mainBackupJobStatistics.m_noOfFilesCopied),
                         FormatCapacity(mainBackupJobStatistics.m_noOfBytesCopied));
    else
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);

    return true;
}

bool HetroRestoreJob::ReportBackupCompletionStatus(HetroNativeBackupStats &backupStatistics)
{
    /* As we report this from teardown-subjob or postjob, set datasize to 0. SO that UBC do not consider this size
      for speed calc */
    m_dataSize = 0;
    INFOLOG("HetroRestoreJob::ReportBackupCompletionStatus--m_noOfFilesWriteSkip:%d --m_skipFileCnt:%d",
        backupStatistics.m_noOfFilesWriteSkip, backupStatistics.m_skipFileCnt);

    const bool hasFailures =
        (backupStatistics.m_noOfDirFailed != 0) || (backupStatistics.m_noOfFilesFailed != 0);
    if (hasFailures) {
        // Collect up to FAILURE_OUTPUT_LINE lines from the bundled
        // failure-record csv to include in the warning report.
        std::vector<std::string> message;
        std::ifstream failureFile(m_failureRecordRoot + "/" + m_jobId + "/" + m_jobId + "_bundle.csv");
        if (failureFile.is_open()) {
            std::string record;
            int recordCount = 0;
            while (std::getline(failureFile, record) && ++recordCount <= FAILURE_OUTPUT_LINE) {
                message.push_back(record);
            }
            failureFile.close();
        }
        ReportJobDetailsWithLabel(SubJobStatus::FAILED,
            "nas_plugin_hetro_restore_data_completed_with_warn_label",
            PROGRESS100,
            message,
            JobLogLevel::TASK_LOG_WARNING,
            HomoErrorCode::INTERNAL_ERROR_CODE,
            std::to_string(backupStatistics.m_noOfDirCopied),
            std::to_string(backupStatistics.m_noOfFilesCopied),
            FormatCapacity(backupStatistics.m_noOfBytesCopied),
            std::to_string(backupStatistics.m_noOfDirFailed),
            std::to_string(backupStatistics.m_noOfFilesFailed));
    } else if (backupStatistics.m_noOfFilesWriteSkip == 0) {
        // Clean completion: nothing failed, nothing skipped.
        ReportJobDetails(SubJobStatus::COMPLETED,
                         PROGRESS100,
                         "nas_plugin_hetro_restore_data_completed_label",
                         JobLogLevel::TASK_LOG_INFO,
                         std::to_string(backupStatistics.m_noOfDirCopied),
                         std::to_string(backupStatistics.m_noOfFilesCopied),
                         FormatCapacity(backupStatistics.m_noOfBytesCopied));
    } else {
        // Completed, but some files were skipped during the write phase.
        ReportJobDetails(SubJobStatus::COMPLETED,
                         PROGRESS100,
                         "nas_plugin_hetro_restore_data_completed_but_skip_some_restore_files_label",
                         JobLogLevel::TASK_LOG_INFO,
                         std::to_string(backupStatistics.m_noOfDirCopied),
                         std::to_string(backupStatistics.m_noOfFilesCopied),
                         FormatCapacity(backupStatistics.m_noOfBytesCopied),
                         std::to_string(backupStatistics.m_noOfFilesWriteSkip));
    }
    m_dataSize = backupStatistics.m_noOfBytesCopied / NUMBER1024;
    return true;
}

bool HetroRestoreJob::UpdateBackupStartTimeInSharedResource(HetroBackupSubJob& backupSubJob, std::string mountPath)
{
    /* Under the shared general-resource lock, record the start time of the
     * copy/delete phase for the copy this subjob belongs to. Returns false
     * when locking, reading or updating the shared resource fails; the lock
     * is always released on the error path.
     * Fix: removed unused local `std::string proto` (dead computation). */
    INFOLOG("Enter UpdateBackupStartTimeInSharedResource, %d, %d, %d, %d", backupSubJob.orderNumberForAggregate,
        m_generalInfo.m_restoreCopyIndex, m_generalInfo.m_restoreDeleteIndex, backupSubJob.m_SubTaskType);
    if (!IsUpdateBackupStartTimeRequired(backupSubJob, m_generalInfo)) {
        return true;
    }

    HetroNativeGeneral generalInfo {};
    if (!LockGeneralResource(m_jobId)) {
        return false;
    }
    if (!GetGeneralResource(m_jobId, generalInfo)) {
        goto error;
    }

    if (backupSubJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_COPY_PHASE) {
        if (!UpdateGeneralInfoForCopyPhase(backupSubJob, mountPath, generalInfo)) {
            goto error;
        }
    }

    if (backupSubJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE) {
        if (!UpdateGeneralInfoForDeletePhase(backupSubJob, mountPath, generalInfo)) {
            goto error;
        }
    }

    if (!UpdateGeneralResource(m_jobId, generalInfo)) {
        goto error;
    }
    // Unlock failure is still a hard error even though the update succeeded.
    if (!UnlockGeneralResource(m_jobId)) {
        return false;
    }
    return true;

error:
    UnlockGeneralResource(m_jobId);
    return false;
}

bool HetroRestoreJob::UpdateGeneralInfoForCopyPhase(HetroBackupSubJob& backupSubJob, std::string mountPath,
    HetroNativeGeneral& generalInfo)
{
    // Record the copy-phase start time for the copy this subjob belongs to.
    // Updates both the member m_generalInfo and the shared-resource snapshot
    // generalInfo (the caller persists the latter under the resource lock).
    // Only acts when no start time is recorded yet, or the aggregate copy
    // index has moved on to a new copy.
    if ((generalInfo.m_backupCopyPhaseStartTime == 0) ||
        (generalInfo.m_restoreCopyIndex != backupSubJob.orderNumberForAggregate)) {
        if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
            // NFS: take the timestamp from the remote server's clock via a
            // temporary mount so it is consistent with mtimes on the server.
            if (!MountRemoteShare(mountPath)) {
                return false;
            }
            std::string errMsg;
            m_generalInfo.m_backupCopyPhaseStartTime = GetCurrentTimeFromRemoteServerWithMsg(mountPath, errMsg);
            if (m_generalInfo.m_backupCopyPhaseStartTime == 0) {
                std::string jobLogLabel = "nas_plugin_hetro_restore_prepare_fail_label";
                ReportJobDetailsWithLabelAndErrcode(make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED,
                    PROGRESS0), jobLogLabel, HomoErrorCode::INTERNAL_ERROR_CODE, errMsg);
                return false;
            }
            UnmountRemoteShare(mountPath);
        } else {
            // Non-NFS (CIFS): the local clock is used instead.
            m_generalInfo.m_backupCopyPhaseStartTime = GetCurrentTimeInSeconds();
        }
        if (m_generalInfo.m_backupCopyPhaseStartTime == 0) {
            HCP_Log(ERR, MODULE) << "Get current time of first backup copy subtask failed" << HCPENDLOG;
            return false;
        }
        generalInfo.m_backupCopyPhaseStartTime = m_generalInfo.m_backupCopyPhaseStartTime;
        if (generalInfo.m_restoreCopyIndex != backupSubJob.orderNumberForAggregate) {
            ++generalInfo.m_restoreCopyIndex;
        }
        // Report the "data restore started" label only for the first copy.
        if (backupSubJob.orderNumberForAggregate == 0) {
            ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
                "nas_plugin_hetro_restore_data_start_label", JobLogLevel::TASK_LOG_INFO);
        }
    }
    return true;
}

bool HetroRestoreJob::UpdateGeneralInfoForDeletePhase(HetroBackupSubJob& backupSubJob, std::string mountPath,
    HetroNativeGeneral& generalInfo)
{
    // Record the delete-phase start time for the copy this subjob belongs to,
    // mirroring it into the shared-resource snapshot the caller persists.
    const bool needUpdate = (generalInfo.m_backupDelPhaseStartTime == 0) ||
        (generalInfo.m_restoreDeleteIndex != backupSubJob.orderNumberForAggregate);
    if (!needUpdate) {
        return true;
    }

    // NFS takes the remote server's clock; otherwise fall back to local time.
    m_generalInfo.m_backupDelPhaseStartTime = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS)
        ? GetCurrentTimeFromRemoteServer(mountPath)
        : GetCurrentTimeInSeconds();
    if (m_generalInfo.m_backupDelPhaseStartTime == 0) {
        HCP_Log(ERR, MODULE) << "Get current time of first backup del subtask failed" << HCPENDLOG;
        return false;
    }

    generalInfo.m_backupDelPhaseStartTime = m_generalInfo.m_backupDelPhaseStartTime;
    if (generalInfo.m_restoreDeleteIndex != backupSubJob.orderNumberForAggregate) {
        ++generalInfo.m_restoreDeleteIndex;
    }
    return true;
}

void HetroRestoreJob::FillInfoForNasShareRestoreToNasFileSystem()
{
    // NAS share restored to a homogeneous new location: when the protected
    // object is a NasShare and the target is a NasFileSystem, overwrite the
    // share connection info (service IP, protocol, share path) from the FLR
    // extend info. Silently returns when the precondition does not apply.
    if (m_restoreJobPtr->copies[0].protectObject.subType != "NasShare" ||
        m_restoreJobPtr->targetObject.subType != "NasFileSystem") {
        return;
    }
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_flrExtend)) {
        HCP_Log(ERR, MODULE) << "convert flr extend info failed!" << HCPENDLOG;
        return;
    }
    HCP_Log(DEBUG, MODULE) << "m_flrExtend.targetLocation:" << m_flrExtend.targetLocation << HCPENDLOG;
    // Only the "new location" target type is handled here.
    if (m_flrExtend.targetLocation != FLR_RESTORE_TYPE_NEW_VALUE2) {
        return;
    }
    m_flrRestoreType = FLR_RESTORE_TYPE_NEW;
    m_nasShare.nasShareExt.m_serviceIP = m_flrExtend.shareIp;
    m_nasShare.nasShareExt.m_protocol = m_flrExtend.protocol;
    m_nasShare.sharePath = m_flrExtend.sharePath;
    // CIFS share paths get a leading directory separator.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        m_nasShare.sharePath = dir_sep + m_flrExtend.sharePath;
    }
    // Optional sub-path inside the share.
    if (!m_flrExtend.targetPath.empty()) {
        m_nasShare.sharePath += m_flrExtend.targetPath;
    }
}

bool HetroRestoreJob::PreInitJobInfo()
{
    // Parse repository paths and the job's JSON extend info into members
    // ahead of the main InitJobInfo() steps. Returns false on any parse or
    // initialization failure.
    if (!InitRepoPaths()) {
        return false;
    }
    if (IsNormalRestore() || IsFineGrainedHetroRestore()) {
        if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->targetObject.extendInfo, m_nasShare.nasShareExt)) {
            HCP_Log(ERR, MODULE) << "init (nas share info) job failed" << HCPENDLOG;
            return false;
        }
        if (IsFineGrainedHetroRestore()) {
            if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_flrExtend)) {
                HCP_Log(ERR, MODULE) << "convert flr extend info failed!" << HCPENDLOG;
                return false;
            }
            // Fine-grained restore may target a sub-path inside the share.
            if (!m_flrExtend.targetPath.empty()) {
                m_nasShare.sharePath += m_flrExtend.targetPath;
                HCP_Log(INFO, MODULE) << "flr set restore path : " << WIPE_SENSITIVE(m_nasShare.sharePath) << HCPENDLOG;
            }
        }
        FillInfoForNasShareRestoreToNasFileSystem();

        if (IsHomoNewLocationRestore()) {
            InitFineGrainedRestoreJobInfo();
            HCP_Log(DEBUG, MODULE) << "Homo new location protocol " << m_nasShare.nasShareExt.m_protocol << HCPENDLOG;
        }
    } else if (IsFineGrainedHomoRestore() || IsFineGrainedHetroRestoreCifsToNative()) {
        // Identify targetIp and the restore type; an original-location restore
        // mounts using the dataFS remoteHost ip and the nas share.
        InitFineGrainedRestoreJobInfo();
    }
    /* Data layout details TO-DO - THis is comming empty in thrift now */
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_dataLayoutExt)) {
        HCP_Log(ERR, MODULE) << "init (datalayout info) job failed" << HCPENDLOG;
        return false;
    }

    /* Aggregate config details */
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_aggrInfo)) {
        HCP_Log(ERR, MODULE) << "Failed to parse protctEnv extendInfo json to struct" << HCPENDLOG;
        return false;
    }

    if (!InitRestoreCopyMetaInfo()) {
        return false;
    }

    return true;
}

bool HetroRestoreJob::InitJobInfo()
{
    /* Initialize host, share, copy and protocol info for the restore job.
     * Returns false on any parse or initialization failure. */
    if (!InitHostInfo()) {
        HCP_Log(ERR, MODULE) << "init (nas host info) job failed" << HCPENDLOG;
        return false;
    }
    /* Protected NAS Share details (m_nasShare.serviceIp will be filled
     * from m_nasShare.nasShareExt.serviceIPList later in PrerequisisteJob())
     */
    m_nasShare.id = m_restoreJobPtr->targetObject.id;
    // Normalize the share path so it always starts with '/'.
    m_nasShare.sharePath = m_restoreJobPtr->targetObject.name[0] == '/' ? m_restoreJobPtr->targetObject.name :
        "/" + m_restoreJobPtr->targetObject.name;
    m_nasShare.auth = m_restoreJobPtr->targetObject.auth;

    HCP_Log(INFO, MODULE) << "JobExtendInfo:" << m_restoreJobPtr->copies[0].extendInfo << HCPENDLOG;
    if (GetCopyExtendInfo(NUMBER0, m_aggCopyExtendInfo) != Module::SUCCESS) {
        // BUGFIX: this function returns bool; the previous "return Module::FAILED;"
        // converted a non-zero failure code to true, reporting success on failure.
        return false;
    }
    if (!PreInitJobInfo()) {
        return false;
    }
    ResolveDomain(m_nasShare.nasShareExt.m_serviceIP, m_remoteIpRuleList);
    if (IsFineGrainedHomoRestore() || IsFineGrainedHetroRestore()) {
        // const& avoids copying each sub-object just to push a copy anyway.
        for (const auto& resource : m_restoreJobPtr->restoreSubObjects) {
            m_restoreResources.push_back(resource);
        }
    }
    m_dataLayoutExt.m_metadataBackupType = m_prevBackupCopyInfo.m_metadataBackupType;
    std::string proto = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) ? "nfs" : "cifs";
    // Fine-grained / homogeneous new-location restores are mounted with NFSv3.
    if (IsFineGrainedHomoRestore() || IsHomoNewLocationRestore() || IsFineGrainedHetroRestore()) {
        m_generalInfo.m_protocolVersion = NFS_VERSION_3;
    }
    std::string protoVersion = m_generalInfo.m_protocolVersion;

    /* kinit to get TGT for CIFS share with kerberos authentication */
    if (!KinitTGT()) {
        HCP_Log(ERR, MODULE) << "Kinit TGT ticket failed for kerberos authtication." << HCPENDLOG;
        return false;
    }

    HCP_Log(INFO, MODULE) << "proto: " << proto <<" protoVersion: " << protoVersion << HCPENDLOG;
    return true;
}

bool HetroRestoreJob::InitHostInfo()
{
    // Copy the protected NAS host description out of the job's target
    // environment, parse its extend info, and run CA-certificate handling
    // (check whether verification is required and write the cert to disk).
    const auto& targetEnv = m_restoreJobPtr->targetEnv;
    m_nasHost.id = targetEnv.id;
    m_nasHost.name = targetEnv.name;
    m_nasHost.vendor = targetEnv.type;
    m_nasHost.vendorSubType = targetEnv.subType;
    m_nasHost.mgrIp = targetEnv.endpoint;
    m_nasHost.port = targetEnv.port;
    m_nasHost.auth = targetEnv.auth;
    const bool extParsed = Module::JsonHelper::JsonStringToStruct(targetEnv.extendInfo, m_nasHost.nasHostExt);
    if (!extParsed) {
        HCP_Log(ERR, MODULE) << "Failed to parse targetEnv extendInfo json to struct" << HCPENDLOG;
        return false;
    }
    return CertVerifyMgr(targetEnv, m_nasHost);
}

bool HetroRestoreJob::InitFineGrainedRestoreJobInfo()
{
    // Parse the FLR (file-level restore) extend info and derive the target
    // share address, protocol, path and credentials used to mount the restore
    // destination. Returns false when mandatory extend info cannot be parsed
    // or no data-repo remote host is available for a native-location restore.
    HCP_Log(INFO, MODULE) << "Enter InitFineGrainedRestoreJobInfo!" << HCPENDLOG;
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_flrExtend)) {
        HCP_Log(ERR, MODULE) << "convert flr extend info failed!" << HCPENDLOG;
        return false;
    }
    if (m_flrExtend.targetLocation == FLR_RESTORE_TYPE_NATIVE_VALUE) {
        // Native-location restore: mount via the data repository's remote host IP.
        m_flrRestoreType = FLR_RESTORE_TYPE_NATIVE;
        if (m_dataFs.remoteHost.empty()) {
            HCP_Log(ERR, MODULE) << "m_dataFs.remoteHost is empty" <<HCPENDLOG;
            return false;
        }
        m_nasShare.nasShareExt.m_serviceIP = m_dataFs.remoteHost[0].ip;
    } else if (m_flrExtend.targetLocation == FLR_RESTORE_TYPE_NEW_VALUE || m_flrExtend.targetLocation ==
        FLR_RESTORE_TYPE_NEW_VALUE2 || m_flrExtend.targetLocation == FLR_RESTORE_TYPE_ORIGIN_VALUE) {
        // New (or origin-specified) location: use the share IP from the FLR extend info.
        m_flrRestoreType = FLR_RESTORE_TYPE_NEW;
        m_nasShare.nasShareExt.m_serviceIP = m_flrExtend.shareIp;
    }
    m_nasShare.nasShareExt.m_protocol = m_flrExtend.protocol;
    m_nasShare.sharePath = m_flrExtend.sharePath;
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        // CIFS share paths are addressed with a leading separator.
        m_nasShare.sharePath = dir_sep + m_flrExtend.sharePath;
    }
    if (!m_flrExtend.targetPath.empty()) {
        m_nasShare.sharePath += m_flrExtend.targetPath;
    }
    // CIFS restore to the native location: reuse the data repository's credentials.
    if (m_dataFs.protocol == RepositoryProtocolType::type::CIFS &&
        m_flrExtend.targetLocation == FLR_RESTORE_TYPE_NATIVE_VALUE) {
        HCP_Log(INFO, MODULE) << "Dest auth :" << WIPE_SENSITIVE(m_dataFs.auth.authkey)
                            << " , " << m_dataFs.auth.authType << HCPENDLOG;
        m_nasShare.auth = m_dataFs.auth;
    }
    if (m_flrExtend.targetLocation == RESTORE_TYPE_HETER_NEW_VALUE) {
        TargetEnvExtendInfo applicationEnvExtentInfo {};
        // NOTE(review): a parse failure here is only logged, not returned as an
        // error — confirm this best-effort behavior is intentional.
        if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->targetObject.extendInfo,
            applicationEnvExtentInfo)) {
            HCP_Log(ERR, MODULE) << "ApplicationEnvExtentInfo is invaild." << HCPENDLOG;
        }
        if (applicationEnvExtentInfo.shareMode != "") {
            m_nasShare.nasShareExt.m_protocol = applicationEnvExtentInfo.shareMode;
        }
        // Ensure the target share path starts with a separator.
        m_nasShare.sharePath = *(m_restoreJobPtr->targetObject.name.begin()) == '/' ?
                m_restoreJobPtr->targetObject.name : dir_sep + m_restoreJobPtr->targetObject.name;
        m_nasHost.name = m_nasShare.sharePath;
        HCP_Log(INFO, MODULE) << "share name : " << WIPE_SENSITIVE(m_nasHost.name) << HCPENDLOG;
    }
    HCP_Log(INFO, MODULE) << "set info for fine granded restore : " << m_flrExtend.shareIp << " "
        << m_nasShare.nasShareExt.m_protocol << " " << WIPE_SENSITIVE(m_nasShare.sharePath) << HCPENDLOG;
    return true;
}

bool HetroRestoreJob::InitRestoreCopyMetaInfo()
{
    // Fine-grained and homogeneous new-location restores do not read the
    // previous backup copy's metadata file, so there is nothing to load.
    const bool skipCopyMeta =
        IsFineGrainedHomoRestore() || IsHomoNewLocationRestore() || IsFineGrainedHetroRestore();
    if (skipCopyMeta) {
        return true;
    }
    // Resolve m_backupCopyMetaFile, then load the copy info recorded at backup time.
    GetJsonFilePath();
    if (!ReadBackupCopyFromFile(m_backupCopyMetaFile, m_prevBackupCopyInfo)) {
        HCP_Log(ERR, MODULE) << "ReadBackupCopyFromFile failed " << HCPENDLOG;
        return false;
    }

    // Adopt the protocol version used when the backup was taken.
    m_generalInfo.m_protocolVersion = m_prevBackupCopyInfo.m_protocolVersion;
    HCP_Log(INFO, MODULE) << "ReadBackupCopyFromFile succ "
        << "m_generalInfo.m_protocolVersion:" << m_generalInfo.m_protocolVersion << HCPENDLOG;
    return true;
}

void HetroRestoreJob::GetJsonFilePath()
{
    string copyId;
    if (m_aggregateRestore) {
        Copy lastCopy = m_restoreJobPtr->copies.back();
        AggCopyExtendInfo aggCopyExtendInfo;
        if (!Module::JsonHelper::JsonStringToStruct(lastCopy.extendInfo, aggCopyExtendInfo)) {
            ERRLOG("get last copy agg copy extend info failed");
            return;
        }
        copyId = aggCopyExtendInfo.metaPathSuffix;
        if (m_tapeCopy) {
            copyId = m_tapeCopyId;
        }
    }
    m_backupCopyMetaFile = m_metaFsPath + "/" + copyId + BACKUP_COPY_METAFILE;
    HCP_Log(DEBUG, MODULE) << "m_backupCopyMetaFile is: " << m_backupCopyMetaFile << HCPENDLOG;
}

void HetroRestoreJob::InitServiceIp()
{
    m_dataFsSvcIp  = m_dataFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size()].ip;
    m_cacheFsSvcIp = m_cacheFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_cacheFs.remoteHost.size()].ip;
    m_metaFsSvcIp  = m_metaFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_metaFs.remoteHost.size()].ip;
}

bool HetroRestoreJob::InitRepoPaths()
{
    /* Locate the cache/data/meta repositories of the copy being restored and
     * derive the local mount paths used by this job.
     * Returns false when the job carries no copy, a mandatory repository path
     * list is empty, or the temp mount directory cannot be created. */
    if (m_restoreJobPtr->copies.empty()) {
        ERRLOG("no copy found in restore job");
        return false;
    }

    // Aggregate restore: only the last copy's repositories are filled by the agent.
    int copyIndex = m_aggregateRestore ? m_restoreJobPtr->copies.size() - 1 : 0;
    for (unsigned int i = 0; i < m_restoreJobPtr->copies[copyIndex].repositories.size(); ++i) {
        auto repoType = m_restoreJobPtr->copies[copyIndex].repositories[i].repositoryType;
        if (repoType == RepositoryDataType::CACHE_REPOSITORY) {
            m_cacheFs = m_restoreJobPtr->copies[copyIndex].repositories[i];
        } else if (repoType == RepositoryDataType::DATA_REPOSITORY) {
            m_dataFs = m_restoreJobPtr->copies[copyIndex].repositories[i];
            // Remote hosts with unsupported protocol types are filtered out below.
            if (m_dataFs.protocol == RepositoryProtocolType::CIFS) {
                m_dataFs.remotePath = "/" + m_dataFs.remoteName;
            }
        } else if (repoType == RepositoryDataType::META_REPOSITORY) {
            m_metaFs = m_restoreJobPtr->copies[copyIndex].repositories[i];
        }
    }
    FilterUnsupportedRemoteHost();
    if (m_cacheFs.path.empty() || m_metaFs.path.empty()) {
        // BUGFIX: vector::size() returns size_t, which does not match "%u" on
        // LP64 targets; cast explicitly so the varargs call stays well-defined.
        ERRLOG("cacheFs list size = %u, metaFs list size = %u",
            static_cast<unsigned int>(m_cacheFs.path.size()),
            static_cast<unsigned int>(m_metaFs.path.size()));
        return false;
    }

    InitServiceIp();

    // For CIFS, record every data-repo host IP in the ip rule list.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        for (const auto& remoteHost : m_dataFs.remoteHost) {
            DBGLOG("remote host ip: %s added to ip rule list.", remoteHost.ip.c_str());
            m_IpRuleList.push_back(remoteHost.ip);
        }
    }

    m_tempLocalMountPath = "/mnt/" + m_nasShare.id;
    if (!CreateDirectory(m_tempLocalMountPath)) {
        ERRLOG("Failed to create temp local mount path");
        return false;
    }

    // Plugin can use any mounted cache path given by agent; spread by sub-task count.
    m_cacheFsPath = m_cacheFs.path[g_nodeLevelTaskInfo.GetSubTasksCount() % m_cacheFs.path.size()];
    m_cacheFsPath = m_aggregateRestore ? m_cacheFsPath : GetPathName(m_cacheFsPath);
    DBGLOG("m_cacheFs.remotePath = %s, raw m_cacheFsPath = %s", m_cacheFs.remotePath.c_str(), m_cacheFsPath.c_str());

    // Same selection scheme for the meta repository path.
    m_metaFsPath = m_metaFs.path[g_nodeLevelTaskInfo.GetSubTasksCount() % m_metaFs.path.size()];
    DBGLOG("m_metaFs.remotePath = %s, m_metaFsPath = %s", m_metaFs.remotePath.c_str(), m_metaFsPath.c_str());
    return true;
}

void HetroRestoreJob::FilterUnsupportedRemoteHost()
{
    // 只对内置插件nfs应用, 内置插件nfs是127.0.0.1 ， 无需过滤
    if (PluginUtils::IsInnerAgent() && m_dataFs.protocol == RepositoryProtocolType::NFS) {
        INFOLOG("no need filter for inner agent nfs protocol!");
        return;
    }
    // 只有cifs协议的恢复才会去掉不支持的remoteHost
    if (m_dataFs.protocol == RepositoryProtocolType::CIFS) {
        INFOLOG("Enter FilterUnsupportedRemoteHost!");
        std::vector<HostAddress>& vec = m_dataFs.remoteHost;
        vec.erase(remove_if(vec.begin(), vec.end(),
            [](HostAddress& host) {
                return host.supportProtocol != HOST_PROTOCOL_TYPE_CIFS &&
                    host.supportProtocol != HOST_PROTOCOL_TYPE_NFS_CIFS &&
                    host.supportProtocol != HOST_PROTOCOL_TYPE_ALL;
            }),
            vec.end());
        m_metaFs.remoteHost = m_dataFs.remoteHost;
        for (auto& host : m_dataFs.remoteHost) {
            INFOLOG("Print host : %s, %d, %d", host.ip.c_str(), host.port, host.supportProtocol);
        }
        return;
    } else if (m_dataFs.protocol == RepositoryProtocolType::NFS) {
        INFOLOG("Enter FilterUnsupportedRemoteHost!");
        std::vector<HostAddress>& vec = m_dataFs.remoteHost;
        vec.erase(remove_if(vec.begin(), vec.end(),
            [](HostAddress& host) {
                return host.supportProtocol != HOST_PROTOCOL_TYPE_NFS &&
                    host.supportProtocol != HOST_PROTOCOL_TYPE_NFS_CIFS &&
                    host.supportProtocol != HOST_PROTOCOL_TYPE_ALL;
            }),
            vec.end());
        m_metaFs.remoteHost = m_dataFs.remoteHost;
        for (auto& host : m_dataFs.remoteHost) {
            INFOLOG("Print host : %s, %d, %d", host.ip.c_str(), host.port, host.supportProtocol);
        }
        return;
    }
    INFOLOG("other protocol!");
    return;
}

bool HetroRestoreJob::PrintJobInfo()
{
    // Dump the resolved job configuration (phase, host, share, data layout,
    // QoS) to the log; sensitive values are masked with WIPE_SENSITIVE.
    // Ends by logging the repository/filter details and returns that result.
    HCP_Log(INFO, MODULE) << "jobPhase: " << m_jobCtrlPhase << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "jobId: " << m_jobId << HCPENDLOG;

    HCP_Log(INFO, MODULE) << "backupJobType: " <<
        (IsNormalRestore() ? "NORMAL_RESTORE" : "INSTANT_RESTORE") << HCPENDLOG;

    // Protected NAS host details.
    HCP_Log(INFO, MODULE) << "nasHost.id: " << m_nasHost.id << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.name: " << WIPE_SENSITIVE(m_nasHost.name) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.vendor: " << m_nasHost.vendor << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.vendorsubType: " << m_nasHost.vendorSubType << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.mgrIp: " << m_nasHost.mgrIp << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.port: " << m_nasHost.port << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.auth.type: " << m_nasHost.auth.authType << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasHost.auth.key: " << WIPE_SENSITIVE(m_nasHost.auth.authkey) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasHost.useSnapshot: " << m_nasHost.nasHostExt.m_useSnapshot << HCPENDLOG;

    // Target NAS share details.
    HCP_Log(INFO, MODULE) << "nasShare.id: " << m_nasShare.id << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasShare.svcIp: " << m_nasShare.nasShareExt.m_serviceIP << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.sharePath: " << WIPE_SENSITIVE(m_nasShare.sharePath) << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.protocol: " << m_nasShare.nasShareExt.m_protocol << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "nasShare.authMode: " << m_nasShare.nasShareExt.m_authMode << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasShare.kerberosId: " << m_nasShare.nasShareExt.m_kerberosId << HCPENDLOG;
    HCP_Log(DEBUG, MODULE) << "nasShareExt.m_filters: "
        << WIPE_SENSITIVE(m_nasShare.nasShareExt.m_filters) << HCPENDLOG;

    // Data layout / aggregation configuration.
    HCP_Log(INFO, MODULE) << "dataLayout.backupFormat: "
        << m_dataLayoutExt.m_backupFormat << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "dataLayout.metadataBackupType: "
        << m_dataLayoutExt.m_metadataBackupType << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "dataLayout.m_fileReplaceStrategy: "
        << m_dataLayoutExt.m_fileReplaceStrategy << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "dataLayout.m_fileCountThreshold: "
        << m_dataLayoutExt.m_fileCountThreshold << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "dataLayout.m_fileSizeThreshold: "
        << m_dataLayoutExt.m_fileSizeThreshold << HCPENDLOG;

    // QoS limits requested for this job.
    HCP_Log(INFO, MODULE) << "qos.bandwidth: " << m_restoreJobPtr->jobParam.qos.bandwidth << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "qos.protectIops: " << m_restoreJobPtr->jobParam.qos.protectIops << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "qos.backupIops: " << m_restoreJobPtr->jobParam.qos.backupIops << HCPENDLOG;
    return PrintJobInfoForRepoAndFilter();
}

bool HetroRestoreJob::PrintJobInfoForRepoAndFilter()
{
    HCP_Log(INFO, MODULE) << "cacheFs.ip: " << m_cacheFs.endpoint.ip << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "cacheFs.sharePath: " << WIPE_SENSITIVE(m_cacheFs.remotePath) << HCPENDLOG;
    for (std::string &path: m_cacheFs.path)
        HCP_Log(INFO, MODULE) << "cacheFs.localMountPath: " << WIPE_SENSITIVE(path) << HCPENDLOG;
    for (HostAddress svcIp: m_cacheFs.remoteHost)
        HCP_Log(INFO, MODULE) << "cacheFs.svcip: " << svcIp.ip << HCPENDLOG;

    HCP_Log(INFO, MODULE) << "backupFs.ip: " << m_dataFs.endpoint.ip << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "backupFs.sharePath: " << WIPE_SENSITIVE(m_dataFs.remotePath) << HCPENDLOG;
    for (std::string &path: m_dataFs.path)
        HCP_Log(INFO, MODULE) << "backupFs.localMountPath: " << WIPE_SENSITIVE(path) << HCPENDLOG;
    for (HostAddress svcIp: m_dataFs.remoteHost)
        HCP_Log(INFO, MODULE) << "backupFs.svcip: " << svcIp.ip << HCPENDLOG;

    HCP_Log(INFO, MODULE) << "m_metaFs.ip: " << m_metaFs.endpoint.ip << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_metaFs.sharePath: " << WIPE_SENSITIVE(m_metaFs.remotePath) << HCPENDLOG;
    for (std::string &path: m_metaFs.path)
        HCP_Log(INFO, MODULE) << "m_metaFs.localMountPath: " << WIPE_SENSITIVE(path) << HCPENDLOG;
    for (HostAddress svcIp: m_metaFs.remoteHost)
        HCP_Log(INFO, MODULE) << "m_metaFs.svcip: " << svcIp.ip << HCPENDLOG;

    for (AppProtect::ResourceFilter filter1: m_restoreJobPtr->jobParam.filters) {
        HCP_Log(INFO, MODULE) << "jobParamfilter1.filterBy: " << filter1.filterBy << HCPENDLOG;
        HCP_Log(INFO, MODULE) << "jobParamfilter1.type: " << filter1.type << HCPENDLOG;
        HCP_Log(INFO, MODULE) << "jobParamfilter1.rule: " << filter1.rule << HCPENDLOG;
        HCP_Log(INFO, MODULE) << "jobParamfilter1.mode: " << filter1.mode << HCPENDLOG;

        for (std::string val: filter1.values) {
            HCP_Log(INFO, MODULE) << "jobParamfilter1.val: " << WIPE_SENSITIVE(val) << HCPENDLOG;
        }
    }
    return true;
}

bool HetroRestoreJob::IsNormalRestore()
{
    // True when the job was issued as a plain (non fine-grained) restore.
    return m_restoreJobPtr->jobParam.restoreType == RestoreJobType::NORMAL_RESTORE;
}

bool HetroRestoreJob::IsFineGrainedHomoRestore()
{
    // Fine-grained restore targeting a NasFileSystem, or any restore whose FLR
    // target location is the native one. Refreshes m_flrExtend from the job
    // extend info as a side effect (parse failures are ignored, as before).
    Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_flrExtend);
    const bool fineGrainedToFs =
        m_restoreJobPtr->jobParam.restoreType == RestoreJobType::FINE_GRAINED_RESTORE &&
        m_restoreJobPtr->targetObject.subType == "NasFileSystem";
    const bool nativeTarget = m_flrExtend.targetLocation == FLR_RESTORE_TYPE_NATIVE_VALUE;
    return fineGrainedToFs || nativeTarget;
}

bool HetroRestoreJob::IsFineGrainedHetroRestore()
{
    // Fine-grained restore whose target is a NasShare at a non-native location.
    // Refreshes m_flrExtend from the job extend info as a side effect.
    Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_flrExtend);
    const bool fineGrainedToShare =
        m_restoreJobPtr->jobParam.restoreType == RestoreJobType::FINE_GRAINED_RESTORE &&
        m_restoreJobPtr->targetObject.subType == "NasShare";
    return fineGrainedToShare && m_flrExtend.targetLocation != FLR_RESTORE_TYPE_NATIVE_VALUE;
}

bool HetroRestoreJob::IsFineGrainedHetroRestoreCifsToNative()
{
    // Fine-grained NasShare restore back to the native location over CIFS.
    // Refreshes m_flrExtend from the job extend info as a side effect.
    Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, m_flrExtend);
    if (m_restoreJobPtr->jobParam.restoreType != RestoreJobType::FINE_GRAINED_RESTORE) {
        return false;
    }
    if (m_restoreJobPtr->targetObject.subType != "NasShare") {
        return false;
    }
    return m_flrExtend.targetLocation == FLR_RESTORE_TYPE_NATIVE_VALUE &&
        m_dataFs.protocol == RepositoryProtocolType::type::CIFS;
}

bool HetroRestoreJob::IsHomoNewLocationRestore()
{
    if (m_restoreJobPtr->jobParam.restoreType != RestoreJobType::NORMAL_RESTORE) {
        HCP_Log(INFO, MODULE) << "not normal restore, no need to care it!" << HCPENDLOG;
        return false;
    }
    if (m_restoreJobPtr->copies[0].protectObject.subType != "NasFileSystem") {
        HCP_Log(INFO, MODULE) << "it is NasShare, no need to care it!" << HCPENDLOG;
        return false;
    }
    if (m_restoreJobPtr->copies[0].dataType == AppProtect::CopyDataType::TAPE_STORAGE_COPY) {
        HCP_Log(INFO, MODULE) << "the copy type of NasFileSystem is tape archive !" << HCPENDLOG;
        return true;
    }
    FlrRestoreExtend flrExtend {};
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->extendInfo, flrExtend)) {
        HCP_Log(ERR, MODULE) << "convert flr extend info failed!" << HCPENDLOG;
        return false;
    }
    if (flrExtend.targetLocation == FLR_RESTORE_TYPE_ORIGIN_VALUE) {
        HCP_Log(INFO, MODULE) << "NasFileSystem is restored to origin location!" << HCPENDLOG;
        return false;
    }
    return true;
}

bool HetroRestoreJob::SetupCacheFsForRestoreJob()
{
    /* Prepare the working directories under the cache repository:
     *   <cacheFsPath>/statistics_<jobId>                  statistics of main/sub jobs
     *   <cacheFsPath>/restore-job/<jobId>/scan/meta       scanner meta info
     *                                       (previous / latest dcache, fcache)
     *   <cacheFsPath>/restore-job/<jobId>/scan/ctrl       scanner control files
     *                                                     (input to the BACKUP module)
     *   <cacheFsPath>/restore-job/<jobId>/backup/ctrl     backup control files
     */
    std::string statisticsPath = m_cacheFsPath + "/statistics_" + m_jobId;
    if (IsDirExist(statisticsPath)) {
        // A stale statistics folder could belong to an earlier run; drop it.
        WARNLOG("remove this path %s, maybe it is used by another work", statisticsPath.c_str());
        PluginUtils::RemoveDirectory(statisticsPath);
    }
    if (!CreateDirectory(statisticsPath)) {
        HCP_Log(ERR, MODULE) << "setup statistics for restore job failed" << HCPENDLOG;
        return false;
    }

    const std::string jobCachePath = m_cacheFsPath + "/restore-job/" + m_jobId;
    const std::vector<std::string> requiredDirs {
        jobCachePath,
        jobCachePath + "/scan/meta",
        jobCachePath + "/scan/ctrl",
        jobCachePath + "/backup/ctrl"
    };
    for (const auto& dir : requiredDirs) {
        if (!CreateDirectory(dir)) {
            HCP_Log(ERR, MODULE) << "setup cache-fs for restore job failed" << HCPENDLOG;
            return false;
        }
    }
    return true;
}

int HetroRestoreJob::GenerateAggregateSubJobInner()
{
    // Drive sub-job generation for an aggregate-format restore: report scan
    // start, initialize job info, unpack per-copy metadata caches and generate
    // the execute sub jobs (fine-grained or full, depending on the job).
    HCP_Log(INFO, MODULE) << "Enter GenerateAggregateSubJobInner, job id is " << m_jobId << HCPENDLOG;
    ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0,
        "nas_plugin_hetro_restore_scan_start_label", JobLogLevel::TASK_LOG_INFO);
    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }
    PrintJobInfo();
    if (InitAggregateGenerateJobInfo() != Module::SUCCESS ||
        UnzipCacheFilesForAggregate() != Module::SUCCESS) {
        return Module::FAILED;
    }
    // Both flavours share the same failure message, so fold the two branches.
    const int genRet = m_fineGrainedRestore ?
        GenerateSubJobsForAggregateFineGrain() : GenerateRestoreExecuteSubJobsForAggregate();
    if (genRet != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Generate restore execute subJobs for aggregate failed." << HCPENDLOG;
        return Module::FAILED;
    }

    return Module::SUCCESS;
}

int HetroRestoreJob::InitAggregateGenerateJobInfo()
{
    HCP_Log(INFO, MODULE) << "Enter InitAggregateGenerateJobInfo" << HCPENDLOG;
    m_numberCopies =  m_restoreJobPtr->copies.size();
    if (GetRepoInfoForAggregate() != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "GetRepoInfoForAggregate failed." << HCPENDLOG;
        return Module::FAILED;
    }
    if (RecordControlFilePathForAggregate() != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "RecordControlFilePathForAggregate failed." << HCPENDLOG;
        return Module::FAILED;
    }
    if (CreateSrcDirForAggregate() != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "CreateSrcDirForAggregate failed." << HCPENDLOG;
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

int HetroRestoreJob::UnzipCacheFilesForAggregate()
{
    // Run UnzipDcachefilesForAggregate() on a worker thread and wait for it to
    // finish; `ret` receives the worker's result code. The worker clears
    // m_isUnzipInProgress when it is done.
    int ret = Module::SUCCESS;
    m_isUnzipInProgress = true;
    std::thread monitorUnzipDcacheThread(&HetroRestoreJob::UnzipDcachefilesForAggregate, this, std::ref(ret));

    // NOTE(review): m_isUnzipInProgress is written by the worker thread and
    // read here without visible synchronization — confirm it is std::atomic
    // (a plain bool would be a data race). The poll loop only adds periodic
    // progress logging; join() below already provides the actual wait.
    while (m_isUnzipInProgress) {
        HCP_Log(INFO, MODULE) << "Wait for Unzip DCache Files to finish!" << HCPENDLOG;
        std::this_thread::sleep_for(std::chrono::seconds(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC));
    }
    monitorUnzipDcacheThread.join();
    if (ret != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "UnzipDcachefilesForAggregate failed." << HCPENDLOG;
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

int HetroRestoreJob::GetRepoInfoForAggregate()
{
    HCP_Log(INFO, MODULE) << "Enter GetRepoInfoForAggregate" << HCPENDLOG;

    // 多个副本共用一个cache仓,agent只填充最后一个副本的路径
    int lastCopyNumber =  m_restoreJobPtr->copies.size() - 1;
    Copy lastCopy = m_restoreJobPtr->copies[lastCopyNumber];
    string dataPath;
    string metaPath;
    for (uint32_t i = 0; i < lastCopy.repositories.size(); i++) {
        if (lastCopy.repositories[i].repositoryType == RepositoryDataType::CACHE_REPOSITORY) {
            m_cacheFsPath = lastCopy.repositories[i].path[0];
        } else if (lastCopy.repositories[i].repositoryType == RepositoryDataType::DATA_REPOSITORY) {
            dataPath = lastCopy.repositories[i].path[0];
        } else if (lastCopy.repositories[i].repositoryType == RepositoryDataType::META_REPOSITORY) {
            metaPath = lastCopy.repositories[i].path[0];
        }
    }
    HCP_Log(INFO, MODULE) << "dataPath is:" << metaPath << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "metaPath is:" << metaPath << HCPENDLOG;
    for (uint32_t i = 0; i < m_numberCopies; ++i) {
        AggCopyExtendInfo aggCopyExtendInfo;
        if (GetCopyExtendInfo(i, aggCopyExtendInfo) != Module::SUCCESS) {
            return Module::FAILED;
        }
        // 修正聚合格式回复下meta、data仓的路径
        m_dataFsPathList.push_back(aggCopyExtendInfo.dataPathSuffix);
        m_metaFsPathList.push_back(aggCopyExtendInfo.metaPathSuffix);
        m_copyIdList.push_back(aggCopyExtendInfo.metaPathSuffix);
        DBGLOG("m_dataFsPathList[%d]: %s, m_metaFsPathList: %s, m_copyIdList: %s",
            i, aggCopyExtendInfo.dataPathSuffix.c_str(), aggCopyExtendInfo.metaPathSuffix.c_str(),
            aggCopyExtendInfo.metaPathSuffix.c_str());
    }
    return Module::SUCCESS;
}

int HetroRestoreJob::GetCopyExtendInfo(const int& copyOrder, AggCopyExtendInfo& aggCopyExtendInfo)
{
    HCP_Log(INFO, MODULE) << "Enter GetCopyExtendInfo"<< HCPENDLOG;
    if (m_tapeCopy) {
        TapeCopyExtendInfo tapeCopyExtendInfo;
        if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->copies[copyOrder].extendInfo,
                                                    tapeCopyExtendInfo)) {
            HCP_Log(ERR, MODULE) << "parse tapeCopyExtendInfo failed. tapeCopyExtendInfo is: "
                << m_restoreJobPtr->copies[copyOrder].extendInfo << m_restoreJobPtr->jobId << HCPENDLOG;
            return Module::FAILED;
        }
        aggCopyExtendInfo = tapeCopyExtendInfo.extendInfo;
        string mataPath = tapeCopyExtendInfo.metaPath;
        int pos = mataPath.find_last_of('/');
        m_tapeCopyId = std::string(mataPath.substr(pos+1));
        return Module::SUCCESS;
    }
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->copies[copyOrder].extendInfo,
                                                aggCopyExtendInfo)) {
        HCP_Log(ERR, MODULE) << "parse aggCopyExtendInfo failed. tapeCopyExtendInfo is: "
            << m_restoreJobPtr->copies[copyOrder].extendInfo << "jobId is: " << m_restoreJobPtr->jobId << HCPENDLOG;
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

int HetroRestoreJob::RecordControlFilePathForAggregate()
{
    HCP_Log(INFO, MODULE) << "Enter RecordControlFilePathForAggregate"<< HCPENDLOG;
    m_scanControlFilePath = m_cacheFsPath + "/scan/ctrl";
    m_restoreControlFilePath = m_cacheFsPath + "/restore/ctrl";
    DBGLOG("m_scanControlFilePath = %s, m_restoreControlFilePath = %s",
        m_scanControlFilePath.c_str(), m_restoreControlFilePath.c_str());
    for (uint32_t i = 0; i < m_numberCopies; ++i) {
        string dcaheAndFcachePath = m_cacheFsPath +
            "/scan/" + to_string(i) + "/meta/latest";
        string scanMetaPath = m_cacheFsPath +
            "/scan/" + to_string(i) + "/meta";

        m_dcaheAndFcachePathList.push_back(dcaheAndFcachePath);
        m_scanMetaPathList.push_back(scanMetaPath);
        DBGLOG("m_dcaheAndFcachePathList[%d] = %s, m_scanMetaPathList[%d] = %s", i,
            dcaheAndFcachePath.c_str(), i, scanMetaPath.c_str());
    }
    return Module::SUCCESS;
}

int HetroRestoreJob::CreateSrcDirForAggregate()
{
    HCP_Log(INFO, MODULE) << "Enter CreateSrcDirForAggregate"<< HCPENDLOG;
    if (!PluginUtils::CreateDirectory(m_scanControlFilePath)) {
        return Module::FAILED;
    }
    if (!PluginUtils::CreateDirectory(m_restoreControlFilePath)) {
        return Module::FAILED;
    }
    for (uint32_t i = 0; i < m_numberCopies; ++i) {
        string dcaheAndFcachePath = m_dcaheAndFcachePathList[i];
        string scanMetaPathList = m_scanMetaPathList[i];
        if (!PluginUtils::CreateDirectory(dcaheAndFcachePath)) {
            return Module::FAILED;
        }
        if (!PluginUtils::CreateDirectory(scanMetaPathList)) {
            return Module::FAILED;
        }
    }
    return Module::SUCCESS;
}

std::vector<Module::CmdParam> HetroRestoreJob::PrepareUnZipCommand(const std::string &mountPath,
    const std::string &dcache, const std::string &fcache, const std::string &meta, const std::string &xmeta)
{
    // Builds the command:
    //   cd <mountPath> ; tar -I pigz -xf <dcache> -C . && tar -I pigz -xf <fcache> -C .
    //                 && tar -I pigz -xf <meta> -C .  && tar -I pigz -xf <xmeta> -C .
    // The original spelled out the four identical tar groups by hand; build
    // them in a loop instead (same parameters, same order).
    std::vector<Module::CmdParam> cmd {
        CmdParam(COMMON_CMD_NAME, "cd"),
        CmdParam(PATH_PARAM, mountPath),
        CmdParam(CONTINUOUS_PARAM, ";")
    };
    const std::string archives[] = {dcache, fcache, meta, xmeta};
    bool firstArchive = true;
    for (const auto& archive : archives) {
        if (!firstArchive) {
            // Chain consecutive extractions so a failure stops the pipeline.
            cmd.emplace_back(LOGICAND_PARAM, "&&");
        }
        firstArchive = false;
        cmd.emplace_back(COMMON_CMD_NAME, "tar");
        cmd.emplace_back(CMD_OPTION_PARAM, "-I");
        cmd.emplace_back(CMD_OPTION_PARAM, "pigz");
        cmd.emplace_back(CMD_OPTION_PARAM, "-xf");
        cmd.emplace_back(PATH_PARAM, archive);
        cmd.emplace_back(CMD_OPTION_PARAM, "-C");
        cmd.emplace_back(CURDIR_PARAM, ".");
    }

    return cmd;
}

void HetroRestoreJob::UnzipDcachefilesForAggregate(int &ret)
{
    std::vector<std::string> result;
    for (uint32_t i = 0; i < m_numberCopies; ++i) {
        string metaFsPath = m_metaFsPath + "/" + m_metaFsPathList[i];
        string oldVersionScannerMetaZipFileName  = metaFsPath + "/filemeta/metafile.zip";
        string dirCacheZipFileName = metaFsPath + "/filemeta/metafile_DIRCACHE.gz";
        string fCacheZipFileName = metaFsPath + "/filemeta/metafile_FILECACHE.gz";
        string metaZipFilename  = metaFsPath + "/filemeta/metafile_META.gz";
        string xmetaZipFileName = metaFsPath + "/filemeta/metafile_XMETA.gz";
        std::vector<std::string> result;
        string unZipPath = m_cacheFsPath + "/scan/" + to_string(i) + "/meta/latest";
        std::vector<Module::CmdParam> execCmd;

        if (PluginUtils::IsFileExist(oldVersionScannerMetaZipFileName)) {
            execCmd = {CmdParam(COMMON_CMD_NAME, "unzip"), CmdParam(CMD_OPTION_PARAM, "-o"),
                CmdParam(PATH_PARAM, oldVersionScannerMetaZipFileName), CmdParam(CMD_OPTION_PARAM, "-d"),
                CmdParam(PATH_PARAM, m_dcaheAndFcachePathList[i])};
        }  else if (IsFileExist(dirCacheZipFileName) && IsFileExist(fCacheZipFileName) &&
            IsFileExist(metaZipFilename) && IsFileExist(xmetaZipFileName)) {
            execCmd = PrepareUnZipCommand(unZipPath, dirCacheZipFileName, fCacheZipFileName, metaZipFilename,
                xmetaZipFileName);
        } else {
            m_isUnzipInProgress = false;
            ret = Module::FAILED;
            return;
        }
        std::unordered_set<std::string> pathWhite = {
            unZipPath, metaFsPath + "/filemeta", m_dcaheAndFcachePathList[i]
        };
        if (Module::RunCommand(execCmd[0].Value(), execCmd, result, pathWhite) != 0) {
            m_isUnzipInProgress = false;
            ret = Module::FAILED;
            return;
        }
    }
    m_isUnzipInProgress = false;
    ret = Module::SUCCESS;
}

int HetroRestoreJob::GenerateRestoreExecuteSubJobsForAggregate()
{
    // Generate restore control files for every copy of the aggregate restore,
    // one scanner pass per copy, accumulating the scan statistics as we go.
    HCP_Log(INFO, MODULE) << "Enter GenerateRestoreExecuteSubJobsForAggregate." << HCPENDLOG;
    for (uint32_t copyIdx = 0; copyIdx < m_numberCopies; ++copyIdx) {
        m_orderNumberForAggregate = copyIdx;
        m_scanControlFilePath = m_cacheFsPath + "/scan/ctrl/" + to_string(m_orderNumberForAggregate);
        if (!PluginUtils::CreateDirectory(m_scanControlFilePath)) {
            return Module::FAILED;
        }
        HCP_Log(INFO, MODULE) << "==========m_orderNumberForAggregate is: " << m_orderNumberForAggregate << HCPENDLOG;

        // The first copy is scanned as a full copy; all later copies are incremental.
        ScanConfig scanConfig;
        if (copyIdx == FIRST_GENERATE_CONTROL_FILE) {
            FillFirstScanConfigForAggregate(scanConfig);
        } else {
            FillScanConfigForAggregate(scanConfig);
        }

        if (StartToGenerateControlFileForAggregate(scanConfig) != Module::SUCCESS) {
            return Module::FAILED;
        }
        // Fold this copy's statistics into the running total, then release the scanner.
        ScanStatistics statistic = m_scanner->GetStatistics();
        m_scanStatistics = AddScanStatistics(statistic, m_scanStatistics);
        m_scanner->Destroy();
    }

    HCP_Log(INFO, MODULE) << "Generate restore Execute SubJobs For Aggregate completed." << HCPENDLOG;
    return Module::SUCCESS;
}

int HetroRestoreJob::GenerateSubJobsForAggregateFineGrain()
{
    // Fine-grained aggregate restore scans only the latest copy: use the
    // incremental config when several copies exist, otherwise the full one.
    HCP_Log(INFO, MODULE) << "Enter GenerateSubJobsForAggregateFineGrain." << HCPENDLOG;
    m_orderNumberForAggregate = m_numberCopies - 1;
    ScanConfig scanConfig;
    if (m_numberCopies < NUMBER2) {
        FillFirstScanConfigForAggregate(scanConfig);
    } else {
        FillScanConfigForAggregate(scanConfig);
    }

    // Apply the fine-grained file/dir filter rules on top of the base config.
    FillScanConfigForFilter(scanConfig);

    if (StartToGenerateControlFileForAggregate(scanConfig) != Module::SUCCESS) {
        return Module::FAILED;
    }
    // Fold the scan statistics into the running total, then release the scanner.
    ScanStatistics statistic = m_scanner->GetStatistics();
    m_scanStatistics = AddScanStatistics(statistic, m_scanStatistics);
    m_scanner->Destroy();
    HCP_Log(INFO, MODULE) << "Generate subJobs for aggregate fineGrain completed." << HCPENDLOG;
    return Module::SUCCESS;
}

/* Fill the scanner configuration used for the FIRST copy of an aggregate
 * restore: a full (non-incremental) control-file generation pass with no
 * previous dcache to diff against. Always returns Module::SUCCESS. */
int HetroRestoreJob::FillFirstScanConfigForAggregate(ScanConfig& scanConfig)
{
    HCP_Log(INFO, MODULE) << "Enter FillFirstScanConfigForAggregate" << HCPENDLOG;

    scanConfig.reqID  = PluginUtils::GenerateHash(m_jobId);
    scanConfig.jobId = m_jobId;

    /* config meta path */
    scanConfig.metaPath = m_scanMetaPathList[m_orderNumberForAggregate];
    scanConfig.curDcachePath  = m_dcaheAndFcachePathList[m_orderNumberForAggregate];
    scanConfig.metaPathForCtrlFiles = m_scanControlFilePath;

    scanConfig.scanType = ScanJobType::CONTROL_GEN;
    scanConfig.scanIO = IOEngine::DEFAULT;

    scanConfig.maxOpendirReqCount = MAX_OPEN_DIR_REQ_4000;

    // Generate FULL control files (the first copy of the aggregate restore)
    scanConfig.generatorIsFull = true;
    /* number of record threads */
    scanConfig.maxCommonServiceInstance = 1;

    scanConfig.usrData = (void*)this;
    scanConfig.scanResultCb = ScannerCtrlFileCallBack;
    scanConfig.scanHardlinkResultCb = ScannerHardLinkCallBack;

    // Apply aggregate-specific entry-count / data-size thresholds.
    FillScanConfigForAggr(scanConfig);

    scanConfig.scanCtrlFileTimeSec = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_TIME_SEC");
    scanConfig.scanMetaFileSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_DEFAULT_META_FILE_SIZE");
    scanConfig.scanCheckPointEnable = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CHECKPOINT_ENABLED");
    scanConfig.triggerTime = GetCurrentTimeInSeconds();
    HCP_Log(INFO, MODULE) << "scanConfig.curDcachePath: " << scanConfig.curDcachePath <<  HCPENDLOG;
    HCP_Log(INFO, MODULE) << "EXIT FillScanConfig" << HCPENDLOG;
    return Module::SUCCESS;
}

/* Fill the scanner configuration for a NON-first copy of an aggregate
 * restore: an incremental control-file generation pass that diffs the
 * current copy's dcache against the previous copy's. Always returns
 * Module::SUCCESS. */
int HetroRestoreJob::FillScanConfigForAggregate(ScanConfig& scanConfig)
{
    HCP_Log(INFO, MODULE) << "Enter FillScanConfigForAggregate" << HCPENDLOG;

    scanConfig.reqID = PluginUtils::GenerateHash(m_jobId);
    scanConfig.jobId = m_jobId;
    /* config meta path */
    scanConfig.metaPath = m_scanMetaPathList[m_orderNumberForAggregate];
    scanConfig.metaPathForCtrlFiles = m_scanControlFilePath;

    // Used to generate incremental control files: diff current vs previous dcache.
    // NOTE(review): indexes m_orderNumberForAggregate-1 — callers must ensure
    // m_orderNumberForAggregate >= 1 here, otherwise the index underflows.
    scanConfig.curDcachePath  = m_dcaheAndFcachePathList[m_orderNumberForAggregate];
    scanConfig.prevDcachePath  = m_dcaheAndFcachePathList[m_orderNumberForAggregate-1];

    scanConfig.scanType = ScanJobType::CONTROL_GEN;
    scanConfig.scanIO = IOEngine::DEFAULT;

    scanConfig.maxOpendirReqCount = MAX_OPEN_DIR_REQ_4000;

    /* generate incremental control files */
    scanConfig.generatorIsFull = false;

    /* number of record threads */
    scanConfig.maxCommonServiceInstance = 1;

    scanConfig.usrData = (void*)this;
    scanConfig.scanResultCb = ScannerCtrlFileCallBack;
    scanConfig.scanHardlinkResultCb = ScannerHardLinkCallBack;

    // Apply aggregate-specific entry-count / data-size thresholds.
    FillScanConfigForAggr(scanConfig);
    scanConfig.scanCtrlFileTimeSec = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_TIME_SEC");
    scanConfig.scanMetaFileSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_DEFAULT_META_FILE_SIZE");
    scanConfig.scanCheckPointEnable = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CHECKPOINT_ENABLED");
    scanConfig.triggerTime = PluginUtils::GetCurrentTimeInSeconds();
    HCP_Log(INFO, MODULE) << "scanConfig.curDcachePath: " << scanConfig.curDcachePath <<  HCPENDLOG;
    HCP_Log(INFO, MODULE) << "scanConfig.prevDcachePath: " << scanConfig.prevDcachePath <<  HCPENDLOG;
    HCP_Log(INFO, MODULE) << "EXIT FillScanConfig" << HCPENDLOG;
    return Module::SUCCESS;
}

int HetroRestoreJob::StartToGenerateControlFileForAggregate(ScanConfig& scanConfig)
{
    // Create and start a scanner instance from the given config, then block in
    // MonitorScanner until control-file generation completes.
    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (m_scanner == nullptr) {
        ERRLOG("Start Scanner failed!");
        return Module::FAILED;
    }

    if (m_scanner->Start() != SCANNER_STATUS::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start scanner instance failed!" << HCPENDLOG;
        // Release the instance on start failure, consistent with StartScanner().
        m_scanner->Destroy();
        return Module::FAILED;
    }
    MonitorScanner(m_restoreScanStatistics, m_restoreJobStatus, m_restoreJobLogLabel, m_restoreJobProgress);
    return Module::SUCCESS;
}

int HetroRestoreJob::GetExecuteSubJobType()
{
    // Decode the sub-job payload and map its sub-task type onto the backup
    // phase this execute sub-job must run. Entries mapped to UNKNOWN_STAGE
    // appear to be control sub-jobs (create/check/set-channels) with no
    // data-copy phase.
    if (!Module::JsonHelper::JsonStringToStruct(m_subJobInfo->jobInfo, m_subJobPathsInfo)) {
        HCP_Log(ERR, MODULE) << "Get restore subjob info failed" << HCPENDLOG;
        return Module::FAILED;
    }
    std::unordered_map<uint32_t, BackupPhase> backupPhaseMap = {
        { SUBJOB_TYPE_DATACOPY_COPY_PHASE,       BackupPhase::COPY_STAGE },
        { SUBJOB_TYPE_DATACOPY_HARDLINK_PHASE,   BackupPhase::HARDLINK_STAGE },
        { SUBJOB_TYPE_DATACOPY_DELETE_PHASE,     BackupPhase::DELETE_STAGE },
        { SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE,   BackupPhase::DIR_STAGE },
        { SUBJOB_TYPE_CREATE_SUBJOB_PHASE,       BackupPhase::UNKNOWN_STAGE },
        { SUBJOB_TYPE_CHECK_SUBJOB_PHASE,        BackupPhase::UNKNOWN_STAGE },
        { SUBJOB_TYPE_SET_CHANNELS_PHASE,        BackupPhase::UNKNOWN_STAGE }
    };
    auto iter = backupPhaseMap.find(m_subJobPathsInfo.m_SubTaskType);
    if (iter == backupPhaseMap.end()) {
        // Cast explicitly so the argument always matches the %lu specifier;
        // passing a 32-bit value to %lu is a varargs width mismatch.
        ERRLOG("subJobInfo.subJobType(%lu) is invalid",
            static_cast<unsigned long>(m_subJobPathsInfo.m_SubTaskType));
        return Module::FAILED;
    }
    m_backupPhase = iter->second;
    return Module::SUCCESS;
}

/* Report sub-job progress/details to the agent.
 * - Keep-alive reports (empty label, RUNNING) are rate-limited.
 * - If the job has been aborted while still RUNNING, the reported status is
 *   forced to ABORTING and the label is dropped.
 * Returns false only when the agent rejects the report. */
template<typename... Args>
bool HetroRestoreJob::ReportJobDetails(SubJobStatus::type jobStatus, int32_t jobProgress,
    std::string logLabel, const JobLogLevel::type &logLevel, Args... logArgs)
{
    SubJobDetails subJobDetails {};
    ActionResult result {};
    std::vector<LogDetail> logDetailList;
    LogDetail logDetail{};
    int32_t jobSpeed = 0;

    // Rate-limit keep-alive reports of business sub-jobs: one per 90s (REPORT_INTERVAL)
    if (logLabel.empty() &&
        jobStatus == SubJobStatus::RUNNING) {
        int64_t currTime = PluginUtils::GetCurrentTimeInSeconds();
        if ((currTime - m_lastKeepAliveReportTime) < REPORT_INTERVAL) {
            return true;
        }
        m_lastKeepAliveReportTime = currTime;
    }

    if (IsAbortJob() && jobStatus == SubJobStatus::RUNNING) {
        HCP_Log(INFO, MODULE) << "Job is aborted, force change jobStatus to aborting for"
            << " taskid: " << m_jobId << ", subtaskid: " << m_subJobId << HCPENDLOG;
        jobStatus = SubJobStatus::ABORTING;
        logLabel = "";
    }

    INFOLOG("Enter ReportJobDetails. jobId: %s, subJobId: %s, jobStatus: %d, jobProgress: %d, jobLabel: %s, logLevel: %d",
        m_parentJobId.c_str(), m_subJobId.c_str(), static_cast<int>(jobStatus), jobProgress, logLabel.c_str(),
        static_cast<int>(logLevel));

    // Only attach a log detail when there is an actual label to report.
    if (logLabel != "") {
        AddLogDetail(logDetail, logLabel, logLevel, logArgs...);
    }

    /* TO-DO: Later, discuss with homo team to change REPORT_LOG2AGENT macro to add new param for data size */
    if (m_dataSize != 0) {
        subJobDetails.__set_dataSize(m_dataSize);
    }

    REPORT_LOG2AGENT(subJobDetails, result, logDetailList, logDetail, jobProgress, jobSpeed, jobStatus);
    if (result.code != MP_SUCCESS) {
        HCP_Log(ERR, MODULE) << "Report job details to agent failed: " << result.code <<  HCPENDLOG;
        return false;
    }

    INFOLOG("Exit ReportJobDetails. jobId: %s, subJobId: %s, jobStatus: %d, jobProgress: %d, logLabel: %s, logLevel: %d",
        m_parentJobId.c_str(), m_subJobId.c_str(), static_cast<int>(jobStatus), jobProgress, logLabel.c_str(),
        static_cast<int>(logLevel));
    return true;
}

/* Same as ReportJobDetails, but also attaches an error code to the log
 * detail. Unlike ReportJobDetails, keep-alive reports are NOT rate-limited
 * here. Returns false only when the agent rejects the report. */
template<typename... Args>
bool HetroRestoreJob::ReportJobDetailsWithErrorCode(SubJobStatus::type jobStatus, int32_t jobProgress,
    std::string logLabel, const JobLogLevel::type &logLevel, const int64_t errCode, Args... logArgs)
{
    SubJobDetails subJobDetails {};
    ActionResult result {};
    std::vector<LogDetail> logDetailList;
    LogDetail logDetail{};
    int32_t jobSpeed = 0;

    // If the job was aborted while still RUNNING, report ABORTING and drop the label.
    if (IsAbortJob() && jobStatus == SubJobStatus::RUNNING) {
        HCP_Log(INFO, MODULE) << "Job is aborted, force change jobStatus to aborting for"
            << " taskid: " << m_jobId << ", subtaskid: " << m_subJobId << HCPENDLOG;
        jobStatus = SubJobStatus::ABORTING;
        logLabel = "";
    }

    HCP_Log(INFO, MODULE) << "Enter ReportJobDetailsWithErrorCode: "
        << "jobId: " << m_parentJobId
        << ", subJobId: " << m_subJobId
        << ", jobStatus: " << jobStatus
        << ", ErrorCode: " << errCode
        << ", jobProgress: " << jobProgress
        << ", logLabel: " << logLabel
        << ", logLevel: " << logLevel
        << HCPENDLOG;

    // Only attach a log detail when there is an actual label to report.
    if (logLabel != "") {
        AddLogDetail(logDetail, logLabel, logLevel, logArgs...);
    }

    /* TO-DO: Later, discuss with homo team to change REPORT_LOG2AGENT macro to add new param for data size */
    if (m_dataSize != 0) {
        subJobDetails.__set_dataSize(m_dataSize);
    }

    AddErrCode(logDetail, errCode);

    REPORT_LOG2AGENT(subJobDetails, result, logDetailList, logDetail, jobProgress, jobSpeed, jobStatus);
    if (result.code != MP_SUCCESS) {
        HCP_Log(ERR, MODULE) << "Report job details to agent failed: " << result.code <<  HCPENDLOG;
        return false;
    }

    HCP_Log(INFO, MODULE) << "Exit ReportJobDetailsWithErrorCode: "
        << "jobId: " << m_parentJobId
        << ", subJobId: " << m_subJobId
        << ", jobStatus: " << jobStatus
        << ", ErrorCode: " << errCode
        << ", jobProgress: " << jobProgress
        << ", logLabel: " << logLabel
        << ", logLevel: " << logLevel
        << HCPENDLOG;
    return true;
}

template<typename... Args>
void HetroRestoreJob::ReportJobDetailsWithLabel(SubJobStatus::type jobStatus,
    const std::string& logLabel, int32_t jobProgress, std::vector<std::string> &message,
    const JobLogLevel::type &logLevel, const int64_t errCode, Args... logArgs)
{
    // Build one log detail carrying the extra message lines, the label and the
    // error code, then push it to the agent in a single report (speed 0).
    LogDetail logDetail {};
    logDetail.__set_additionalDesc(message);
    AddLogDetail(logDetail, logLabel, logLevel, logArgs...);
    AddErrCode(logDetail, errCode);

    SubJobDetails subJobDetails;
    ActionResult result;
    std::vector<LogDetail> logDetailList;
    REPORT_LOG2AGENT(subJobDetails, result, logDetailList, logDetail, jobProgress, 0, jobStatus);
}

bool HetroRestoreJob::StartScanner()
{
    ScanConfig scanConfig {};
    FillScanConfig(scanConfig);

    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (m_scanner == nullptr) {
        ERRLOG("init scanner failed!");
        return false;
    }

    m_scanner->Enqueue(".");

    if (m_scanner->Start() != SCANNER_STATUS::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start scanner instance failed" << HCPENDLOG;
        m_scanner->Destroy();
        return false;
    }
    m_scanStats.m_scanStarted = true;
    if (!UpdateScanStatsResource(m_jobId, m_scanStats)) {
        HCP_Log(ERR, MODULE) << "Update Scan Stats failed!" << HCPENDLOG;
        return false;
    }
    return true;
}

bool HetroRestoreJob::InitPathForRestore()
{
    HCP_Log(INFO, MODULE) << "Enter InitPathForRestore." << HCPENDLOG;
    m_dcaheAndFcachePath =  m_cacheFsPath + "/restore-job/"+ m_jobId + "/scan/meta";
    m_scanControlFilePath = m_cacheFsPath + "/restore-job/"+ m_jobId + "/scan/control";
    m_restoreControlFilePath = m_cacheFsPath + "/restore-job/"+ m_jobId + "/restore/control";
    HCP_Log(INFO, MODULE) << "m_dcaheAndFcachePath: " << m_dcaheAndFcachePath << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_scanControlFilePath: " << m_scanControlFilePath << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "m_restoreControlFilePath: " << m_restoreControlFilePath << HCPENDLOG;

    if (!PluginUtils::CreateDirectory(m_dcaheAndFcachePath)) {
        return false;
    }
    if (!PluginUtils::CreateDirectory(m_scanControlFilePath)) {
        return false;
    }
    if (!PluginUtils::CreateDirectory(m_restoreControlFilePath)) {
        return false;
    }
    return true;
}

bool HetroRestoreJob::UnzipDcachefiles()
{
    // Run UnzipDcachefilesInner on a worker thread and wait for it to finish.
    // m_unzipMetaFilesStatus: 0 = running, 1 = success, -1 = failure.
    m_unzipMetaFilesStatus = 0;
    std::thread unzipWorkerThread = std::thread(&HetroRestoreJob::UnzipDcachefilesInner, this);
    INFOLOG("Wait for unzip dcach and fcache files to finish!");
    // join() blocks until the worker completes; the previous polling loop read
    // m_unzipMetaFilesStatus from this thread while the worker wrote it
    // without synchronization (a data race) and added no value.
    unzipWorkerThread.join();
    if (m_unzipMetaFilesStatus == 1) {
        INFOLOG("unzip dcach and fcache files success.");
        return true;
    } else {
        INFOLOG("unzip dcach and fcache files failed.");
        return false;
    }
}

/* Worker-thread body for UnzipDcachefiles(): uncompress the scanner metadata
 * archives into the job's scan/meta directory. Sets m_unzipMetaFilesStatus to
 * 1 on success and -1 on failure so the launching thread can read the outcome
 * after join(). */
int HetroRestoreJob::UnzipDcachefilesInner()
{
    vector<string> result;
    // Old scanner versions produced one combined metafile.zip; newer versions
    // split the metadata into four separate gzip archives.
    string oldVersionScannerMetaZipFileName  = m_metaFsPath + "/filemeta/metafile.zip";
    string dirCacheZipFileName = m_metaFsPath + "/filemeta/metafile_DIRCACHE.gz";
    string fCacheZipFileName = m_metaFsPath + "/filemeta/metafile_FILECACHE.gz";
    string metaZipFilename  = m_metaFsPath + "/filemeta/metafile_META.gz";
    string xmetaZipFileName = m_metaFsPath + "/filemeta/metafile_XMETA.gz";
    string unZipPath = m_cacheFsPath + "/restore-job/" + m_jobId + "/scan/meta";
    std::vector<Module::CmdParam> execCmd;

    if (PluginUtils::IsFileExist(oldVersionScannerMetaZipFileName)) {
        // Single-archive layout: unzip -o <zip> -d <dcache/fcache dir>
        execCmd = {CmdParam(COMMON_CMD_NAME, "unzip"), CmdParam(CMD_OPTION_PARAM, "-o"),
            CmdParam(PATH_PARAM, oldVersionScannerMetaZipFileName), CmdParam(CMD_OPTION_PARAM, "-d"),
            CmdParam(PATH_PARAM, m_dcaheAndFcachePath)};
    } else if (PluginUtils::IsFileExist(dirCacheZipFileName) &&
        PluginUtils::IsFileExist(fCacheZipFileName) &&
        PluginUtils::IsFileExist(metaZipFilename) &&
        PluginUtils::IsFileExist(xmetaZipFileName)) {
        // Split-archive layout: all four gzip files must be present.
        execCmd = PrepareUnZipCommand(unZipPath, dirCacheZipFileName, fCacheZipFileName, metaZipFilename,
            xmetaZipFileName);
    } else {
        ERRLOG("Zip file not found to uncompress");
        m_unzipMetaFilesStatus = -1;
        return Module::FAILED;
    }
    // Restrict the external command to the directories it may touch.
    std::unordered_set<std::string> pathWhite = {
        m_metaFsPath + "/filemeta", unZipPath, m_dcaheAndFcachePath
    };
    if (Module::RunCommand(execCmd[0].Value(), execCmd, result, pathWhite) != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "unzip meta failed! the ret failed"<< HCPENDLOG;
        m_unzipMetaFilesStatus = -1;
        return Module::FAILED;
    }
    m_unzipMetaFilesStatus = 1;
    return Module::SUCCESS;
}

void HetroRestoreJob::FillScanConfigBasedOnEnviroment(ScanConfig &scanConfig)
{
    // Choose the scanner tuning profile by deployment type: X6000 deployments
    // use the X6000 config keys, everything else (including external plugins,
    // where the deploy type is not queried) falls back to the X8000 keys.
    // The two former branches were identical except for the key prefix, so the
    // keys are now built from a shared profile string.
    std::string deployType;
    if (PluginConfig::GetInstance().m_scene != PluginUsageScene::EXTERNAL) {
        deployType = PluginUtils::GetDeployType();
    }
    const std::string profile = (deployType == X6000_DEPLOY_TYPE) ? "X6000" : "X8000";
    // All keys follow the pattern DME_NAS_SCAN_<profile>_<suffix>.
    auto readInt = [&profile](const std::string &suffix) {
        return Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "DME_NAS_SCAN_" + profile + "_" + suffix);
    };
    scanConfig.maxOpendirReqCount = readInt("OPENDIR_REQ_CNT");
    scanConfig.maxWriteQueueSize = readInt("MAX_WRITE_QUEUE_SIZE");
    scanConfig.maxScanQueueSize = readInt("MAX_SCAN_QUEUE_SIZE");
    scanConfig.minScanQueueSize = readInt("MIN_SCAN_QUEUE_SIZE");
    scanConfig.writeQueueSize = readInt("WRITE_QUEUE_SIZE");
    scanConfig.dirEntryReadCount = readInt("DIR_ENTRY_READ_COUNT");
    scanConfig.scanMetaFileSize = readInt("DEFAULT_META_FILE_SIZE");
}

void HetroRestoreJob::FillScanConfigBasedOnProtocol(ScanConfig &scanConfig)
{
    // NFS shares configure the libnfs scanner section; any other protocol is
    // treated as SMB/CIFS.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        scanConfig.nfs.m_serverIp = StripSqrBracketsFromIpAddress(m_dataFsSvcIp);
        scanConfig.nfs.m_serverPath = m_dataFs.remotePath;
        scanConfig.nfs.m_nasServerCheckSleepTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_BACKUP_NAS_SERVER_CHECK_SLEEP_TIME");
        scanConfig.nfs.maxOpendirReqCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_LIBNFS_OPENDIR_REQ_CNT");
        return;
    }

    // Pick an SMB server host round-robin over the sub-task count.
    const auto hostIdx = g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size();
    scanConfig.smb.server = m_dataFs.remoteHost[hostIdx].ip;
    scanConfig.smb.domain = m_nasShare.nasShareExt.m_domainName;
    scanConfig.smb.share = m_dataFs.remotePath;
    scanConfig.smb.version = HetroCommonService::ConvertStringToSmbVersion(m_generalInfo.m_protocolVersion);
    scanConfig.smb.user = m_dataFs.auth.authkey;
    scanConfig.smb.password = m_dataFs.auth.authPwd;
    scanConfig.smb.encryption = (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION);
    scanConfig.smb.authType = Module::SmbAuthType::NTLMSSP;
}

/* Fill the full scanner configuration for a (non-aggregate) restore scan:
 * identity, paths, environment/protocol tuning, filters, callbacks and
 * control-file limits. */
void HetroRestoreJob::FillScanConfig(ScanConfig &scanConfig)
{
    HCP_Log(INFO, MODULE) << " Enter FillScanConfig" << HCPENDLOG;
    scanConfig.jobId = m_jobId;
    scanConfig.reqID = m_mainJobRequestId;

    scanConfig.curDcachePath  = m_dcaheAndFcachePath;
    scanConfig.metaPathForCtrlFiles = m_scanControlFilePath;

    scanConfig.scanType = ScanJobType::CONTROL_GEN;
    scanConfig.scanIO = IOEngine::DEFAULT;
    // Restore scans always generate full control files (no previous dcache).
    scanConfig.generatorIsFull = true;

    scanConfig.usrData = (void *)this;
    scanConfig.lastBackupTime = 0;
    scanConfig.useLastBackupTime = false;
    FillScanConfigBasedOnEnviroment(scanConfig);
    FillScanConfigBasedOnProtocol(scanConfig);
    FillScanConfigForFilter(scanConfig);

    /* Path */
    // NOTE(review): this is the same value InitPathForRestore() stores in
    // m_dcaheAndFcachePath — consider reusing the member to avoid drift.
    scanConfig.metaPath =  m_cacheFsPath + "/restore-job/"+ m_jobId + "/scan/meta";

    HCP_Log(INFO, MODULE) << "META PATH " << scanConfig.metaPath << "   "
                         << scanConfig.metaPathForCtrlFiles << HCPENDLOG;

    /* Callbacks register */
    scanConfig.scanResultCb = ScannerCtrlFileCallBack;
    scanConfig.scanHardlinkResultCb = ScannerHardLinkCallBack;
    scanConfig.mtimeCtrlCb = RestoreDirMTimeCallBack;
    scanConfig.deleteCtrlCb = RestoreDelCtrlCallBack;

    scanConfig.maxCommonServiceInstance = 1;

    // Entry-count / data-size thresholds for native (non-aggregate) format.
    FillScanConfigForNative(scanConfig);
    scanConfig.scanCtrlFileTimeSec = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_TIME_SEC");
    scanConfig.scanCheckPointEnable = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CHECKPOINT_ENABLED");
    scanConfig.triggerTime = GetCurrentTimeInSeconds();
}

void HetroRestoreJob::FillScanConfigForNative(ScanConfig &scanConfig)
{
    // Entry-count limits: a task-level fileCountThreshold override takes
    // precedence over the plugin-config defaults.
    if (!m_dataLayoutExt.m_fileCountThreshold.empty()) {
        INFOLOG("FileCountThreshold: %s", m_dataLayoutExt.m_fileCountThreshold.c_str());
        const auto countLimit = PluginUtils::SafeStou32(m_dataLayoutExt.m_fileCountThreshold);
        scanConfig.scanCtrlMaxEntriesFullBkup = countLimit;
        scanConfig.scanCtrlMaxEntriesIncBkup = countLimit;
    } else {
        scanConfig.scanCtrlMaxEntriesFullBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_ENTRIES_FULLBKUP");
        scanConfig.scanCtrlMaxEntriesIncBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_ENTRIES_INCRBKUP");
    }

    scanConfig.scanCtrlMinEntriesFullBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_ENTRIES_FULLBKUP");
    scanConfig.scanCtrlMinEntriesIncBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_ENTRIES_INCRBKUP");
    // Clamp the minimums so they never exceed the configured maximums.
    if (scanConfig.scanCtrlMinEntriesFullBkup > scanConfig.scanCtrlMaxEntriesFullBkup) {
        scanConfig.scanCtrlMinEntriesFullBkup = scanConfig.scanCtrlMaxEntriesFullBkup;
    }
    if (scanConfig.scanCtrlMinEntriesIncBkup > scanConfig.scanCtrlMaxEntriesIncBkup) {
        scanConfig.scanCtrlMinEntriesIncBkup = scanConfig.scanCtrlMaxEntriesIncBkup;
    }

    // Data-size limits follow the same override-then-default pattern.
    if (!m_dataLayoutExt.m_fileSizeThreshold.empty()) {
        INFOLOG("FileSizeThreshold: %s", m_dataLayoutExt.m_fileSizeThreshold.c_str());
        scanConfig.scanCtrlMaxDataSize = PluginUtils::ConvertGBToBytes(m_dataLayoutExt.m_fileSizeThreshold);
    } else {
        scanConfig.scanCtrlMaxDataSize = Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_DATASIZE");
    }
    scanConfig.scanCtrlMinDataSize = Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_DATASIZE");
    if (PluginUtils::SafeStou64(scanConfig.scanCtrlMinDataSize) >
        PluginUtils::SafeStou64(scanConfig.scanCtrlMaxDataSize)) {
        scanConfig.scanCtrlMinDataSize = scanConfig.scanCtrlMaxDataSize;
    }
    scanConfig.scanCopyCtrlFileSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_SIZE");
}
 
void HetroRestoreJob::FillScanConfigForAggr(ScanConfig &scanConfig)
{
    // Entry-count limits for aggregate copies: a task-level fileCountThreshold
    // override takes precedence over the aggregate plugin-config defaults.
    if (!m_dataLayoutExt.m_fileCountThreshold.empty()) {
        INFOLOG("FileCountThreshold: %s", m_dataLayoutExt.m_fileCountThreshold.c_str());
        const auto countLimit = PluginUtils::SafeStou32(m_dataLayoutExt.m_fileCountThreshold);
        scanConfig.scanCtrlMaxEntriesFullBkup = countLimit;
        scanConfig.scanCtrlMaxEntriesIncBkup = countLimit;
    } else {
        scanConfig.scanCtrlMaxEntriesFullBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_ENTRIES_FULLBKUP_AGGR");
        scanConfig.scanCtrlMaxEntriesIncBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_ENTRIES_INCRBKUP_AGGR");
    }

    scanConfig.scanCtrlMinEntriesFullBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_ENTRIES_FULLBKUP_AGGR");
    scanConfig.scanCtrlMinEntriesIncBkup = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_ENTRIES_INCRBKUP_AGGR");
    // Clamp the minimums so they never exceed the configured maximums.
    if (scanConfig.scanCtrlMinEntriesFullBkup > scanConfig.scanCtrlMaxEntriesFullBkup) {
        scanConfig.scanCtrlMinEntriesFullBkup = scanConfig.scanCtrlMaxEntriesFullBkup;
    }
    if (scanConfig.scanCtrlMinEntriesIncBkup > scanConfig.scanCtrlMaxEntriesIncBkup) {
        scanConfig.scanCtrlMinEntriesIncBkup = scanConfig.scanCtrlMaxEntriesIncBkup;
    }

    // Data-size limits follow the same override-then-default pattern.
    if (!m_dataLayoutExt.m_fileSizeThreshold.empty()) {
        INFOLOG("FileSizeThreshold: %s", m_dataLayoutExt.m_fileSizeThreshold.c_str());
        scanConfig.scanCtrlMaxDataSize = PluginUtils::ConvertGBToBytes(m_dataLayoutExt.m_fileSizeThreshold);
    } else {
        scanConfig.scanCtrlMaxDataSize = Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION,
            "DME_NAS_SCAN_CTRL_MAX_DATASIZE_AGGR");
    }
    scanConfig.scanCtrlMinDataSize = Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_MIN_DATASIZE_AGGR");
    if (PluginUtils::SafeStou64(scanConfig.scanCtrlMinDataSize) >
        PluginUtils::SafeStou64(scanConfig.scanCtrlMaxDataSize)) {
        INFOLOG("scanCtrlMinDataSize is greater than scanCtrlMaxDataSize, set them equal");
        scanConfig.scanCtrlMinDataSize = scanConfig.scanCtrlMaxDataSize;
    }
    scanConfig.scanCopyCtrlFileSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_SCAN_CTRL_FILE_SIZE_AGGR");
}

void HetroRestoreJob::FillScanConfigForFilter(ScanConfig &scanConfig)
{
    HCP_Log(INFO, MODULE) << "Enter FillScanConfigForFilter" << HCPENDLOG;
    // Not a fine-grained restore: no filter rules needed, scan everything.
    if (!m_fineGrainedRestore) {
        return;
    }
    vector<string> dirFilterRule;
    vector<string> fileFilterRule;
    // Iterate by const reference: the previous by-value loop copied every
    // resource entry on each iteration.
    for (const auto& resource : m_restoreResources) {
        HCP_Log(INFO, MODULE) << "######filter rule is: " << resource.name << HCPENDLOG;
        if (resource.name.empty()) {
            // Guard: dereferencing rbegin()/back() of an empty name is UB.
            continue;
        }
        // A trailing '/' marks a directory rule; anything else is a file rule.
        if (resource.name.back() != '/') { // file
            HCP_Log(INFO, MODULE) << "file filter rule is: " << resource.name << HCPENDLOG;
            fileFilterRule.push_back(resource.name);
        } else { // dir
            HCP_Log(INFO, MODULE) << "dir filter rule is: " << resource.name << HCPENDLOG;
            dirFilterRule.push_back(resource.name);
        }
    }
    scanConfig.dCtrlFltr = dirFilterRule;
    scanConfig.fCtrlFltr = fileFilterRule;
    return;
}

bool HetroRestoreJob::StartBackup(HetroBackupSubJob backupSubJob)
{
    // Create a backup instance from the sub-job parameters, feed it the
    // control file, and start it. Returns false as soon as any step fails.
    BackupParams backupParams {};
    FillBackupConfig(backupParams, backupSubJob);

    m_backup = FS_Backup::BackupMgr::CreateBackupInst(backupParams);
    if (m_backup == nullptr) {
        HCP_Log(ERR, MODULE) << "Create backup instance failed" << HCPENDLOG;
        return false;
    }
    if (m_backup->Enqueue(backupSubJob.m_ControlFile) != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "enqueue backup instance failed" << HCPENDLOG;
        return false;
    }
    if (m_backup->Start() != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start backup instance failed" << HCPENDLOG;
        return false;
    }
    return true;
}

/* Fill the libnfs advance parameters for both sides of the restore copy.
 * The shared tuning values are read once into srcAdvanceParams, copied into
 * dstAdvanceParams, and then the per-side fields (sharePath, ip, dataPath)
 * are overridden. The source host is chosen round-robin by sub-task count. */
void HetroRestoreJob::FillBackupNFSConfig(BackupParams &backupParams, const HetroBackupSubJob &backupSubJob)
{
    LibnfsBackupAdvanceParams srcAdvanceParams {};
    LibnfsBackupAdvanceParams dstAdvanceParams {};
    std::string backupIp {};

    srcAdvanceParams.maxPendingAsyncReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_PENDING_ASYNC_REQ_CNT");
    srcAdvanceParams.minPendingAsyncReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MIN_PENDING_ASYNC_REQ_CNT");
    srcAdvanceParams.maxPendingWriteReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_PENDING_WRITE_REQ_CNT");
    srcAdvanceParams.minPendingWriteReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MIN_PENDING_WRITE_REQ_CNT");
    srcAdvanceParams.maxPendingReadReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_PENDING_READ_REQ_CNT");
    srcAdvanceParams.minPendingReadReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MIN_PENDING_READ_REQ_CNT");
    srcAdvanceParams.serverCheckMaxCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_MAX_ERR_COUNT");
    srcAdvanceParams.serverCheckSleepTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_SLEEP_TIME");
    srcAdvanceParams.serverCheckRetry= Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_RETRY_CNT");
    srcAdvanceParams.jobStartTime = m_generalInfo.m_backupCopyPhaseStartTime;
    srcAdvanceParams.deleteJobStartTime = m_generalInfo.m_backupDelPhaseStartTime;
    srcAdvanceParams.protocolVersion = m_generalInfo.m_protocolVersion;
    // Copy the common tuning before overriding the per-side fields below.
    dstAdvanceParams = srcAdvanceParams;
    // Aggregate restores read from a per-copy sub-directory of the data FS.
    srcAdvanceParams.sharePath = m_aggregateRestore ? m_dataFs.remotePath + "/" + backupSubJob.copyId :
        m_dataFs.remotePath;
    dstAdvanceParams.sharePath = m_nasShare.sharePath;
    dstAdvanceParams.dataPath = "/mnt/DataBackup/" + m_subJobId;
    // Round-robin host selection for load balancing across remote hosts.
    uint16_t index = g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size();
    srcAdvanceParams.ip = StripSqrBracketsFromIpAddress(m_dataFs.remoteHost[index].ip);
    dstAdvanceParams.ip = StripSqrBracketsFromIpAddress(m_nasShare.nasShareExt.m_serviceIP);
    HCP_Log(INFO, MODULE) << "Loadbalance selected, backupParams.backupIp: " <<
        m_dataFs.remoteHost[index].ip << HCPENDLOG;

    backupParams.srcAdvParams = make_shared<LibnfsBackupAdvanceParams>(srcAdvanceParams);
    backupParams.dstAdvParams = make_shared<LibnfsBackupAdvanceParams>(dstAdvanceParams);
}

void HetroRestoreJob::FillBackupCIFSConfig(BackupParams &backupParams, const HetroBackupSubJob& backupSubJob)
{
    // Fill the libsmb advance parameters for a CIFS restore. The source side
    // is the backup data filesystem (m_dataFs) we read from; the destination
    // side is the protected NAS share (m_nasShare) we write to.
    std::shared_ptr<LibsmbBackupAdvanceParams> srcAdvParams = make_shared<LibsmbBackupAdvanceParams>();
    std::shared_ptr<LibsmbBackupAdvanceParams> dstAdvParams = make_shared<LibsmbBackupAdvanceParams>();
    backupParams.srcAdvParams = srcAdvParams;
    backupParams.dstAdvParams = dstAdvParams;

    // source device: pick a remote host round-robin by the node-level
    // sub-task count for load balancing
    srcAdvParams->server = m_dataFs.remoteHost[g_nodeLevelTaskInfo.GetSubTasksCount() % m_dataFs.remoteHost.size()].ip;
    srcAdvParams->share = m_dataFs.remotePath;
    srcAdvParams->version = m_generalInfo.m_protocolVersion;
    srcAdvParams->encryption = false;
    srcAdvParams->sign = false;
    srcAdvParams->timeout = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_STORAGE_CONNECTION_TIMEOUT");
    srcAdvParams->authType = "ntlmssp";
    // aggregate copy restore needs the copyId path prefix; a native copy uses
    // subDirPath with its leading '/' removed. Use substr() instead of the
    // original erase(0, 1): erase() mutated m_dataFs.subDirPath as a side
    // effect, stripping one more leading character on every call.
    srcAdvParams->rootPath = IsAggregate() ? backupSubJob.copyId :
        (m_dataFs.subDirPath.empty() ? std::string() : m_dataFs.subDirPath.substr(1));

    // destination device: the protected NAS share being restored to
    dstAdvParams->server = m_nasShare.nasShareExt.m_serviceIP;
    dstAdvParams->version = m_generalInfo.m_protocolVersion;

    srcAdvParams->maxPendingAsyncReqCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_MAX_PENDING_ASYNC_REQ_CNT");
    srcAdvParams->serverCheckMaxCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_SERVER_CHECK_MAX_ERR_COUNT");
    srcAdvParams->maxOpenedFilehandleCount = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_MAX_OPENED_FILEHANDLE_COUNT");
    srcAdvParams->pollExpiredTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_POLL_EXPIRED_TIME");
    /*
    * the destination sharePath in fine grained restore may contain multiple
    * levels; use the first level as the share and the others as dstRootPath
    */
    pair<string, string> shareAndRootPathPair = ResolveSharePathForCIFS(m_nasShare.sharePath);
    DBGLOG("split cifs sharepath, share = %s, rootPath = %s",
        shareAndRootPathPair.first.c_str(), shareAndRootPathPair.second.c_str());
    dstAdvParams->share = shareAndRootPathPair.first;
    dstAdvParams->rootPath = shareAndRootPathPair.second;

    if (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION) {
        HCP_Log(DEBUG, MODULE) << "nas share encryption is true." << HCPENDLOG;
        dstAdvParams->encryption = true;
    } else {
        dstAdvParams->encryption = false;
    }
    dstAdvParams->sign = false;
    dstAdvParams->timeout = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "CIFS_PROTECTED_CONNECTION_TIMEOUT");
    // kerberos-authenticated shares use krb5; everything else falls back to ntlmssp
    if (m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        dstAdvParams->authType = "krb5";
    } else {
        dstAdvParams->authType = "ntlmssp";
    }
}

/**
 * resolve a cifs share path into <share, rootPath>
 * ex: when doing fine grained restore, the cifs sharepath of the target fs
 * may look like:
 *     /cifsshare1919810/dir1/dir2
 * => share: "cifsshare1919810", rootPath: "dir1/dir2"
 * (leading and trailing '/' are stripped); rootPath is set as dstRootPath
 */
std::pair<std::string, std::string> HetroRestoreJob::ResolveSharePathForCIFS(std::string sharePath)
{
    // guard: front()/back() on an empty string is undefined behaviour
    if (sharePath.empty()) {
        return make_pair(std::string(), std::string());
    }
    if (sharePath.front() == '/') {
        sharePath = sharePath.substr(1);
    }
    // re-check emptiness: an input of just "/" is empty after the strip above
    if (!sharePath.empty() && sharePath.back() == '/') {
        sharePath.pop_back();
    }
    // use the string's size_type so the npos comparison is exact
    // (the original narrowed find()'s result into an int)
    std::string::size_type pos = sharePath.find('/');
    return (pos == std::string::npos) ? make_pair(sharePath, std::string()) :
        make_pair(sharePath.substr(0, pos), sharePath.substr(pos + 1));
}

bool HetroRestoreJob::IsAggregate()
{
    // A copy is aggregated when the extend-info flag holds the literal "true".
    const auto &aggregationFlag = m_aggCopyExtendInfo.isAggregation;
    return aggregationFlag == "true";
}

void HetroRestoreJob::FillBackupReplacePolicy(BackupParams &backupParams)
{
    // Map the configured file replace strategy onto the backup engine's
    // restore replace policy; any unrecognised strategy falls back to NONE.
    RestoreReplacePolicy policy = RestoreReplacePolicy::NONE;
    const auto &strategy = m_dataLayoutExt.m_fileReplaceStrategy;
    if (strategy == REPLACE_POLICY_REPLACE_EXSISTING_FILESNFOLDERS) {
        policy = RestoreReplacePolicy::OVERWRITE;
    } else if (strategy == REPLACE_POLICY_IGNORE_EXSISTNG_FILES) {
        policy = RestoreReplacePolicy::IGNORE_EXIST;
    } else if (strategy == REPLACE_POLICY_REPLACE_ONLY_IF_OLDER) {
        policy = RestoreReplacePolicy::OVERWRITE_OLDER;
    }
    backupParams.commonParams.restoreReplacePolicy = policy;
}

void HetroRestoreJob::FillBackupCommonParams(BackupParams &backupParams)
{
    // Fill the protocol-independent engine parameters: job ids, buffer and
    // error limits, block size, replace policy and (optional) aggregation.
    backupParams.commonParams.jobId = m_jobId;
    backupParams.commonParams.subJobId = m_subJobId;
    backupParams.commonParams.reqID = m_subJobRequestId;
    backupParams.commonParams.maxErrorFiles = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_MAX_ERR_COUNT");
    backupParams.commonParams.maxBufferCnt = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_TOTAL_BLOCK_BUFFER_CNT");
    backupParams.commonParams.maxBufferSize = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_MAX_TOTAL_BLOCK_BUFFER_SIZE");
    backupParams.commonParams.writeMeta = true;

    backupParams.commonParams.failureRecordRootPath = m_cacheFsPath;

    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        backupParams.commonParams.blockSize = ONE_MB; // ONE MB
    } else if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        backupParams.commonParams.writeAcl = true;
        int64_t errCode = 0;
        // connect to both the source and destination smb shares and take the
        // minimum of the read/write block sizes (stored in m_maxSmbBlockSize)
        bool ret = CheckSmbConnectVersion(
            HetroCommonService::ConvertStringToSmbVersion(m_generalInfo.m_protocolVersion), errCode);
        // was `ret == MP_TRUE`: comparing a bool against a macro constant is
        // bug-prone; test the bool directly
        if (ret) {
            backupParams.commonParams.blockSize = m_maxSmbBlockSize;
        } else {
            backupParams.commonParams.blockSize = DEFAULT_SMB_BLOCK_SIZE;
        }
        backupParams.commonParams.writeSparseFile = true;
    }

    FillBackupReplacePolicy(backupParams);
    HCP_Log(INFO, MODULE) <<"FillBackupCommonParams:backupParams.commonParams.metaPath:"
        << backupParams.commonParams.metaPath << HCPENDLOG;

    if (IsAggregate()) {
        HCP_Log(INFO, MODULE) <<"FillBackupCommonParams:aggregate:yes" << HCPENDLOG;
        backupParams.commonParams.backupDataFormat = BackupDataFormat::AGGREGATE;
        // NOTE(review): std::stoul throws std::invalid_argument/out_of_range
        // when the extend-info values are malformed — confirm upstream
        // validation or add a guarded conversion
        backupParams.commonParams.maxAggregateFileSize = std::stoul(m_aggCopyExtendInfo.maxSizeAfterAggregate);
        backupParams.commonParams.maxFileSizeToAggregate = std::stoul(m_aggCopyExtendInfo.maxSizeToAggregate);
        int aggregateThreadNum = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, "DME_NAS_AGGR_THREAD_POOL_CNT");

        HCP_Log(INFO, MODULE) <<" FillBackupCommonParams Aggregate Info, aggregateThreadNum: "<< aggregateThreadNum <<
            " maxAggregateFileSize: " << backupParams.commonParams.maxAggregateFileSize <<
            " maxFileSizeToAggregate: " << backupParams.commonParams.maxFileSizeToAggregate << HCPENDLOG;
        // clamp the configured thread count to a sane range
        if (aggregateThreadNum > MAX_AGGREGATE_NUM || aggregateThreadNum <= 0) {
            aggregateThreadNum = DEFAULT_AGGREGATE_NUM;
        }
        backupParams.commonParams.aggregateThreadNum = aggregateThreadNum;
    } else {
        HCP_Log(INFO, MODULE) <<"FillBackupCommonParams:aggregate:no" << HCPENDLOG;
        backupParams.commonParams.backupDataFormat = BackupDataFormat::NATIVE;
    }
}

void HetroRestoreJob::FillBackupConfigPhase(BackupParams &backupParams, HetroBackupSubJob &backupSubJob)
{
    // Translate the sub-job type into the backup engine phase.
    // An unrecognised type leaves backupParams.phase untouched.
    const auto &subTaskType = backupSubJob.m_SubTaskType;
    if (subTaskType == SUBJOB_TYPE_DATACOPY_COPY_PHASE) {
        backupParams.phase = BackupPhase::COPY_STAGE;
    } else if (subTaskType == SUBJOB_TYPE_DATACOPY_HARDLINK_PHASE) {
        backupParams.phase = BackupPhase::HARDLINK_STAGE;
    } else if (subTaskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE) {
        backupParams.phase = BackupPhase::DELETE_STAGE;
    } else if (subTaskType == SUBJOB_TYPE_DATACOPY_DIRMTIME_PHASE) {
        backupParams.phase = BackupPhase::DIR_STAGE;
    }
}

void HetroRestoreJob::FillBackupConfig(BackupParams &backupParams, HetroBackupSubJob &backupSubJob)
{
    // Assemble the full backup-engine configuration for this restore sub job:
    // choose the IO engine and protocol-specific parameters by share
    // protocol, then fill the protocol-independent parameters and the
    // control/meta file locations.
    if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) {
        backupParams.srcEngine = BackupIOEngine::LIBNFS;
        backupParams.dstEngine = BackupIOEngine::LIBNFS;
        FillBackupNFSConfig(backupParams, backupSubJob);
    } else if (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) {
        backupParams.srcEngine = BackupIOEngine::LIBSMB;
        backupParams.dstEngine = BackupIOEngine::LIBSMB;
        FillBackupCIFSConfig(backupParams, backupSubJob);
        // CIFS additionally needs authentication info for both endpoints
        FillBackupSmbConfigAuthInfo(backupParams, m_jobId, m_dataFs, m_nasShare, BackupType::RESTORE);
    }

    backupParams.phase = m_backupPhase;
    FillBackupCommonParams(backupParams);
    backupParams.commonParams.orderOfFilenames = static_cast<OrderOfRestore>(SafeStoi(backupSubJob.m_orderOfRestore));
    backupParams.backupType = BackupType::RESTORE;
    // scanner control files live on the cache filesystem, metadata on the
    // meta filesystem; both paths come from the sub-job description
    backupParams.scanAdvParams.metaFilePath = m_cacheFsPath + "/" + backupSubJob.dcacheAndFcachePath;
    backupParams.commonParams.metaPath = m_metaFsPath + "/" + backupSubJob.metaFilePath;
    DBGLOG("FillBackupConfig, metaFilePath: %s, metaPath: %s",
        backupParams.scanAdvParams.metaFilePath.c_str(), backupParams.commonParams.metaPath.c_str());
    backupParams.scanAdvParams.useXmetaFileHandle = false;
}

void HetroRestoreJob::ScannerCtrlFileCallBack(void *usrData, const string &controlFilePath)
{
    // Scanner control-file completion callback: only logs the path.
    // Silence the unused parameter explicitly instead of the original
    // self-assignment `usrData = usrData` (triggers -Wself-assign).
    (void)usrData;
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

void HetroRestoreJob::ScannerHardLinkCallBack(void *usrData, const string &controlFilePath)
{
    // Scanner hardlink control-file callback: only logs the path.
    // Silence the unused parameter explicitly instead of the original
    // self-assignment `usrData = usrData` (triggers -Wself-assign).
    (void)usrData;
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

void HetroRestoreJob::RestoreDirMTimeCallBack(void *usrData, const string &controlFilePath)
{
    // Directory-mtime control-file callback: only logs the path.
    // Silence the unused parameter explicitly instead of the original
    // self-assignment `usrData = usrData` (triggers -Wself-assign).
    (void)usrData;
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

void HetroRestoreJob::RestoreDelCtrlCallBack(void *usrData, const string &controlFilePath)
{
    // Delete-phase control-file callback: only logs the path.
    // Silence the unused parameter explicitly instead of the original
    // self-assignment `usrData = usrData` (triggers -Wself-assign).
    (void)usrData;
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

bool HetroRestoreJob::CheckNasSharesReachable(int64_t &errCode)
{
    // Verify the shares are reachable with the protocol version recorded in
    // the copy's general info; errCode is set by the checker on failure.
    // (removed the unused local `std::string protoVersion` from the original)
    std::string proto = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_NFS) ? "nfs" : "cifs";

    if (proto == "nfs") {
        /* TO-DO */
        if (!CheckMountVersion("nfs", m_generalInfo.m_protocolVersion, errCode)) {
            HCP_Log(ERR, MODULE) << "Nas share is not reachable with specified version="
                << m_generalInfo.m_protocolVersion << HCPENDLOG;
            return false;
        }
    } else {
        Module::SmbVersion version = HetroCommonService::ConvertStringToSmbVersion(m_generalInfo.m_protocolVersion);
        if (!CheckSmbConnectVersion(version, errCode)) {
            HCP_Log(ERR, MODULE) << "Nas share is not reachable with specified version="
                << m_generalInfo.m_protocolVersion << HCPENDLOG;
            return false;
        }
    }
    return true;
}

bool HetroRestoreJob::CheckSmbConnectVersion(Module::SmbVersion version, int64_t &errCode)
{
    // Verify that both SMB endpoints accept the requested protocol version:
    //  - rmtSmb : the protected NAS share (restore target, written to)
    //  - dtFsSmb: the backup data filesystem (restore source, read from)
    // On success m_maxSmbBlockSize is set to min(source max read size,
    // target max write size). errCode reports which endpoint was unreachable.
    std::string agentHomePath = Module::EnvVarManager::GetInstance()->GetAgentHomePath();
    std::string krb5CcacheFile = agentHomePath + KRB5CCNAMEPREFIX + m_jobId;
    std::string krb5ConfigFile = agentHomePath + KRB5CONFIGPREFIX + m_jobId + KRB5CONFIGPOSTFIX;

    Module::SmbAuthType smbRmtAuthType;
    if (m_nasShare.auth.authType == AuthType::type::KERBEROS) {
        smbRmtAuthType = Module::SmbAuthType::KRB5;
    } else if (m_nasShare.auth.authType == AuthType::type::NO_AUTHENTICATION ||
        m_nasShare.auth.authType == AuthType::type::OS_PASSWORD ||
        m_nasShare.auth.authType == AuthType::type::APP_PASSWORD) {
        smbRmtAuthType = Module::SmbAuthType::NTLMSSP;
    } else {
        HCP_Log(INFO, MODULE) << "Wrong authType for cifs share: " << m_nasShare.auth.authType << HCPENDLOG;
        return MP_FALSE;
    }
    bool smbEncryption = (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION) ? true : false;

    Module::SmbContextArgs rmtPars = {
        m_nasShare.nasShareExt.m_domainName, m_nasShare.nasShareExt.m_serviceIP, m_nasShare.sharePath,
        m_nasShare.auth.authkey, m_nasShare.auth.authPwd, krb5CcacheFile,
        krb5ConfigFile, smbEncryption, false, ONE_MINUTE, smbRmtAuthType, version
    };
    Module::SmbContextWrapper rmtSmb(rmtPars);
    if (!rmtSmb.Init()) {
        return MP_FALSE;
    }

    if (!rmtSmb.SmbConnect()) {
        errCode = HomoErrorCode::ERROR_NAS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
        return MP_FALSE;
    }

    // the backup data filesystem always connects with ntlmssp and no encryption
    Module::SmbContextArgs dtFsPars = {
        std::string(), m_dataFsSvcIp, m_dataFs.remotePath, m_dataFs.auth.authkey,
        m_dataFs.auth.authPwd, std::string(), std::string(), false,
        false, ONE_MINUTE, Module::SmbAuthType::NTLMSSP, version
    };
    Module::SmbContextWrapper dtFsSmb(dtFsPars);
    if (!dtFsSmb.Init()) {
        return MP_FALSE;
    }

    if (!dtFsSmb.SmbConnect()) {
        errCode = HomoErrorCode::ERROR_NAS_SCAN_SECONDARY_SERVER_NOT_REACHABLE;
        return MP_FALSE;
    }
    m_maxSmbBlockSize = dtFsSmb.SmbGetMaxReadSize() < rmtSmb.SmbGetMaxWriteSize() ?
        dtFsSmb.SmbGetMaxReadSize() : rmtSmb.SmbGetMaxWriteSize();
    // fix: log the same values the minimum above is computed from — the
    // original printed rmtSmb's read size and dtFsSmb's write size, i.e. the
    // opposite transfer directions
    HCP_Log(INFO, MODULE) << "maxSmbBlockSize: " << m_maxSmbBlockSize <<
        " readMaxSize:" << dtFsSmb.SmbGetMaxReadSize() << " writeMaxSize:" << rmtSmb.SmbGetMaxWriteSize() << HCPENDLOG;
    return MP_TRUE;
}

bool HetroRestoreJob::CheckMountVersion(std::string proto, std::string protoVersion, int64_t &errCode)
{
    // Try a temporary local mount of the target share with the given protocol
    // and version; a failure marks the protected server as unreachable.
    const bool mounted = CheckRemoteNasMount(proto, protoVersion, m_nasShare, m_tempLocalMountPath);
    if (mounted) {
        return true;
    }
    HCP_Log(ERR, MODULE) << "CheckRemoteNasMount failed" << HCPENDLOG;
    errCode = HomoErrorCode::ERROR_NAS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
    return false;
}

bool HetroRestoreJob::UpdateBackupStatistics(BackupStats &backupStatistics)
{
    // Record the running statistics of this sub job and decide whether to
    // push a progress report to PM (rate limited per job) or only report
    // RUNNING to the agent. Returns false when the shared stats resource
    // cannot be locked or read.
    // noOfBytesCopied / NUMBER1024 — presumably bytes -> KB; confirm NUMBER1024
    m_dataSize = backupStatistics.noOfBytesCopied/NUMBER1024;
    // CIFS sub jobs tag the printed statistics with the sub-job request id
    std::string statPrintSubJobId = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) ?
        "[" + std::to_string(m_subJobRequestId) + "]" : "";
    UpdateBackupSubTaskStatistics(backupStatistics, m_subTaskType, m_cacheFsPath, m_subJobInfo);
    PrintBackupStatistics(backupStatistics, m_jobId, m_backupStatus,
        statPrintSubJobId);

    // node-level rate limit: if another sub job reported to PM recently,
    // only report RUNNING to the agent
    if (!g_nodeLevelTaskInfo.CanSendLogReportToPM(m_jobId)) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return true;
    }

    HetroNativeBackupStats backupStatsResource {};
    bool bReportToPM = false;

    // the stats resource is shared across sub jobs; serialize access to it
    if (!LockBackupStatsResource(m_jobId)) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        return false;
    }

    if (!GetBackupStatsResource(m_jobId, backupStatsResource)) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        UnlockBackupStatsResource(m_jobId);
        return false;
    }

    // report to PM at most once per BACKUP_REPORT_CIRCLE_TIME_IN_SEC; persist
    // the new report timestamp before releasing the lock
    if ((GetCurrentTimeInSeconds() - backupStatsResource.m_lastLogReportTime) > BACKUP_REPORT_CIRCLE_TIME_IN_SEC) {
            bReportToPM = true;
            backupStatsResource.m_lastLogReportTime = GetCurrentTimeInSeconds();
            UpdateBackupStatsResource(m_jobId, backupStatsResource);
    }
    UnlockBackupStatsResource(m_jobId);
    g_nodeLevelTaskInfo.UpdateLogReportTimeToPM(m_jobId);

    if (!bReportToPM) {
        ReportJobDetails(SubJobStatus::RUNNING, PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
    } else {
        ReportBackupRunningStatus(backupStatistics.noOfBytesCopied);
    }
    return true;
}

void HetroRestoreJob::PrintBackupCopyInfo(HetroNativeBackupStats &backupStatistics)
{
    // Print the final restore job report (status, timing) plus the final
    // scanner and backup statistics. On failure, timing is omitted.
    if (m_jobResult != AppProtect::JobResult::type::SUCCESS) {
        HCP_Log(INFO, MODULE)
            << "\n\n----------------"
            << "\nRestore Job Report"
            << "\n------------------"
            << "\nStatus    : Restore failed"
            << "\nJob Id    : " << m_jobId
            << "\n\n" << HCPENDLOG;
        PrintFinalScannerStats(m_scanStats);
        PrintFinalBackupStats(backupStatistics);
        return;
    }

    time_t jobEndTime = GetCurrentTimeInSeconds();
    std::string jobStartTimeStr = FormatTimeToStr(m_generalInfo.m_jobStartTime);
    std::string jobEndTimeStr = FormatTimeToStr(jobEndTime);
    // was `int32_t jobDuration = double(...)`: the round-trip through double
    // only added an implicit narrowing conversion — cast once, explicitly
    int32_t jobDuration = static_cast<int32_t>(jobEndTime - m_generalInfo.m_jobStartTime);
    m_totalJobDuration = jobDuration;

    HCP_Log(INFO, MODULE)
        << "\n\n----------------"
        << "\nRestore Job Report"
        << "\n------------------"
        << "\nStatus                    : Restore successful"
        << "\nJob Start time            : " << jobStartTimeStr
        << "\nJob End time              : " << jobEndTimeStr
        << "\nJob Duration (seconds)    : " << jobDuration
        << "\nJob Id                    : " << m_jobId
        << "\n\n" << HCPENDLOG;

    PrintFinalScannerStats(m_scanStats);
    PrintFinalBackupStats(backupStatistics);
}

bool HetroRestoreJob::KinitTGT()
{
    // Obtain a kerberos TGT for CIFS restore jobs authenticated via KERBEROS.
    // Skipped (returns true) for the post-job phase, non-CIFS shares and
    // other auth types. Returns false when parsing the auth extend info or
    // the kinit itself fails.
    HCP_Log(DEBUG, MODULE) << "Need or no kinit, m_jobCtrlPhase: " << m_jobCtrlPhase << ", m_protocol: "
        << m_nasShare.nasShareExt.m_protocol << ", authType: " << m_nasShare.auth.authType << HCPENDLOG;
    if (m_jobCtrlPhase == JOB_CTRL_PHASE_POSTJOB ||
        m_nasShare.nasShareExt.m_protocol != NAS_PROTOCOL_TYPE_E_CIFS ||
        m_nasShare.auth.authType != AuthType::type::KERBEROS) {
        HCP_Log(DEBUG, MODULE) << "Don't need kerberos ticket." << HCPENDLOG;
        return true;
    }
    // the kerberos secret/keytab/krb5.conf live in the target object's auth extend info
    if (!Module::JsonHelper::JsonStringToStruct(m_restoreJobPtr->targetObject.auth.extendInfo,
        m_nasShare.nasShareAuthExt)) {
        HCP_Log(ERR, MODULE) << "JsonStringToStruct failed." << HCPENDLOG;
        return false;
    }
    /* set krb5 environment variable for krb5.conf */
    if (KinitTGTInner(m_nasShare.auth.authkey, m_nasShare.nasShareAuthExt.secret,
        m_nasShare.nasShareAuthExt.keytab,
        m_nasShare.nasShareAuthExt.krb5Conf, m_jobId) != MP_SUCCESS) {
        HCP_Log(ERR, MODULE) << "Kinit TGT ticket FAILED." << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "Kinit TGT ticket SUCCESS." << HCPENDLOG;

    return true;
}

void HetroRestoreJob::KeepPluginAlive()
{
    // Periodically report RUNNING to the agent while sub-job generation is
    // still in progress so the plugin is not considered dead. Exits when the
    // job is aborted or generation finishes.
    HCP_Log(INFO, MODULE) << "Enter KeepPluginAlive" << HCPENDLOG;
    ActionResult result;
    SubJobDetails subJobDtls;
    LogDetail logDetail{};
    std::vector<LogDetail> logDetails;
    uint32_t reportCnt = 0;
    while (!m_isAbort && !m_generateSubjobFinish) {
        // check the exit conditions once every 10 seconds
        std::this_thread::sleep_for(std::chrono::seconds(REPORT_RUNNING_INTERVAL));
        // report once every 60 seconds
        if (reportCnt % REPORT_RUNNING_TIMES == 0) {
            REPORT_LOG2AGENT(subJobDtls, result, logDetails, logDetail, 0, 0, SubJobStatus::RUNNING);
        }
        reportCnt++;
    }
    INFOLOG("Keep Alive thread exit!, %d, %d", m_isAbort.load(), m_generateSubjobFinish.load());
}

int HetroRestoreJob::HandlePrerequisiteJobFailed(const int64_t errCode)
{
    // Kerberos-authenticated CIFS jobs created ticket/config files during the
    // prerequisite phase; remove them before reporting the failure to PM.
    const bool isKrbCifsShare = (m_nasShare.nasShareExt.m_protocol == NAS_PROTOCOL_TYPE_E_CIFS) &&
        (m_nasShare.auth.authType == AuthType::type::KERBEROS);
    if (isKrbCifsShare) {
        CleanKrbFilesForCifs(m_jobId);
    }
    ReportJobDetailsWithErrorCode(SubJobStatus::FAILED, PROGRESS0,
        "nas_plugin_hetro_restore_prepare_fail_label", JobLogLevel::TASK_LOG_ERROR, errCode);
    return MP_FAILED;
}

int HetroRestoreJob::CleanAndReportDetailedStatus(const HetroBackupSubJob &backupSubJob,
    SubJobStatus::type jobStatus, std::string &jobLogLabel, std::string mountPath, BackupStats &backupStatistics)
{
    // The delete phase mounted the remote share locally; release it first.
    if (backupSubJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE) {
        UnmountRemoteShare(mountPath);
    }
    // handle inner execution errors (kept as a separate function for clean code)
    HandleExecuteInnerError();
    // record the transferred size — presumably bytes -> KB via NUMBER1024; confirm
    m_dataSize = backupStatistics.noOfBytesCopied/NUMBER1024;
    ReportJobProgress(jobStatus, jobLogLabel);
    return MP_SUCCESS;
}