/*
* This file is a part of the open-eBackup project.
* This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
* If a copy of the MPL was not distributed with this file, You can obtain one at
* http://mozilla.org/MPL/2.0/.
*
* Copyright (c) [2024] Huawei Technologies Co.,Ltd.
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*/
#include "ObjectStorageBackupJob.h"
#include <algorithm>
#include <filesystem>
#include <utility>
#include <vector>
#include "JsonFileTool.h"
#include "parser/BucketLogParser.h"
#include "component/ResourceManager.h"
#include "utils/CertManager.h"

#ifdef WIN32
namespace fs = std::filesystem;
#else
namespace fs = boost::filesystem;
#endif

using namespace Module;
using namespace OBSPlugin;

// Phase-entry trace macro: binds the logging context (HCPTSP) to this job's
// request id (a hash of the job id) and logs the phase entry.  Defined as a
// bare block, so call sites usually follow it with a ';'.
#define ENTER                                                                                                   \
{                                                                                                               \
    m_mainJobRequestId = GenerateHash(m_jobId);                                                                 \
    HCPTSP::getInstance().reset(m_mainJobRequestId);                                                            \
    INFOLOG("Enter %s, jobId: %s, subJobId: %s", m_jobCtrlPhase.c_str(), m_jobId.c_str(), m_subJobId.c_str());  \
}

// Phase-exit trace macro: logs the phase exit with the same identifiers.
#define EXIT                                                                                                    \
{                                                                                                               \
    INFOLOG("Exit %s, jobId: %s, subJobId: %s", m_jobCtrlPhase.c_str(), m_jobId.c_str(), m_subJobId.c_str());   \
}

namespace {
    const std::string MODULE = "ObjectStorageBackup";  // module tag used by HCP_Log below
    const std::string TRUE_STR = "true";    // advanced parameters encode booleans as strings
    const std::string FALSE_STR = "false";
    const std::string OBS_PREVIOUS = "previous";  // scan-meta sub-dir holding the previous backup's metadata
    const std::string OBS_LATEST = "latest";      // scan-meta sub-dir holding the current backup's metadata
    constexpr uint32_t BACKUP_RETRY_CNT = 3;
    constexpr uint32_t SCANNER_REPORT_CIRCLE_TIME = 60;  /* seconds */
    // Named numeric constants (magic-number lint convention used project-wide).
    constexpr uint32_t NUMBER1 = 1;
    constexpr uint32_t NUMBER3 = 3;
    constexpr uint32_t NUMBER10 = 10;
    constexpr uint32_t NUMBER50 = 50;
    constexpr uint32_t NUMBER256 = 256;
    constexpr uint32_t NUMBER1024 = 1024;
    // Indexes into ObjectStorageCheckPointRecord::errorCode (SCANSTATUS is used
    // in SetupMetaFsForBackupJob; DATASTATUS/METASTATUS presumably mirror the
    // data/meta phases - confirm against the record's writers).
    constexpr uint32_t SCANSTATUS = 0;
    constexpr uint32_t DATASTATUS = 1;
    constexpr uint32_t METASTATUS = 2;
    const std::string OBJECT_LIST_FILE_UNIQUE_DIR_NAME = "unique";
    const std::string SCAN_SUB_JOB_RESTORE_INFO_FILE_NAME_SUFFIX = "_scan_restore_info.json";
}

EXTER_ATTACK int ObjectStorageBackupJob::CheckBackupJobType()
{
    // Phase entry point: validate the job payload, then run the real check
    // between the ENTER/EXIT trace macros so it is logged under the
    // main-job request id.
    if (!GetBackupJobInfo()) {
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_CHECKBACKUPJOBTYPE);

    ENTER;
    const int result = CheckBackupJobTypeInner();
    EXIT;

    return result;
}

EXTER_ATTACK int ObjectStorageBackupJob::PrerequisiteJob()
{
    // Prerequisite phase entry point: prepares directories and shared
    // resources before sub-jobs are generated, then reports the final
    // success/failure label and marks the job finished.
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_PREJOB);

    ENTER;
    // Keep-alive thread reports RUNNING periodically so the agent does not
    // time the job out while PrerequisiteJobInner() works.
    // NOTE(review): keepAlive is a plain bool shared with the thread without
    // synchronization - consider std::atomic<bool>; confirm whether the
    // framework tolerates the race here.
    bool keepAlive = true;
    std::thread keepSubJobAliveThread = std::thread(
        &ObjectStorageBackupJob::KeepBackupSubJobAlive, this, std::ref(keepAlive));
    int ret = PrerequisiteJobInner();
    keepAlive = false;
    keepSubJobAliveThread.join();
    EXIT;

    // Final report: failure carries the internal-error code, success 100%.
    if (ret != Module::SUCCESS) {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_prepare_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
    } else {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100),
            "object_storage_plugin_backup_prepare_succeed_label", 0);
    }
    SetJobToFinish();
    return ret;
}

EXTER_ATTACK int ObjectStorageBackupJob::GenerateSubJob()
{
    // Sub-job generation phase entry point.
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_GENSUBJOB);
    ENTER;
    const int result = GenerateSubJobInner();
    EXIT;

    // Report final progress: 100% on success, 0% on failure.
    if (result == Module::SUCCESS) {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100);
    } else {
        ReportJobDetails(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0);
    }

    SetJobToFinish();
    return result;
}

EXTER_ATTACK int ObjectStorageBackupJob::ExecuteSubJob()
{
    // Sub-job execution phase entry point; the actual work happens in
    // ExecuteSubJobInner() while a keep-alive thread reports liveness.
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_EXECSUBJOB);

    ENTER;
    // NOTE(review): keepAlive is a plain bool shared with the keep-alive
    // thread without synchronization - consider std::atomic<bool>; confirm.
    bool keepAlive = true;
    std::thread keepSubJobAliveThread = std::thread(
        &ObjectStorageCommonService::KeepSubJobAlive, this, std::ref(keepAlive));
    int ret = ExecuteSubJobInner();
    keepAlive = false;
    keepSubJobAliveThread.join();
    EXIT;

    SetJobToFinish();
    return ret;
}

EXTER_ATTACK int ObjectStorageBackupJob::PostJob()
{
    // Post-job phase entry point: runs the cleanup in PostJobInner().
    if (!GetBackupJobInfo()) {
        SetJobToFinish();
        return Module::FAILED;
    }
    SetJobCtrlPhase(JOB_CTRL_PHASE_POSTJOB);

    // ENTER/EXIT are written with trailing semicolons for consistency with
    // every other phase entry point in this file (the macros expand to bare
    // blocks, so this only adds an empty statement).
    ENTER;
    int ret = PostJobInner();
    EXIT;

    SetJobToFinish();
    return ret;
}

void ObjectStorageBackupJob::KeepBackupSubJobAlive(bool& keepAlive)
{
    // Thread body: reports RUNNING every EXECUTE_SUBTASK_MONITOR_DUR_IN_SEC
    // seconds until the owning thread clears keepAlive.
    // NOTE(review): keepAlive is read here while another thread writes it,
    // with no synchronization - consider std::atomic<bool>; confirm.
    HCP_Log(INFO, MODULE) << "Start keep alive for task " << m_jobId << " sub task " << m_subJobId << HCPENDLOG;
    while (keepAlive) {
        ReportJobDetailsWithLabel(SubJobStatus::RUNNING, OBSPlugin::PROGRESS0, "", JobLogLevel::TASK_LOG_INFO);
        std::this_thread::sleep_for(std::chrono::seconds(EXECUTE_SUBTASK_MONITOR_DUR_IN_SEC));
    }
    HCP_Log(INFO, MODULE) << "End keep alive for task " << m_jobId << " sub task " << m_subJobId << HCPENDLOG;
}

bool ObjectStorageBackupJob::GetBackupJobInfo()
{
    // Fetch and cache the BackupJob payload attached to this job, then derive
    // the main/sub job ids used by all later phases.
    // NOTE(review): GetJobInfo() is dereferenced without a null check -
    // presumably the framework guarantees it is set; confirm.
    m_backupJobPtr = std::dynamic_pointer_cast<AppProtect::BackupJob>(GetJobInfo()->GetJobInfo());
    if (m_backupJobPtr == nullptr) {
        HCP_Log(ERR, MODULE) << "Failed to get backupJobPtr." << HCPENDLOG;
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        return false;
    }
    SetMainJobId(m_backupJobPtr->jobId);
    SetSubJobId();
    return true;
}

int ObjectStorageBackupJob::CheckBackupJobTypeInner()
{
    HCP_Log(INFO, MODULE) << "backupJobType: " << (IsFullBackup() ? "FULL" : "INC") << HCPENDLOG;

    /* For every full backupjob, UBC deletes old DTREE and creates a new DTREE. So always return success for FULL */
    if (IsFullBackup()) {
        return Module::SUCCESS;
    }

    if (!InitJobInfo()) {
        return Module::FAILED;
    }

    if (NeedChangeIncToFull()) {
        HCP_Log(WARN, MODULE) << "Need change INC to FULL,jobId:" << m_jobId << HCPENDLOG;
        return Module::FAILED;
    }

    return Module::SUCCESS;
}

bool ObjectStorageBackupJob::IsFullBackup() const
{
    // True for FULL backup jobs; false for the incremental variants.
    return (m_backupJobPtr->jobParam.backupType == AppProtect::BackupJobType::FULL_BACKUP);
}

bool ObjectStorageBackupJob::InitJobInfo()
{
    // Build the full job context: auth info, protected object storage
    // details, repositories and repository paths.  The || chains below
    // short-circuit, so the original evaluation order is preserved.
    if (!GetAuthExtendInfo() || !InitObjectStorageInfo()) {
        return false;
    }

    CloseAggregateSwitchByBackupType();

    /* MetaFs/BackupFs and the concrete repository paths to be used */
    if (!InitMetaDataCacheBackupFs() || !InitRepoPaths()) {
        return false;
    }

    m_lastBackupTime = GetCurrentTimeInSeconds();
    DBGLOG("InitJobInfo, m_lastBackupTime: %s", ConvertToReadableTime(m_lastBackupTime).c_str());
    PrintJobInfo();

    return true;
}

bool ObjectStorageBackupJob::InitCloudClient()
{
    // Build the object-storage client from the decoded auth info.  The
    // certificate is materialized under m_certFilePath so the HTTP stack can
    // load CA material from a path.
    std::string certContent = OBSPlugin::Base64Decode(m_authInfo.certification);
    CertManager certMgr(m_certFilePath, m_jobId);
    certMgr.SaveFile(certContent);
    // Translate the plugin auth info into the Module-level storage config.
    Module::StorageConfig storageConfig;
    storageConfig.storageType = m_authInfo.getStorageType();
    storageConfig.verifyInfo.accessKey = m_authInfo.ak;
    storageConfig.verifyInfo.secretKey = m_authInfo.sk;
    storageConfig.verifyInfo.endPoint = m_authInfo.endPoint;
    storageConfig.verifyInfo.useHttps = m_authInfo.openHttps();
    storageConfig.verifyInfo.certHttps = certContent;
    storageConfig.verifyInfo.caPath = certMgr.GetCAPath();
    storageConfig.verifyInfo.caFile = certMgr.GetCAFile();
    storageConfig.verifyInfo.useProxy = m_authInfo.openProxy();
    storageConfig.verifyInfo.proxyHostName = m_authInfo.proxyHostName;
    storageConfig.verifyInfo.proxyUserName = m_authInfo.proxyUserName;
    storageConfig.verifyInfo.proxyUserPwd = m_authInfo.proxyUserPwd;
    m_cloudClient = CloudServiceManager::CreateInst(storageConfig);
    if (m_cloudClient == nullptr) {
        certMgr.RemoveCertFile();  // do not leave the cert file behind on failure
        ERRLOG("CreateInst Failed");
        return false;
    }
    return true;
}

bool ObjectStorageBackupJob::GetAuthExtendInfo()
{
    // Parse the authentication extend-info JSON of the protected environment
    // into m_authInfo; the payload is wiped before logging on failure.
    const std::string& authExtendInfo = m_backupJobPtr->protectEnv.auth.extendInfo;
    if (!Module::JsonHelper::JsonStringToStruct(authExtendInfo, m_authInfo)) {
        HCP_Log(ERR, MODULE) << "parse GetAuthExtendInfo failed. authExtendInfo is: "
            << WIPE_SENSITIVE(authExtendInfo)
            << "jobId is: " << m_backupJobPtr->jobId << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "storageType " << m_authInfo.storageType << HCPENDLOG;

    return true;
}

bool ObjectStorageBackupJob::NeedContinueCheckpoint()
{
    return m_protectedOBS.advParms.m_checkPoint == "true" ? true : false;
}

bool ObjectStorageBackupJob::InitObjectStorageInfo()
{
    // Parse the protected-object extend info (bucket list with prefixes) and
    // the job-level advanced parameters into m_protectedOBS.
    if (m_backupJobPtr->protectObject.extendInfo.empty()) {
        ERRLOG("Invalid object storage info, protectObject is empty");
        return false;
    }

    HCP_Log(INFO, MODULE) << "protectObject.extendInfo " << m_backupJobPtr->protectObject.extendInfo << HCPENDLOG;
    std::string protectedOBSInfo = m_backupJobPtr->protectObject.extendInfo;
    // Users must not enter prefixes containing '\' ('\', '[' and ']' carry
    // special meaning in regular expressions and need a '\' escape here).
    // The replacements strip escaped quotes and quoted brackets so the
    // payload parses as plain JSON.
    protectedOBSInfo = std::regex_replace(protectedOBSInfo, std::regex(R"(\\")"), R"(")");
    protectedOBSInfo = std::regex_replace(protectedOBSInfo, std::regex(R"("\[)"), "[");
    protectedOBSInfo = std::regex_replace(protectedOBSInfo, std::regex(R"(\]")"), "]");
    if (!Module::JsonHelper::JsonStringToStruct(protectedOBSInfo, m_protectedOBS)) {
        ERRLOG("protectedOBSInfo JsonStringToStruct failed");
        return false;
    }

    HCP_Log(INFO, MODULE) << "obsProtectSubObjectList.size() "
        << m_protectedOBS.obsProtectSubObjectList.size() << HCPENDLOG;
    // Collapse redundant prefixes per bucket: a longer prefix covered by a
    // shorter one would otherwise be scanned twice.
    for (auto& object : m_protectedOBS.obsProtectSubObjectList) {
        HCP_Log(INFO, MODULE) << "protect bucket name " << object.bucketName << HCPENDLOG;
        auto deduplicatePrefixs = DeduplicatePrefixs(object.prefixs);
        object.prefixs.assign(deduplicatePrefixs.begin(), deduplicatePrefixs.end());
    }

    HCP_Log(INFO, MODULE) << "m_backupJobPtr->extendInfo " << m_backupJobPtr->extendInfo << HCPENDLOG;
    if (!Module::JsonHelper::JsonStringToStruct(m_backupJobPtr->extendInfo, m_protectedOBS.advParms)) {
        ERRLOG("extendInfo JsonStringToStruct failed");
        return false;
    }
    // Multi-node backup: override prefix-splitting parameters from the plugin
    // configuration and register the shared general resource for the job.
    if (IsMultiNodeBackup()) {
        m_protectedOBS.advParms.m_prefixSplitter = GetConfigStrKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_PREFIX_SPLITTER");
        m_protectedOBS.advParms.m_prefixSplitDepth = std::to_string(GetConfigIntKey(
            OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_PREFIX_SPLIT_DEPTH"));
        INFOLOG("multiNodeBackupSwitch opened, prefixSplitter: %s, prefixSplitDepth = %s",
            m_protectedOBS.advParms.m_prefixSplitter.c_str(),
            m_protectedOBS.advParms.m_prefixSplitDepth.c_str());
        StorageGeneralInfo generalInfo;
        if (!CreateStorageGeneralResources(m_jobId, generalInfo)) {
            return false;
        }
    }

    // An empty bucket list is only fatal during the prerequisite phase.
    if (m_protectedOBS.obsProtectSubObjectList.empty() && m_jobCtrlPhase == JOB_CTRL_PHASE_PREJOB) {
        ERRLOG("Protected bucket list is empty, init object storage info failed!");
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_WARNING, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::BACKUP_PARAMETER_CHECK_FAILED_ERROR_CODE);
        return false;
    }

    return true;
}

std::vector<std::string> ObjectStorageBackupJob::DeduplicatePrefixs(const std::vector<std::string>& prefixs)
{
    /* Drop every prefix that is already covered by a shorter prefix in the
     * list (e.g. "a/b" is removed when "a" is present), so no object is
     * scanned twice.  Returns the surviving prefixes in sorted order. */
    std::vector<std::string> deduplicatedPrefixs;

    if (prefixs.empty()) {
        return deduplicatedPrefixs;
    }

    // True when shortStr is a leading prefix of longStr.  compare() inspects
    // at most shortStr.size() characters, unlike the previous find(), which
    // could scan all of longStr on a mismatch.
    auto isPrefixOf = [] (const std::string &shortStr, const std::string &longStr) -> bool {
        return longStr.compare(0, shortStr.size(), shortStr) == 0;
    };

    std::vector<std::string> sortedPrefixed(prefixs.begin(), prefixs.end());
    std::sort(sortedPrefixed.begin(), sortedPrefixed.end()); // lexicographic order keeps covered prefixes right after their cover
    // duplicatedRecord[i] == true means sortedPrefixed[i] is covered by an
    // earlier (shorter) prefix and must be skipped - sieve-style marking,
    // analogous to the sieve of Eratosthenes.
    std::vector<bool> duplicatedRecord(sortedPrefixed.size(), false);
    for (std::size_t i = 0; i < sortedPrefixed.size(); ++i) {
        if (duplicatedRecord[i]) {
            continue;
        }

        deduplicatedPrefixs.emplace_back(sortedPrefixed[i]);
        HCP_Log(INFO, MODULE) << "Add deduplicate prefix " << sortedPrefixed[i] << HCPENDLOG;

        for (std::size_t j = i + 1; j < sortedPrefixed.size(); ++j) {
            if (isPrefixOf(sortedPrefixed[i], sortedPrefixed[j])) {
                duplicatedRecord[j] = true; // covered by sortedPrefixed[i]
            } else {
                // The sort guarantees all strings sharing a prefix are
                // contiguous, so the first mismatch ends the scan.
                break;
            }
        }
    }

    return deduplicatedPrefixs;
}

void ObjectStorageBackupJob::CloseAggregateSwitchByBackupType()
{
    // Aggregation cannot be combined with permanent incremental backup:
    // force the aggregate switch off and zero its size thresholds.
    const bool permanentInc =
        m_backupJobPtr->jobParam.backupType == AppProtect::BackupJobType::PERMANENT_INCREMENTAL_BACKUP;
    if (!permanentInc || !IsAggregate()) {
        return;
    }
    WARNLOG("Automatically close the aggregate feature in the case of permanent incremental backup!");
    m_protectedOBS.advParms.m_aggregateSwitch = FALSE_STR;
    m_protectedOBS.advParms.m_maxSizeAfterAggregate = "0";
    m_protectedOBS.advParms.m_maxSizeToAggregate = "0";
}

bool ObjectStorageBackupJob::InitMetaDataCacheBackupFs()
{
    // Sort the repositories handed over by UBC into the cache, data and meta
    // filesystem slots used by the rest of the backup job.
    for (const auto& repo : m_backupJobPtr->repositories) {
        HCP_Log(DEBUG, MODULE) << "repositoryType - "
            << static_cast<int>(repo.repositoryType) << HCPENDLOG;
        for (const auto& path : repo.path) {
            HCP_Log(DEBUG, MODULE) << "path - " << path << HCPENDLOG;
        }
        if (repo.repositoryType == RepositoryDataType::CACHE_REPOSITORY) {
            m_cacheFs = repo;
        } else if (repo.repositoryType == RepositoryDataType::DATA_REPOSITORY) {
            m_multiDataFsList.emplace_back(repo);
        } else if (repo.repositoryType == RepositoryDataType::META_REPOSITORY) {
            // Only the master meta repository becomes m_metaFs; every meta
            // repository is still collected in m_multiMetaFsList.
            if (repo.role == RepositoryRole::REPO_MASTER) {
                m_metaFs = repo;
            }
            m_multiMetaFsList.emplace_back(repo);
        }
    }

    if (m_metaFs.path.empty() || m_multiDataFsList.empty() || m_cacheFs.path.empty()) {
        HCP_Log(ERR, MODULE) << "Received info is wrong, m_multiDataFsList size: " << m_multiDataFsList.size()
                             << ", m_cacheFs.path.size(): " << m_cacheFs.path.size()
                             << ", m_metaFs.path.size(): " << m_metaFs.path.size() << HCPENDLOG;
        return false;
    }

    return true;
}

bool ObjectStorageBackupJob::InitRepoPathsByEsn(const std::vector<StorageRepository>& multiFsList,
    std::unordered_map<std::string, std::string>& fsPathMap)
{
    // Map each repository's ESN (storage serial number read from the
    // repository extendInfo JSON) to one of its mount paths.  Fails when two
    // repositories report the same ESN.
    for (const auto& dataFs : multiFsList) {
        Json::Value extendInfoJsonVal;
        if (!Module::JsonHelper::JsonStringToJsonValue(dataFs.extendInfo, extendInfoJsonVal)) {
            HCP_Log(ERR, MODULE) << "Convert to extendInfoJsonVal failed." << HCPENDLOG;
            return false;
        }

        if (!(extendInfoJsonVal.isObject()
            && extendInfoJsonVal.isMember("esn")
            && extendInfoJsonVal["esn"].isString())) {
            HCP_Log(ERR, MODULE) << "Json change failed." << HCPENDLOG;
            return false;
        }

        // m_numberOfSubTask++ round-robins across the repository's mount
        // paths; aggregate mode nests the copy id under the chosen path.
        const std::string dataFsEsn = extendInfoJsonVal["esn"].asString();
        const std::string dataFsPath = IsAggregate()
                ? (dataFs.path[(m_numberOfSubTask++) % dataFs.path.size()] + dir_sep + m_backupJobPtr->copy.id)
                : dataFs.path[(m_numberOfSubTask++) % dataFs.path.size()];
        if (fsPathMap.count(dataFsEsn) == 0) {
            fsPathMap[dataFsEsn] = dataFsPath;
        } else {
            HCP_Log(ERR, MODULE) << "exist multiple fs on one esn" << HCPENDLOG;
            return false;
        }
        HCP_Log(INFO, MODULE) << "add fsPath: " << dataFsPath << " with esn " << dataFsEsn << HCPENDLOG;
    }
    return true;
}

bool ObjectStorageBackupJob::InitRepoPaths()
{
    // Resolve every concrete path the job uses: per-ESN data/meta paths,
    // the meta filesystem path, and all cache sub-directories.
    if (!InitRepoPathsByEsn(m_multiDataFsList, m_dataFsPathMap)) {
        HCP_Log(ERR, MODULE) << "Init data repo path failed." << HCPENDLOG;
        return false;
    }
    if (!InitRepoPathsByEsn(m_multiMetaFsList, m_metaFsPathMap)) {
        HCP_Log(ERR, MODULE) << "Init meta repo path failed." << HCPENDLOG;
        return false;
    }

    for (auto& it : m_dataFsPathMap) {
        HCP_Log(INFO, MODULE) << "Init dataFsPath:" << it.second << ", esn:" << it.first << HCPENDLOG;
    }
    for (auto& it : m_metaFsPathMap) {
        HCP_Log(INFO, MODULE) << "Init metaFsPath:" << it.second << ", esn:" << it.first << HCPENDLOG;
    }

    // Aggregate copies nest the copy id under the master meta path.
    m_metaFsPath = IsAggregate() ? (m_metaFs.path[0] + dir_sep + m_backupJobPtr->copy.id) : m_metaFs.path[0];
    m_backupCopyInfoFilePath = PathJoin(m_metaFs.path[0], BACKUP_COPY_METAFILE);
    HCP_Log(DEBUG, MODULE) << " m_metaFs.remotePath: " << m_metaFs.remotePath << HCPENDLOG;
    HCP_Log(INFO, MODULE) << " m_metaFsPath: " << m_metaFsPath << HCPENDLOG;

    // plugin can use any mounted cache path given by agent ,so using first one
    m_cacheFsPath = m_cacheFs.path[0];
    // sample path : /mnt/databackup/ObjectSet/3c6817e0-04eb-47bc-b52c-21e46c082ce1/cache/
    // ObjectStorage_CacheDataRepository/54d634db9eea45f4b0e9cf2a621ab47f/172.17.128.1/
    // 3c6817e0-04eb-47bc-b52c-21e46c082ce1 ---- cacheFsPath carries the copy id directory at its tail
    HCP_Log(INFO, MODULE) << "m_cacheFsPath: " << m_cacheFsPath << HCPENDLOG;

    // Working directories under the cache filesystem (see the layout comment
    // in SetupMetaFsForBackupJob, which creates them).
    m_scanMetaPath = PathJoin(m_cacheFsPath, "backup-job", "scan", "meta");
    m_scanControlPath = PathJoin(m_cacheFsPath, "backup-job", "scan", "ctrl");
    m_backupControlPath = PathJoin(m_cacheFsPath, "backup-job", "backup", "ctrl");
    m_statisticsPath = PathJoin(m_cacheFsPath, "obs-statistics");
    m_scanStatisticsPath = PathJoin(m_cacheFsPath, "scan-statistics");
    m_backupObjectInfoFilePath = PathJoin(m_cacheFsPath, "PrefixInfo", m_jobId + "backupObjectInfo.json");
    m_bucketLogDir = PathJoin(m_cacheFsPath, "logIncBackup", "bucketLog");
    m_objectListDir = PathJoin(m_cacheFsPath, "logIncBackup", "objectList");
    m_certFilePath = PathJoin(m_cacheFsPath, "cert");

    HCP_Log(INFO, MODULE) << "m_checkPoint "<<m_protectedOBS.advParms.m_checkPoint <<
    " m_retryNum: " << m_protectedOBS.advParms.m_retryNum << HCPENDLOG;

    // Checkpoint resume needs the on-disk checkpoint record loaded up front.
    if (NeedContinueCheckpoint()) {
        if (!GetCheckPointRecord()) {
            HCP_Log(ERR, MODULE) << "GetRetryTimes failed" << HCPENDLOG;
            return false;
        }
    }

    return true;
}

bool ObjectStorageBackupJob::GetCheckPointRecord()
{
    // Locate (or lazily create) the per-job checkpoint record used to resume
    // an interrupted backup: <cache parent>/<jobId>/checkpoint/CheckPoint_State.json
    m_subCheckPath = PathJoin(GetPathName(m_cacheFsPath), m_jobId, "checkpoint");
    m_subCheckMetaPath = PathJoin(m_subCheckPath, "meta");
    m_checkPath = m_subCheckPath + dir_sep + "CheckPoint_State.json";
    HCP_Log(DEBUG, MODULE) << "m_subCheckPath " << m_subCheckPath << " m_checkPath "<< m_checkPath << HCPENDLOG;

    if (!fs::is_regular_file(m_checkPath)) {
        // First checkpoint run for this job: create the directory tree and an
        // initial record with all NUMBER3 phase flags set to "false".
        CreateDirectory(m_subCheckPath);
        CreateDirectory(m_subCheckMetaPath);
        std::ofstream jsonFile(m_checkPath);
        if (!jsonFile.is_open()) {
            // Previously this only logged and carried on with an unusable
            // checkpoint file; fail fast instead so the caller can abort.
            HCP_Log(ERR, MODULE) << "Create checkpoint file failed. " << m_checkPath << HCPENDLOG;
            return false;
        }
        jsonFile.close();
        ObjectStorageCheckPointRecord checkRecord;
        for (uint32_t i = 0; i < NUMBER3; i++) {
            checkRecord.errorCode.push_back("false");
        }
        WriteToCache(checkRecord);
        HCP_Log(INFO, MODULE) << "First time checkpoint backup" << HCPENDLOG;
    }
    ReadFromCache();
    // Guarantee the record always holds the NUMBER3 phase flags so later
    // indexing with SCANSTATUS/DATASTATUS/METASTATUS stays in bounds.
    if (m_checkPointRecord.errorCode.empty()) {
        for (uint32_t i = 0; i < NUMBER3; i++) {
            m_checkPointRecord.errorCode.push_back("false");
        }
    }
    return true;
}

bool ObjectStorageBackupJob::ReadFromCache()
{
    // Load the checkpoint record from m_checkPath into m_checkPointRecord.
    const bool loaded = JsonFileTool::ReadFromFile(m_checkPath, m_checkPointRecord);
    if (!loaded) {
        HCP_Log(ERR, MODULE) << "Read checkPointRecord from file failed" << HCPENDLOG;
    }
    return loaded;
}

bool ObjectStorageBackupJob::WriteToCache(ObjectStorageCheckPointRecord& checkRecord)
{
    // Persist the checkpoint record to m_checkPath.
    const bool written = JsonFileTool::WriteToFile(checkRecord, m_checkPath);
    if (!written) {
        ERRLOG("WriteBackupObjectInfoToFile failed");
    }
    return written;
}

bool ObjectStorageBackupJob::IsAggregate() const
{
    // True when the advanced parameter enables small-file aggregation.
    return m_protectedOBS.advParms.m_aggregateSwitch == TRUE_STR;
}

void ObjectStorageBackupJob::PrintJobInfo() const
{
    // Dump the key job parameters (phase, ids, backup type, copy id and the
    // protected bucket names) to the log for troubleshooting.
    INFOLOG("jobPhase: %s, jobId: %s, subJobId: %s", m_jobCtrlPhase.c_str(), m_jobId.c_str(), m_subJobId.c_str());
    INFOLOG("backupJobType: %s", (IsFullBackup() ? "FULL" : "INC"));
    INFOLOG("copy.id: %s", m_backupJobPtr->copy.id.c_str());
    for (const auto& object : m_protectedOBS.obsProtectSubObjectList) {
        INFOLOG("protected bucket name: %s", object.bucketName.c_str());
    }
}

bool ObjectStorageBackupJob::NeedChangeIncToFull()
{
    // Decide whether an incremental backup must be promoted to a full one.
    // Any of these conditions invalidates the previous copy as a base:
    // missing/unreadable previous copy record, a changed protection scope,
    // a changed ACL backup type, or a toggled multi-node backup switch.
    if (!GetPrevBackupCopyInfo()) {
        WARNLOG("Failed to get prev backup copy info, force change INC to FULL, jobId: %s", m_jobId.c_str());
        return true;
    }

    if (IsProtectedObjectsChanged()) {
        WARNLOG("Protected objects changed, force change INC to FULL, jobId: %s", m_jobId.c_str());
        return true;
    }

    if (IsAclTypeChanged()) {
        WARNLOG("ACL backup type changed, force change INC to FULL, jobId: %s", m_jobId.c_str());
        return true;
    }
    if (IsMultiNodeBackupSwitchChanged()) {
        WARNLOG("MultiNode backup switch changed, force change INC to FULL, jobId: %s", m_jobId.c_str());
        return true;
    }
    return false;
}

bool ObjectStorageBackupJob::GetPrevBackupCopyInfo()
{
    // Full backups have no previous copy to load - nothing to do.
    if (IsFullBackup()) {
        return true;
    }

    // Incremental: load the record written at the end of the previous backup.
    const bool loaded = JsonFileTool::ReadFromFile(m_backupCopyInfoFilePath, m_prevBackupRecord);
    if (!loaded) {
        HCP_Log(ERR, MODULE) << "Read Backup Copy meta info from file failed" << HCPENDLOG;
    }
    return loaded;
}

int ObjectStorageBackupJob::GetPrevBackupInfo(const ObjectStorageBackupSubJob& subJob)
{
    // For an incremental sub-job, stage the previous backup's metadata for
    // every prefix of the sub-job so the scanner can diff against it.  The
    // copy/unzip runs on a worker thread while this thread keeps reporting
    // liveness to the agent.
    if (IsFullBackup()) {
        return Module::SUCCESS;
    }

    if (!GetPrevBackupCopyInfo()) {
        HCP_Log(ERR, MODULE) << "Get previous backup copyInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }

    const std::string metaPath = m_metaFsPathMap[subJob.esn];

    for (const auto& prefixInfo : subJob.GetFlatPrefixInfos()) {
        const std::string prefixSubStr = GetScanGenFileRelativeDir(prefixInfo);
        HCP_Log(INFO, MODULE) << "Begin Copy previous info! : " << prefixSubStr
            <<", metaPath:"<< metaPath << ",esn:"<< subJob.esn << HCPENDLOG;
        // NOTE(review): isUnzipping/isUnzipSuccess are plain bools polled and
        // written across threads without synchronization - consider
        // std::atomic<bool>; confirm.
        bool isUnzipping = true;
        bool isUnzipSuccess = true;
        std::thread monitorCopyThread = std::thread(&ObjectStorageBackupJob::CopyPreviousMetaFile,
            this, prefixSubStr, metaPath, std::ref(isUnzipping), std::ref(isUnzipSuccess));

        // Poll until the worker clears isUnzipping, reporting liveness so the
        // agent does not time the sub-job out during a long unzip.
        while (isUnzipping) {
            HCP_Log(INFO, MODULE) << "Wait for " << prefixSubStr << " unzip finish!" << HCPENDLOG;
            SendJobReportForAliveness();
            std::this_thread::sleep_for(std::chrono::seconds(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC));
        }
        monitorCopyThread.join();

        if (!isUnzipSuccess) {
            ReportJobDetailsWithLabelAndErrcode(
                std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
                "object_storage_plugin_backup_data_fail_label",
                ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
            return Module::FAILED;
        }
    }

    return Module::SUCCESS;
}

int ObjectStorageBackupJob::CopyPreviousMetaFile(
    const std::string& prefixSubStr, const std::string& metaPath, bool& isUnzipping, bool& isSuccess)
{
    // Worker-thread body (see GetPrevBackupInfo): stages the previous
    // backup's metadata for one prefix into <scanMeta><prefix>previous,
    // either by copying already-uncompressed meta files or by unzipping the
    // metafile_*.gz archives.  isUnzipping is cleared when done; isSuccess
    // carries the outcome back to the waiting thread.
    // NOTE(review): both flags are shared across threads without
    // synchronization - consider std::atomic<bool>; confirm with callers.
    HCPTSP::getInstance().reset(m_subJobRequestId);
    std::string execCmd;
    std::string workDir = m_scanMetaPath + prefixSubStr + OBS_PREVIOUS;
    CreateDirectory(workDir);
    std::string dirMetaPath;
    if (IsAggregate()) {
        // Aggregate copies live under <meta parent>/<previous copy id>/filemeta...
        Copy lastCopy {};
        if (!QueryPreviousCopy(lastCopy)) {
            isUnzipping = false;
            isSuccess = false;
            return Module::FAILED;
        }
        dirMetaPath = GetPathName(metaPath) + dir_sep + lastCopy.id + "/filemeta" + prefixSubStr;
    } else {
        dirMetaPath = metaPath + "/filemeta" + prefixSubStr;
    }

    // Fast path: metadata is already uncompressed - just copy it over.
    std::string metaFile = dirMetaPath + "/meta_file_0";
    if (OBSPlugin::IsFileExist(metaFile)) {
        HCP_Log(INFO, MODULE) << "Copy meta data to dir previous." << HCPENDLOG;
        isSuccess = (CopyUnCompressMeta(dirMetaPath, workDir) == Module::SUCCESS);
        isUnzipping = false;
        return Module::SUCCESS;
    }

    // Slow path: build and run the unzip command for the four archives.
    HCP_Log(INFO, MODULE) << "Unzip MetaFile to previous, IsAgg:"<< IsAggregate() <<", now create workDir:" << workDir
        << ", dirMetaPath:" << dirMetaPath << HCPENDLOG;
    std::string dirCacheZipFileName = StandardPath(dirMetaPath + "/metafile_DIRCACHE.gz");
    std::string fCacheZipFileName = StandardPath(dirMetaPath + "/metafile_FILECACHE.gz");
    std::string metaZipFilename  = StandardPath(dirMetaPath + "/metafile_META.gz");
    std::string xmetaZipFileName = StandardPath(dirMetaPath + "/metafile_XMETA.gz");
    execCmd = PrepareUnZipCommand(workDir, dirCacheZipFileName, fCacheZipFileName, metaZipFilename, xmetaZipFileName);

    HCP_Log(INFO, MODULE) << "Unzip MetaFile to dir previous, unzip cmd:" << execCmd << HCPENDLOG;

    isSuccess = CheckUnzipMetafileSucceed(execCmd, workDir);
    isUnzipping = false;
    HCP_Log(INFO, MODULE) << "Exit CopyPreviousMetaFile" << HCPENDLOG;
    return Module::SUCCESS;
}

int ObjectStorageBackupJob::PrerequisiteJobInner()
{
    // Actual prerequisite work: validate tooling, build the job context,
    // create directories/shared resources and fetch the bucket logs.
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);

    // pigz is required for (de)compressing scan metadata archives.
    if (!IsPigzExist()) {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "plugin_check_pigz_failed_label", 0);
        return Module::FAILED;
    }

    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }

    OBSPlugin::CreateDirectory(m_failureRecordRoot);

    PrintJobInfo();

    // Shared resources are read/updated by the later phases and sub-jobs.
    if (!CreateSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "CreateSharedResources failed" << HCPENDLOG;
        return Module::FAILED;
    }
    m_generalInfo.m_jobStartTime = GetCurrentTimeInSeconds();

    if (!SetupDataFsForBackupJob() || !SetupMetaFsForBackupJob()) {
        HCP_Log(ERR, MODULE) << "SetupDataFsForBackupJob or SetupMetaFsForBackupJob failed" << HCPENDLOG;
        return Module::FAILED;
    }

    // Persist the start time set above.
    if (!UpdateGeneralResource(m_jobId, m_generalInfo)) {
        HCP_Log(ERR, MODULE) << "UpdateGeneralResource failed" << HCPENDLOG;
        return Module::FAILED;
    }

    if (!GetBucketLogs()) {
        HCP_Log(ERR, MODULE) << "GetBucketLogs failed" << HCPENDLOG;
        return Module::FAILED;
    }

    return Module::SUCCESS;
}

bool ObjectStorageBackupJob::SetupDataFsForBackupJob()
{
    for (const auto& dataFsPathPair : m_dataFsPathMap) {
        if (!OBSPlugin::RecurseCreateDirectory(dataFsPathPair.second)) {
            HCP_Log(ERR, MODULE) << "Creat the dir of dataFsPath failed, dataFsPath is :" << dataFsPathPair.second
                                 << HCPENDLOG;
            return false;
        }
    }

    return true;
}

bool ObjectStorageBackupJob::SetupMetaFsForBackupJob()
{
    if (!OBSPlugin::RecurseCreateDirectory(m_metaFsPath)) {
        HCP_Log(ERR, MODULE) << "setup m_metaFsPath for backup job failed" << HCPENDLOG;
        return false;
    }

    // delete directory in case of prev post job not done
    if ((NeedContinueCheckpoint() && m_checkPointRecord.errorCode[SCANSTATUS] == "false")
        || !NeedContinueCheckpoint()) {
        OBSPlugin::Remove(m_scanMetaPath);
        OBSPlugin::Remove(m_statisticsPath);
        OBSPlugin::Remove(m_scanControlPath);
        OBSPlugin::Remove(m_backupControlPath);
        OBSPlugin::Remove(m_scanStatisticsPath);
    }

    /**
     * /<m_cacheFsPath.path>: m_cacheFsPath fs path passed by DME_UBC for OBS Plugin to save metadata's
     *
     * Create folders,
     * | -- CacheFsPath             // CacheFsPath from UBC
     *   | -- copy id
     *      | -- obs-statistics             // Folder to save the statistic of backup main job and sub-jobs
     *      | -- backup-job           // For backup job
     *         | -- scan              // Info saved by SCAN module
     *            | -- meta           // Meta info
     *               | -- previous    // Meta info (metafile, dcache, fcache) of the previous scan
     *               | -- latest      // Meta info (metafile, dcache, fcache) of the current scan
     *            | -- ctrl           // Control info (this in input to BACKUP module)
     *         | -- backup            // Info saved by BACKUP module (TO-DO: We will remove this)
     *            | -- ctrl
     */
    if (!OBSPlugin::RecurseCreateDirectory(m_scanMetaPath)) {
        HCP_Log(ERR, MODULE) << "setup m_scanMetaPath for backup job failed" << HCPENDLOG;
        return false;
    }
    if (!OBSPlugin::RecurseCreateDirectory(m_scanControlPath)) {
        HCP_Log(ERR, MODULE) << "setup m_scanControlPath for backup job failed" << HCPENDLOG;
        return false;
    }
    if (!OBSPlugin::RecurseCreateDirectory(m_backupControlPath)) {
        HCP_Log(ERR, MODULE) << "setup m_backupControlPath for backup job failed" << HCPENDLOG;
        return false;
    }
    if (!OBSPlugin::RecurseCreateDirectory(m_statisticsPath)) {
        HCP_Log(ERR, MODULE) << "setup m_statisticsPath for backup job failed" << HCPENDLOG;
        return false;
    }
    if (!OBSPlugin::RecurseCreateDirectory(m_scanStatisticsPath)) {
        HCP_Log(ERR, MODULE) << "setup m_scanStatisticsPath for backup job failed" << HCPENDLOG;
        return false;
    }

    return true;
}

bool ObjectStorageBackupJob::findFilesInCachePath(std::vector<std::string>& fileList)
{
    // Collect every entry under m_subCheckPath whose name contains "Success_"
    // and append it to fileList. Returns false only when the directory listing
    // is empty; a missing directory or a listing failure is merely logged.
    std::vector<std::string> candidates;
    if (!OBSPlugin::IsDirExist(m_subCheckPath)) {
        HCP_Log(ERR, MODULE) << "lastIDPath dont exist" << HCPENDLOG;
    }
    if (!GetFileListInDirectory(m_subCheckPath, candidates)) {
        HCP_Log(INFO, MODULE) << "GetFileList failed" << HCPENDLOG;
    }
    if (candidates.empty()) {
        HCP_Log(INFO, MODULE) << "fileList is empty" << HCPENDLOG;
        return false;
    }

    const std::string successTag = "Success_";
    for (const auto& entry : candidates) {
        const size_t tagPos = entry.find(successTag);
        HCP_Log(INFO, MODULE) << "prefixPos " << tagPos << HCPENDLOG;
        if (tagPos != std::string::npos) {
            fileList.push_back(entry);
        }
    }

    return true;
}

int ObjectStorageBackupJob::GenerateSubJobInner()
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);

    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }

    if (!GetPrevBackupCopyInfo()) {
        HCP_Log(ERR, MODULE) << "Get previous backup copyInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }

    PrintJobInfo();

    std::vector<BackupObjectInfo> backupObjectInfos;
    BackupObjectInfo backupObjectInfo;
    backupObjectInfo.esn = m_dataFsPathMap.begin()->first;
    for (const auto& protectObject : m_protectedOBS.obsProtectSubObjectList) {
        PrefixInfo prefixInfo;
        prefixInfo.bucketName = protectObject.bucketName;
        prefixInfo.parentPrefixFilter.assign(protectObject.prefixs.begin(), protectObject.prefixs.end());
        prefixInfo.subPrefixs.assign(protectObject.prefixs.begin(), protectObject.prefixs.end());
        backupObjectInfo.prefixInfo.emplace_back(prefixInfo);
    }
    backupObjectInfos.emplace_back(backupObjectInfo);
    if (CreateSubJobFromBackupObjectInfo(backupObjectInfos) != Module::SUCCESS) {
        HCP_Log(ERR, MODULE) << "CreateSubJobFromBackupObjectInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }
    BackupObjectInfosInCopy backupObjectInfosInCopy;
    backupObjectInfosInCopy.advParms = m_protectedOBS.advParms;
    backupObjectInfosInCopy.objectInfos.swap(backupObjectInfos);
    if (!WriteBackupObjectInfoToFile(backupObjectInfosInCopy)) {
        return Module::FAILED;
    }

    // 创建扫描任务结束统计子任务
    if (!CreateScanJobTeardownTask()) {
        HCP_Log(ERR, MODULE) << "Create ScanJobTeardown subtask failed" << HCPENDLOG;
        return Module::FAILED;
    }

    // 创建备份任务结束统计子任务
    if (!CreateBackupJobTeardownTask()) {
        HCP_Log(ERR, MODULE) << "Create Teardown subtask failed" << HCPENDLOG;
        return Module::FAILED;
    }

    if (!CreateZeroBackupJudgeTask(OBSPlugin::SUBJOB_TYPE_ZERO_BACKUP_JUDGE_PRIO)) {
        HCP_Log(WARN, MODULE) << "Generate zero backup judge task failed" << HCPENDLOG;
    }

    return Module::SUCCESS;
}

bool ObjectStorageBackupJob::WriteBackupObjectInfoToFile(BackupObjectInfosInCopy& backupObjectInfos)
{
    // Persist the backup object description (advanced params + per-ESN object
    // infos) as JSON so later sub-job phases can reload it.
    // An empty object list is treated as success (nothing to persist).
    if (backupObjectInfos.objectInfos.empty()) {
        INFOLOG("backupObjectInfos is empty");
        return true;
    }
    if (m_backupObjectInfoFilePath.empty()) {
        ERRLOG("m_backupObjectInfoFilePath path is empty");
        return false;
    }
    CreateDirectory(GetPathName(m_backupObjectInfoFilePath));
    if (!JsonFileTool::WriteToFile(backupObjectInfos, m_backupObjectInfoFilePath)) {
        ERRLOG("WriteBackupObjectInfoToFile failed");
        return false;
    }
    // fix: size() returns size_t — "%d" was an undefined-behavior varargs
    // format mismatch on 64-bit builds; "%zu" is the matching specifier.
    DBGLOG("WriteBackupObjectInfoToFile success, object size:%zu,path:%s", backupObjectInfos.objectInfos.size(),
        m_backupObjectInfoFilePath.c_str());
    return true;
}

int ObjectStorageBackupJob::CreateSubJobFromBackupObjectInfo(const std::vector<BackupObjectInfo>& backupObjectInfos)
{
    if (m_idGenerator == nullptr) {
        InitIdGenerator();
    }
    for (const auto& backupObjectInfo : backupObjectInfos) {
        ObjectStorageBackupSubJob subJobInfo;
        subJobInfo.m_SubTaskType = static_cast<uint32_t>(SUBJOB_TYPE_PREFIX_SCAN_PHASE);
        subJobInfo.esn = backupObjectInfo.esn;
        subJobInfo.copyId = m_backupJobPtr->copy.id;
        subJobInfo.prefixInfo.assign(backupObjectInfo.prefixInfo.begin(), backupObjectInfo.prefixInfo.end());
        std::string subJobInfoStr;
        if (!Module::JsonHelper::StructToJsonString(subJobInfo, subJobInfoStr)) {
            HCP_Log(ERR, MODULE) << "Convert to json failed" << HCPENDLOG;
            return Module::FAILED;
        }
        SubJob subJob;
        subJob.__set_jobId(m_jobId);
        subJob.__set_jobName("ObjectStorageBackupPrefixScan_" + std::to_string(m_idGenerator->GenerateId()));
        subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
        subJob.__set_policy(ExecutePolicy::ANY_NODE);
        subJob.__set_jobInfo(subJobInfoStr);
        subJob.__set_jobPriority(SUBJOB_TYPE_PREFIX_SCAN_PHASE_PRIO);
        subJob.__set_ignoreFailed(false);

        if (CreateSubTask(subJob) != Module::SUCCESS) {
            HCP_Log(ERR, MODULE) << "Report subjob to ubc failed" << HCPENDLOG;
            return Module::FAILED;
        }
    }

    return Module::SUCCESS;
}

bool ObjectStorageBackupJob::IsMultiNodeBackup() const
{
    // Multi-node backup is enabled when the advanced-parameter switch is "true".
    const std::string& multiNodeSwitch = m_protectedOBS.advParms.m_multiNodeBackupSwitch;
    return TRUE_STR == multiNodeSwitch;
}

bool ObjectStorageBackupJob::CreateBackupJobTeardownTask()
{
    // Queue the backup teardown sub task, retrying transient creation errors.
    std::string teardownName = SUBJOB_TYPE_TEARDOWN_JOBNAME;
    return CreateSubTaskWithRetry(
        SUBJOB_TYPE_TEARDOWN_PHASE, teardownName, SUBJOB_TYPE_TEARDOWN_PHASE_PRIO);
}

bool ObjectStorageBackupJob::CreateScanJobTeardownTask()
{
    // Queue the scan teardown sub task, retrying transient creation errors.
    std::string teardownName = SUBJOB_TYPE_SCAN_TEARDOWN_JOBNAME;
    return CreateSubTaskWithRetry(
        SUBJOB_TYPE_SCAN_TEARDOWN_PHASE, teardownName, SUBJOB_TYPE_SCAN_TEAR_DOWN_PHASE_PRIO);
}

int ObjectStorageBackupJob::ExecuteSubJobInner()
{
    m_subJobRequestId = GenerateHash(m_jobId + m_subJobId);
    HCPTSP::getInstance().reset(m_subJobRequestId);
    HCP_Log(INFO, MODULE) << "jobName: " << m_subJobInfo->jobName << ", jobId: " << m_jobId
                          << ", subJobId: " << m_subJobId << ", subJobRequestId: " << m_subJobRequestId << HCPENDLOG;
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
    
    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "Init Job Info failed" << HCPENDLOG;
        return Module::FAILED;
    }

    PrintSubJobInfo(m_subJobInfo);
    ObjectStorageBackupSubJob subJob {};
    if (!Module::JsonHelper::JsonStringToStruct(m_subJobInfo->jobInfo, subJob)) {
        HCP_Log(ERR, MODULE) << "Get backup subjob info failed" << HCPENDLOG;
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::FAILED, OBSPlugin::PROGRESS0);
        return Module::FAILED;
    }

    if (subJob.m_SubTaskType == SUBJOB_TYPE_PREFIX_SCAN_PHASE) {
        return ExecutePrefixScanSubJob(subJob);
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_SCAN_TEARDOWN_PHASE) {
        return ExecuteScanTeardownSubJob();
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_COPY_PHASE) {
        return ExecuteDataCopySubJob(subJob);
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_DATACOPY_DELETE_PHASE) {
        return ExecuteDataCopySubJob(subJob);
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_TEARDOWN_PHASE) {
        return ExecuteTeardownSubJob();
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_COPYMETA_PHASE) {
        return ExecuteCopyMetaSubJob(subJob);
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_CREATE_SUBJOB_PHASE) {
        return ExecuteCreateSubJob(subJob);
    } else if (subJob.m_SubTaskType == SUBJOB_TYPE_ZERO_BACKUP_JUDGE_PHASE) {
        HCP_Log(INFO, MODULE) << "Start merge db file, meta file path " << m_metaFsPathMap.begin()->second << HCPENDLOG;
        std::string filePath = m_statisticsPath + "/backup-stats-main-" + m_backupJobPtr->jobId + ".json";
        return ExecuteZeroBackupJudgeSubJob(filePath);
    } else {
        return Module::FAILED;
    }

    return Module::SUCCESS;
}

int ObjectStorageBackupJob::ExecuteCreateSubJob(const ObjectStorageBackupSubJob& subJob)
{
    // CREATE_SUBJOB phase: fan out the backup copy/delete/meta sub tasks.
    INFOLOG("Enter ExecuteCreateSubJob: %s, %s", m_jobId.c_str(), m_subJobId.c_str());
    if (!CreateBackupCopySubTask(subJob)) {
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

int ObjectStorageBackupJob::ExecuteSkipScanSubJob(const ObjectStorageBackupSubJob& subJob)
{
    // Checkpoint-resume path: the scan already finished in a previous run, so
    // reload the persisted scan statistics, create the follow-up sub jobs, and
    // report the scan as COMPLETED without rescanning.
    // NOTE(review): this int-returning function returns bool values (0/1);
    // the only visible caller treats any nonzero result as success, so the
    // mismatch is currently benign — confirm before normalizing to Module::*.
    HCP_Log(INFO, MODULE) << "Skip scan jobId: " << m_jobId << ", subJobId: " << m_subJobId << HCPENDLOG;

    AppProtect::SubJobStatus::type jobStatus = SubJobStatus::COMPLETED;
    int jobProgress = OBSPlugin::PROGRESS100;
    std::string jobLogLabel = "";

    // Pick the scan statistics file: scanFileName ends up as the LAST entry in
    // the directory listing whose name contains the restore-info suffix.
    std::string scanFileName;
    std::vector<std::string> fileList {};
    GetFileListInDirectory(m_scanStatisticsPath, fileList);
    std::string suffix = SCAN_SUB_JOB_RESTORE_INFO_FILE_NAME_SUFFIX;
    for (const auto& fileName : fileList) {
        size_t suffixPos = fileName.find(suffix);
        if (suffixPos == std::string::npos) {
            continue;
        }
        HCP_Log(DEBUG, MODULE) << "suffixPos " << suffixPos << HCPENDLOG;
        scanFileName = fileName;
        HCP_Log(DEBUG, MODULE) << "scanFileName: " << scanFileName << HCPENDLOG;
    }

    // Best effort: a missing/unreadable statistics file is only logged and the
    // report below proceeds with whatever m_scanStats already holds.
    if (!JsonFileTool::ReadFromFile(scanFileName, m_scanStats)) {
        HCP_Log(INFO, MODULE) << "No scan restore info is read, file name " << scanFileName << HCPENDLOG;
    }

    if (!CreateBackupJobTaskToCreateFurtherSubTasks(subJob)) {
        ERRLOG("Create SubJob Failed!");
        if (NeedContinueCheckpoint()) {
            RecordErrInfo(SCANSTATUS);
        }
        return false;
    }

    ReportScannerStatus(m_scanStats, jobStatus, jobLogLabel, jobProgress);
    return true;
}


bool ObjectStorageBackupJob::ScanPrefix(const ObjectStorageBackupSubJob& subJob,
    SubJobStatus::type& jobStatus,
    std::string& jobLogLabel,
    int& jobProgress)
{
    // Scan each flattened prefix of the sub job one at a time (subPrefixs are
    // pre-flattened so a single scanner run covers exactly one prefix),
    // skipping prefixes already recorded in m_scanStats. The out-params are
    // filled by MonitorScanner for the caller to report.
    for (const auto& target : subJob.GetFlatPrefixInfos()) {
        if (m_scanStats.FindScannedPrefixInfo(target)) {
            if (target.subPrefixs.empty()) {
                HCP_Log(INFO, MODULE) << "bucket " << target.bucketName << " has already scanned, skip"
                    << HCPENDLOG;
            } else {
                HCP_Log(INFO, MODULE) << "bucket " << target.bucketName << " prefixs"
                    << target.subPrefixs.front() << " has already scanned, skip" << HCPENDLOG;
            }
            continue;
        }

        // Pessimistic default; MonitorScanner upgrades it on completion.
        jobStatus = AppProtect::SubJobStatus::type::FAILED;
        if (!StartScanner(target)) {
            HCP_Log(ERR, MODULE) << "Start Scanner Failed" << HCPENDLOG;
            if (m_scanner) {
                m_scanner->Destroy();
            }
            return false;
        }
        HCP_Log(INFO, MODULE) << "Start Scanner Success" << HCPENDLOG;

        MonitorScanner(target, subJob.esn, jobStatus, jobLogLabel, jobProgress);
        if (jobStatus != SubJobStatus::COMPLETED) {
            HCP_Log(ERR, MODULE) << "Monitor Scanner Failed" << HCPENDLOG;
            return false;
        }
        HCP_Log(INFO, MODULE) << "Scan finished for " << target.bucketName << ", its first prefix is "
            << (target.subPrefixs.empty() ? " " : target.subPrefixs.front()) << HCPENDLOG;
    }

    return true;
}


int ObjectStorageBackupJob::ExecutePrefixScanSubJob(const ObjectStorageBackupSubJob& subJob)
{
    // Run the scanner over every prefix carried by this sub job, then queue
    // the CREATE_SUBJOB phase task that fans out copy/delete/meta sub tasks.
    INFOLOG("ExecutePrefixScanSubJob, esn:%s", subJob.esn.c_str());
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0, "");
    if (GetPrevBackupInfo(subJob) == Module::FAILED) {
        HCP_Log(ERR, MODULE) << "Get previous backup copyInfo failed" << HCPENDLOG;
        return Module::FAILED;
    }

    // Checkpoint resume: a recorded "done" scan status lets us skip scanning.
    if (NeedContinueCheckpoint() && m_checkPointRecord.errorCode[SCANSTATUS] == "done") {
        return ExecuteSkipScanSubJob(subJob) ? Module::SUCCESS : Module::FAILED;
    }

    int scanParallel = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_SCAN_PARALLEL_SWITCH");
    m_isScanParallel = (scanParallel == 0) ? false : true;
    HCP_Log(INFO, MODULE) << "The scan parallel switch is " << m_isScanParallel << HCPENDLOG;

    // Scope guard: in serialized-scan mode, release the scanner slot and the
    // job's resource entry no matter how this function exits.
    std::shared_ptr<void> defer(nullptr, [&](...) {
        if (!m_isScanParallel) {
            m_scanMgr.ReleaseRunningTask(m_subJobId);
            ResourceManager::GetInstance().Erase(m_jobId);
        }
        HCP_Log(INFO, MODULE) << "Release scanner for " << m_jobId << ",subJobId:"<< m_subJobId << HCPENDLOG;
    });

    // Best effort: reload per-sub-job scan statistics from a previous attempt.
    const std::string fileName = m_scanStatisticsPath + "/" + m_subJobId  + SCAN_SUB_JOB_RESTORE_INFO_FILE_NAME_SUFFIX;
    if (IsFileExist(fileName) && !JsonFileTool::ReadFromFile(fileName, m_scanStats)) {
        HCP_Log(INFO, MODULE) << "No scan restore info is read, file name " << fileName << HCPENDLOG;
    }

    if (!m_isScanParallel) {
        HoldScanner();  // Once HoldScanner returns, this scan has the highest priority, so calling m_scanMgr.Destroy() cannot affect other scan tasks
    }
    m_scanStats.m_scanStartTime = OBSPlugin::GetCurrentTimeInSeconds();
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    if (!m_isScanParallel) {
        ResourceManager::GetInstance().Insert(m_jobId);
    }
    AppProtect::SubJobStatus::type jobStatus = AppProtect::SubJobStatus::type::FAILED;
    std::string jobLogLabel = "";
    int jobProgress = 0;

    ReadEncodeBucket(m_bucketPrefixMap, m_encodeSwitch);
    // NOTE(review): ScanPrefix's return value is ignored — a scan failure still
    // proceeds to create the follow-up sub jobs and is surfaced only through
    // jobStatus in ReportScannerStatus below; confirm this is intentional.
    ScanPrefix(subJob, jobStatus, jobLogLabel, jobProgress);

    if (!CreateBackupJobTaskToCreateFurtherSubTasks(subJob)) {
        ERRLOG("Create SubJob Failed!");
        return Module::FAILED;
    }

    ReportScannerStatus(m_scanStats, jobStatus, jobLogLabel, jobProgress);
    return Module::SUCCESS;
}

bool ObjectStorageBackupJob::CreateBackupJobTaskToCreateFurtherSubTasks(const ObjectStorageBackupSubJob& subJob)
{
    std::string jobName = "ObsBackup_CreateSubJob" + m_jobId;
    ObjectStorageBackupSubJob subJobInfo = subJob;
    subJobInfo.m_SubTaskType = SUBJOB_TYPE_CREATE_SUBJOB_PHASE;
    std::string subJobStr;
    if (!Module::JsonHelper::StructToJsonString(subJobInfo, subJobStr)) {
        ERRLOG("Json Transfer Failed!");
        return false;
    }
    SubJob subTask {};
    subTask.__set_jobId(m_jobId);
    subTask.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subTask.__set_policy(ExecutePolicy::ANY_NODE);
    subTask.__set_jobInfo(subJobStr);
    subTask.__set_ignoreFailed(false);
    subTask.__set_jobPriority(SUBJOB_TYPE_CREATE_SUB_JOB_PRIO);
    subTask.__set_jobName(jobName);

    do {
        if (IsAbortJob()) {
            WARNLOG("Exit received Abort for taskid: %s, subtaskid: %s", m_jobId.c_str(), m_subJobId.c_str());
            break;
        }
        int ret = CreateSubTask(subTask);
        if (ret == Module::SUCCESS) {
            break;
        } else if (ret == Module::RETRY) {
            HCP_Log(WARN, MODULE) << "Create subtask failed with retriable error"<< HCPENDLOG;
            SleepForCreateSubTaskError();
            continue;
        } else {
            ERRLOG("Exit CreateBackupJobTaskToCreateFurtherSubTasks, CreateSubtask failed");
            return false;
        }
    } while (true);
    return true;
}

bool ObjectStorageBackupJob::CreateBackupCopySubTask(const ObjectStorageBackupSubJob& subJob)
{
    // For every flattened prefix: create the data-copy and data-delete sub
    // tasks from the scan control files, then create the copy-meta task unless
    // a checkpoint resume shows the meta for this prefix already completed.
    std::vector<std::string> metaPreFixList {};
    if (NeedContinueCheckpoint()) {
        GetFileListInDirectory(m_subCheckMetaPath, metaPreFixList);
    }

    for (const auto& prefixInfo : subJob.GetFlatPrefixInfos()) {
        if (!CreateSubTasksFromCtrlFile(prefixInfo, subJob.esn, SUBJOB_TYPE_DATACOPY_COPY_PHASE)) {
            HCP_Log(ERR, MODULE) << "Create subtask failed" << HCPENDLOG;
            return false;
        }
        if (!CreateSubTasksFromCtrlFile(prefixInfo, subJob.esn, SUBJOB_TYPE_DATACOPY_DELETE_PHASE)) {
            HCP_Log(ERR, MODULE) << "Create subtask failed" << HCPENDLOG;
            return false;
        }
        // fix: the original mixed-precedence expression
        //   A && !B || !A   (A = NeedContinueCheckpoint, B = meta file found)
        // simplifies to !A || !B — written explicitly: skip the meta task only
        // when resuming from a checkpoint AND the prefix meta already exists.
        if (!NeedContinueCheckpoint() || !findFileInMetaPath(metaPreFixList, prefixInfo)) {
            if (!CreateBackupCopyMetaTask(prefixInfo, subJob.esn)) {
                HCP_Log(ERR, MODULE) << "Create Backup Copy Meta Failed!" << HCPENDLOG;
                return false;
            }
        }
    }

    return true;
}

bool ObjectStorageBackupJob::findFileInMetaPath(std::vector<std::string>& metaPreFixList, const PrefixInfo& prefixInfo)
{
    // Returns true when a checkpoint meta marker for this prefix already
    // exists under m_subCheckMetaPath, meaning the copy-meta task can be
    // skipped on resume.
    // NOTE(review): metaPreFixList is not used by this implementation; it is
    // kept to preserve the existing caller interface.
    // fix: GetScanPrefixHash was called twice — hoisted into a local.
    const std::string scanHash = GetScanPrefixHash(prefixInfo);
    const std::string prefixHash = (scanHash == "0") ? prefixInfo.bucketName : scanHash;
    std::string prefixInfoFile = PathJoin(m_subCheckMetaPath, prefixHash);
    if (IsFileExist(prefixInfoFile)) {
        std::string prefixInfoStr = (prefixInfo.subPrefixs.empty()) ? prefixInfo.bucketName
            : prefixInfo.subPrefixs.front();
        HCP_Log(INFO, MODULE) << "Skip success prefix meta " << prefixInfoStr << HCPENDLOG;
        return true;
    }

    return false;
}

void ObjectStorageBackupJob::HoldScanner()
{
    // Block until this sub job acquires the serialized scanner slot, reporting
    // RUNNING heartbeats while waiting; bail out early if the job is aborted.
    m_scanMgr.RegisterTask(ScanTaskLevel::REGULAR, m_subJobId);
    for (;;) {
        if (m_scanMgr.HoldRunningTask(m_subJobId)) {
            break;
        }
        if (IsAbortJob()) {
            HCP_Log(WARN, MODULE) << "Scan job " << m_subJobId << " is aborted" << HCPENDLOG;
            return;
        }
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0, "");
        Module::SleepFor(std::chrono::seconds(NUMBER10));
    }

    m_scanStats.m_scanStartTime = OBSPlugin::GetCurrentTimeInSeconds();
}

bool ObjectStorageBackupJob::StartScanner(const PrefixInfo& subObject)
{
    // Create the scan output directories (meta / scan-ctrl / backup-ctrl) for
    // this prefix, then build the scan config and launch a scanner instance.
    // fix: GetScanGenFileRelativeDir(subObject) was recomputed for every path
    // and again in each log statement — hoisted into a single local.
    const std::string relativeDir = GetScanGenFileRelativeDir(subObject);
    if (!OBSPlugin::RecurseCreateDirectory(m_scanMetaPath + relativeDir)) {
        HCP_Log(ERR, MODULE) << "create meta path - "
            << m_scanMetaPath + relativeDir << " failed" << HCPENDLOG;
        return false;
    }
    if (!OBSPlugin::RecurseCreateDirectory(m_scanControlPath + relativeDir)) {
        HCP_Log(ERR, MODULE) << "create meta path for ctrl files - "
            << m_scanControlPath + relativeDir << " failed" << HCPENDLOG;
        return false;
    }
    if (!OBSPlugin::RecurseCreateDirectory(m_backupControlPath + relativeDir)) {
        HCP_Log(ERR, MODULE) << "create backup ctrl path - "
            << m_backupControlPath + relativeDir << " failed" << HCPENDLOG;
        return false;
    }

    ScanConfig scanConfig;
    if (!FillScanConfig(subObject, scanConfig)) {
        HCP_Log(ERR, MODULE) << "Fill scan config failed!" << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "Create scanner Initiate" << HCPENDLOG;
    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (!m_scanner || m_scanner->Start() != SCANNER_STATUS::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start scanner instance failed!" << HCPENDLOG;
        return false;
    }

    return true;
}

static void ScannerCtrlFileCallBack(void* usrData, const std::string& controlFilePath)
{
    // Scanner control-file callback: only traces the generated control file
    // path; the user-data pointer is not needed here.
    // fix: replaced "usrData = usrData;" self-assignment with an explicit
    // discard to silence the unused-parameter warning idiomatically.
    static_cast<void>(usrData);
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}
 
static void BackupDelCtrlCallBack(void* usrData, const std::string& controlFilePath)
{
    // Delete-control callback: only traces the generated control file path;
    // the user-data pointer is not needed here.
    // fix: replaced "usrData = usrData;" self-assignment with an explicit
    // discard to silence the unused-parameter warning idiomatically.
    static_cast<void>(usrData);
    HCP_Log(DEBUG, MODULE) << "Callback Received for control File path:"<< WIPE_SENSITIVE(controlFilePath) << HCPENDLOG;
}

bool ObjectStorageBackupJob::FillScanConfig(const PrefixInfo& subObject, ScanConfig& scanConfig)
{
    // Assemble the ScanConfig for one prefix: job ids, environment-tuned queue
    // sizes, OBS connection/auth info, backup-type flags, output paths, and
    // the control-file callbacks. Always returns true.
    HCP_Log(INFO, MODULE) << " Enter FillScanConfig" << HCPENDLOG;
    size_t subJobRequestId = GenerateHash(m_jobId + m_subJobId);

    scanConfig.jobId = m_jobId;
    // NOTE(review): subJobId is set to m_jobId, not m_subJobId — looks like a
    // copy-paste slip, but may be deliberate scanner grouping; confirm before fixing.
    scanConfig.subJobId = m_jobId;
    scanConfig.reqID = subJobRequestId;
    scanConfig.failureRecordRootPath = m_failureRecordRoot;

    FillScanConfigBasedOnEnviroment(scanConfig);
    FillScanConfigBasedOnObsInfo(subObject, scanConfig);
    // Full backup rescans everything; incremental relies on lastBackupTime.
    scanConfig.scanType = IsFullBackup() ? (ScanJobType::FULL) : (ScanJobType::INC);
    scanConfig.usrData = (void *)this;
    scanConfig.lastBackupTime = IsFullBackup() ? 0 : m_prevBackupRecord.m_lastBackupTime;
    scanConfig.useLastBackupTime = true;
    HCP_Log(DEBUG, MODULE) << "lastBackupTime: " << OBSPlugin::ConvertToReadableTime(scanConfig.lastBackupTime)
                           << HCPENDLOG;

    // Extended attributes are never scanned; ACL scanning follows the job's
    // advanced parameter (see IsBackupAcl).
    scanConfig.scanExtendAttribute = false;
    scanConfig.scanAcl = IsBackupAcl();
    HCP_Log(INFO, MODULE) << "Scan meta switch - " << scanConfig.scanExtendAttribute << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "Scan acl switch - " << scanConfig.scanAcl << HCPENDLOG;
    /* Path */
    scanConfig.metaPath =  m_scanMetaPath + GetScanGenFileRelativeDir(subObject);
    scanConfig.metaPathForCtrlFiles = m_scanControlPath + GetScanGenFileRelativeDir(subObject);
    /* Callbacks Regiter */
    scanConfig.scanResultCb = ScannerCtrlFileCallBack;
    scanConfig.scanHardlinkResultCb = nullptr;
    scanConfig.mtimeCtrlCb = nullptr;
    scanConfig.deleteCtrlCb = BackupDelCtrlCallBack;

    scanConfig.maxCommonServiceInstance = 1;
    scanConfig.scanCheckPointEnable = false;
    scanConfig.triggerTime = OBSPlugin::GetCurrentTimeInSeconds();
    // Per-bucket URL-encoding switch, resolved from the map filled earlier by
    // ReadEncodeBucket.
    scanConfig.encodeEnable = CheckEncode(m_authInfo.endPoint, subObject.bucketName,
        m_bucketPrefixMap, m_encodeSwitch);

    FillScanConfigForNative(scanConfig);

    return true;
}

void ObjectStorageBackupJob::FillScanConfigBasedOnEnviroment(ScanConfig& scanConfig)
{
    // Tune scanner queue and thread sizes from configuration: start from the
    // X8000 defaults, then override per deployment type (X3000 / X6000), and
    // finally force the X8000 producer thread count for Ali storage.
    scanConfig.maxWriteQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_MAX_WRITE_QUEUE_SIZE");
    scanConfig.maxScanQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_MAX_SCAN_QUEUE_SIZE");
    scanConfig.minScanQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_MIN_SCAN_QUEUE_SIZE");
    scanConfig.writeQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_WRITE_QUEUE_SIZE");
    scanConfig.dirEntryReadCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_DIR_ENTRY_READ_COUNT");
    scanConfig.scanMetaFileSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_DEFAULT_META_FILE_SIZE");
    scanConfig.producerThreadCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_SCAN_X8000_PRODUCER_THREAD_COUNT");

    std::string deploy_type = OBSPlugin::GetDeployType();
    if (deploy_type == X3000_DEPLOY_TYPE) {
        // X3000: smaller write queue / meta file size / producer thread count.
        scanConfig.writeQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X3000_WRITE_QUEUE_SIZE");
        scanConfig.scanMetaFileSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X3000_DEFAULT_META_FILE_SIZE");
        scanConfig.producerThreadCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X3000_PRODUCER_THREAD_COUNT");
        HCP_Log(INFO, MODULE) << "Use x3000 config." << " writeQueueSize - " << scanConfig.writeQueueSize
            << " scanMetaFileSize - " << scanConfig.scanMetaFileSize << " producerThreadCount - "
            << (int)scanConfig.producerThreadCount << HCPENDLOG;
    } else if (deploy_type == X6000_DEPLOY_TYPE) {
        scanConfig.maxOpendirReqCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_OPENDIR_REQ_CNT");
        scanConfig.maxWriteQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_MAX_WRITE_QUEUE_SIZE");
        scanConfig.maxScanQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_MAX_SCAN_QUEUE_SIZE");
        scanConfig.minScanQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_MIN_SCAN_QUEUE_SIZE");
        scanConfig.writeQueueSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_WRITE_QUEUE_SIZE");
        scanConfig.dirEntryReadCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_DIR_ENTRY_READ_COUNT");
        scanConfig.scanMetaFileSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X6000_DEFAULT_META_FILE_SIZE");
        // NOTE(review): this X6000 branch reads the X3000 producer-thread key —
        // possibly a copy-paste slip from the branch above; confirm whether a
        // DME_OBS_SCAN_X6000_PRODUCER_THREAD_COUNT key exists before changing.
        scanConfig.producerThreadCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X3000_PRODUCER_THREAD_COUNT");
        HCP_Log(INFO, MODULE) << "Use x6000 config." << HCPENDLOG;
    }

    // Ali storage always uses the X8000 producer thread count regardless of
    // the deployment type selected above.
    if (m_authInfo.getStorageType() == StorageType::ALI) {
        scanConfig.producerThreadCount = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
            "DME_OBS_SCAN_X8000_PRODUCER_THREAD_COUNT");
    }
}

void ObjectStorageBackupJob::FillScanConfigBasedOnObsInfo(const PrefixInfo& subObject, ScanConfig& scanConfig)
{
    // Fill the OBS connection/auth settings and the bucket+prefix set the
    // scanner should walk; for bucket-log incremental scans, also wire up the
    // per-prefix change-log directory.
    std::string certContent = OBSPlugin::Base64Decode(m_authInfo.certification);
    CertManager certMgr(m_certFilePath, m_jobId);
    certMgr.SaveFile(certContent);  // materialize the CA cert on disk for the scanner
    scanConfig.scanIO = IOEngine::OBJECTSTORAGE;
    scanConfig.obs.authArgs.storageType = m_authInfo.getStorageType();
    scanConfig.obs.authArgs.verifyInfo.accessKey = m_authInfo.ak;
    scanConfig.obs.authArgs.verifyInfo.secretKey = m_authInfo.sk;
    scanConfig.obs.authArgs.verifyInfo.endPoint = m_authInfo.endPoint;
    scanConfig.obs.authArgs.verifyInfo.useHttps = m_authInfo.openHttps();
    scanConfig.obs.authArgs.verifyInfo.certHttps = certContent;
    scanConfig.obs.authArgs.verifyInfo.caPath = certMgr.GetCAPath();
    scanConfig.obs.authArgs.verifyInfo.caFile = certMgr.GetCAFile();
    scanConfig.obs.authArgs.verifyInfo.useProxy = m_authInfo.openProxy();
    scanConfig.obs.authArgs.verifyInfo.proxyHostName = m_authInfo.proxyHostName;
    scanConfig.obs.authArgs.verifyInfo.proxyUserName = m_authInfo.proxyUserName;
    scanConfig.obs.authArgs.verifyInfo.proxyUserPwd = m_authInfo.proxyUserPwd;
    ObjectStorageBucket objectStorageBucket;
    objectStorageBucket.bucketName = subObject.bucketName;
    objectStorageBucket.prefix.assign(subObject.subPrefixs.begin(), subObject.subPrefixs.end());
    if (IsMultiNodeBackup() && subObject.isPrefixLevelObject) {
        // Multi-node prefix-level objects scan the parent prefix filter and
        // let the scanner split it by delimiter/depth.
        // NOTE(review): std::stoi throws on an empty or non-numeric
        // m_prefixSplitDepth — confirm the value is validated upstream.
        objectStorageBucket.prefixSplitDepth = std::stoi(m_protectedOBS.advParms.m_prefixSplitDepth);
        objectStorageBucket.delimiter = m_protectedOBS.advParms.m_prefixSplitter;
        objectStorageBucket.prefix.assign(subObject.parentPrefixFilter.begin(), subObject.parentPrefixFilter.end());
    }
    if (IsBucketLogInc()) {
        scanConfig.obs.IncUseLog = true;
        std::string parentPrefix = FindParentPrefix(subObject);
        objectStorageBucket.logDir = PathJoin(
            m_objectListDir,
            subObject.bucketName,
            OBJECT_LIST_FILE_UNIQUE_DIR_NAME,
            BucketLogParser::HashFunc(parentPrefix));
        HCP_Log(INFO, MODULE) << "logDir " << objectStorageBucket.logDir << HCPENDLOG;
        if (!OBSPlugin::RecurseCreateDirectory(objectStorageBucket.logDir)) { // ensure the changed-object list directory exists
            HCP_Log(WARN, MODULE) << "create logDir - " << objectStorageBucket.logDir << " failed" << HCPENDLOG;
        }
    }
    scanConfig.obs.buckets.emplace_back(objectStorageBucket);
}

bool ObjectStorageBackupJob::IsBackupAcl() const
{
    // ACL backup is requested when the advanced-parameter switch is "true".
    const std::string& aclSwitch = m_protectedOBS.advParms.m_isBackupAcl;
    return TRUE_STR == aclSwitch;
}

void ObjectStorageBackupJob::ReportScannerStatus(const ObjectStorageNativeScanStatistics& scanStatistics,
    const AppProtect::SubJobStatus::type& jobStatus, const std::string& jobLogLabel, int jobProgress)
{
    // Report the final scanner result to the agent. A COMPLETED scan that had
    // failed directories is still reported as FAILED under a specific label;
    // a non-completed scan maps m_scanStatus to an OBS error code.
    // fix: the original declared `errorCode` twice (an uninitialized outer
    // variable shadowed by an inner one in the else branch); a single
    // initialized declaration covers both paths with identical behavior.
    int64_t errorCode = ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    if (jobStatus == SubJobStatus::COMPLETED) {
        if (scanStatistics.m_totFailedDirs != 0) {
            if (m_scanError.errMessage == "AccessDenied" || m_scanError.errCode == "AccessDenied") {
                HCP_Log(ERR, MODULE) << "Scan AccessDenied " << m_scanError.errMessage << " "
                    << m_scanError.errCode << " "<< m_scanError.linuxErrCode << HCPENDLOG;
                errorCode = ObsErrorCode::ERROR_BACKUP_FAILED_NOACCESS_ERROR;
            }
            ReportJobDetailsWithDetailAndErrcode(std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, 0),
                "object_storage_plugin_backup_scan_fail_label", errorCode, m_scanError.errMessage);
        } else {
            ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100);
        }
    } else {
        if (m_scanStatus == SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE) {
            errorCode = ObsErrorCode::ERROR_OBS_SCAN_PROTECTED_SERVER_NOT_REACHABLE;
        } else if (m_scanStatus == SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE) {
            errorCode = ObsErrorCode::ERROR_OBS_SCAN_SECONDARY_SERVER_NOT_REACHABLE;
        }
        // Internal errors with a captured message carry it as extra detail.
        if (errorCode == ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR && !m_scanError.errMessage.empty()) {
            ReportJobDetailsWithDetailAndErrcode(std::make_tuple(JobLogLevel::TASK_LOG_ERROR, jobStatus, jobProgress),
                jobLogLabel, errorCode, m_scanError.errMessage);
        } else {
            ReportJobDetailsWithLabelAndErrcode(
                std::make_tuple(JobLogLevel::TASK_LOG_ERROR, jobStatus, jobProgress), jobLogLabel, errorCode);
        }
    }
}

void ObjectStorageBackupJob::ReportJobDetailsWithDetailAndErrcode(
    const std::tuple<JobLogLevel::type, SubJobStatus::type, const int> &reportInfo, const std::string& logLabel,
    const int64_t errCode, const std::string& message)
{
    // Push a job-detail record to the agent carrying a label, an error code,
    // and an extra human-readable message attached as additionalDesc.
    const auto& [logLevel, jobStatus, jobProgress] = reportInfo;
    SubJobDetails subJobDetails;
    ActionResult result;
    std::vector<LogDetail> logDetailList;
    LogDetail logDetail {};
    logDetail.__set_additionalDesc(std::vector<std::string>{message});
    AddLogDetail(logDetail, logLabel, logLevel);
    AddErrCode(logDetail, errCode);
    REPORT_LOG2AGENT(subJobDetails, result, logDetailList, logDetail, jobProgress, 0, jobStatus);
}

void ObjectStorageBackupJob::ReportScannerCompleteStatus(const ObjectStorageNativeScanStatistics& scanStatistics)
{
    // Report the scan-complete summary. When any files failed, the job is
    // flagged FAILED under a warning label (with the failed-file count added
    // to the label arguments); otherwise a plain completion label is sent.
    const std::string totalFiles = std::to_string(scanStatistics.m_totFiles);
    const std::string totalSize = OBSPlugin::FormatCapacity(scanStatistics.m_totalSize);
    const std::string filesToBackup = std::to_string(scanStatistics.m_totFilesToBackup);
    const std::string sizeToBackup = OBSPlugin::FormatCapacity(scanStatistics.m_totalSizeToBackup);
    if (scanStatistics.m_totFailedFiles != 0) {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_WARNING, SubJobStatus::FAILED, OBSPlugin::PROGRESS100),
            "object_storage_plugin_backup_scan_completed_with_warn_label",
            0,
            totalFiles,
            totalSize,
            std::to_string(scanStatistics.m_totFailedFiles),
            filesToBackup,
            sizeToBackup);
    } else {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100),
            "object_storage_plugin_backup_scan_completed_label",
            0,
            totalFiles,
            totalSize,
            filesToBackup,
            sizeToBackup);
    }
}

// Poll the scanner for the given prefix until it leaves the in-progress state, relaying
// periodic statistics to the agent/PM and honouring abort requests.
// Out-params jobStatus/jobLogLabel/jobProgress carry the final sub-job state for the caller.
// NOTE(review): the esn parameter is not used in this body — presumably kept for interface
// symmetry with sibling monitors; confirm before removing.
// Always returns true; failure is conveyed through jobStatus.
bool ObjectStorageBackupJob::MonitorScanner(const PrefixInfo &prefixInfo, const std::string &esn,
    SubJobStatus::type &jobStatus, std::string &jobLogLabel, int &jobProgress)
{
    HCP_Log(INFO, MODULE) << "Enter Monitor Scanner " << prefixInfo.bucketName << HCPENDLOG;
    SCANNER_TASK_STATUS scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS;
    jobStatus = SubJobStatus::RUNNING;
    jobProgress = 0;
    jobLogLabel = "";

    ObjectStorageNativeScanStatistics scanStatistics;
    do {
        m_scanStatus = m_scanner->GetStatus();
        /* Ensure scanner is ready and start to scan */
        if (m_scanStatus == SCANNER_STATUS::INIT) {
            Module::SleepFor(std::chrono::seconds(SUBTASK_WAIT_FOR_SCANNER_READY_IN_SEC));
            continue;
        }
        // Refresh local statistics, map the scanner status to sub-job status/label, and report.
        UpdateScannerStatistics(m_scanStats, scanStatistics);
        FillMonitorScannerVarDetails(scanTaskStatus, jobStatus, jobLogLabel, jobProgress);
        UpdateScanStatInfo(prefixInfo, scanStatistics);
        // Any terminal state (success/failed/aborted) ends the monitoring loop.
        if (scanTaskStatus != SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS) {
            break;
        }

        // Forward an external abort request to the scanner; the loop then waits for
        // the scanner to transition into ABORTED.
        if (IsAbortJob()) {
            HCP_Log(INFO, MODULE) << "Scanner - Abort is invocked for" << " taskid: " << m_jobId
                << ", subtaskid: " << m_subJobId << HCPENDLOG;
            if (m_scanner->Abort() != SCANNER_STATUS::SUCCESS) {
                HCP_Log(ERR, MODULE) << "scanner Abort is failed" << HCPENDLOG;
            }
        }
        Module::SleepFor(std::chrono::seconds(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC));
    } while (true);

    // Capture failure details before tearing the scanner down.
    QueryScannerStatus();

    m_scanner->Destroy();
    m_scanner = nullptr;

    // Persist the final statistics (plus the prefix just scanned) so a restarted job
    // can resume from this point.
    m_scanStats = scanStatistics;
    m_scanStats.m_scannedPrefixInfo.emplace_back(prefixInfo);
    const std::string fileName = m_scanStatisticsPath + "/" + m_subJobId + SCAN_SUB_JOB_RESTORE_INFO_FILE_NAME_SUFFIX;
    if (!JsonFileTool::WriteToFile(m_scanStats, fileName)) {
        HCP_Log(WARN, MODULE) << "Write scanStatistics failed, file name " << fileName << HCPENDLOG;
    }

    HCP_Log(INFO, MODULE) << "Exit Monitor Scanner" << HCPENDLOG;
    return true;
}

// Cache the scanner's failure details (if any) for later error reporting.
void ObjectStorageBackupJob::QueryScannerStatus()
{
    m_scanError = m_scanner->QueryFailure();
}
 
// Translate the cached scanner status (m_scanStatus) into the sub-job status, progress,
// log label and the monitor-loop task status. States not listed leave the out-params untouched.
void ObjectStorageBackupJob::FillMonitorScannerVarDetails(SCANNER_TASK_STATUS& scanTaskStatus,
    SubJobStatus::type& jobStatus, std::string& jobLogLabel, int& jobProgress)
{
    switch (m_scanStatus) {
        case SCANNER_STATUS::COMPLETED:
            HCP_Log(INFO, MODULE) << "Scan completed" << HCPENDLOG;
            jobProgress = OBSPlugin::PROGRESS100;
            jobStatus = SubJobStatus::COMPLETED;
            jobLogLabel = "";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_SUCCESS;
            break;
        case SCANNER_STATUS::FAILED:
            HCP_Log(ERR, MODULE) << "Scan failed" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "object_storage_plugin_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ABORT_IN_PROGRESS:
            HCP_Log(ERR, MODULE) << "Scan abort in progress" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTING;
            jobLogLabel = "";
            break;
        case SCANNER_STATUS::ABORTED:
            HCP_Log(ERR, MODULE) << "Scan aborted" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTED;
            jobLogLabel = "";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_ABORTED;
            break;
        case SCANNER_STATUS::SCAN_READ_COMPLETED:
        case SCANNER_STATUS::CTRL_DIFF_IN_PROGRESS:
            // Intermediate states: keep the sub job running with no label.
            jobStatus = SubJobStatus::RUNNING;
            jobLogLabel = "";
            break;
        case SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as sec nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "object_storage_plugin_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as protected nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "object_storage_plugin_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ERROR_INC_TO_FULL:
            HCP_Log(ERR, MODULE) << "Scan failed as to change INC to FULL Backup" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            jobLogLabel = "object_storage_plugin_backup_scan_fail_label";
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        default:
            break;
    }
}

// Report scanner progress, attaching a PM label at most once per report cycle; in between
// cycles only a plain (unlabeled) heartbeat is sent.
void ObjectStorageBackupJob::ReportScannerRunningStatus(const ObjectStorageNativeScanStatistics& scanStatistics)
{
    const bool labelDue =
        (GetCurrentTimeInSeconds() - m_lastScannerReportTime) > SCANNER_REPORT_CIRCLE_TIME;
    if (!labelDue) {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
        return;
    }
    ReportJobDetailsWithLabelAndErrcode(
        std::make_tuple(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0),
        "object_storage_plugin_backup_scan_inprogress_label",
        0,
        std::to_string(scanStatistics.m_totFiles),
        FormatCapacity(scanStatistics.m_totalSize),
        std::to_string(scanStatistics.m_totFilesToBackup),
        FormatCapacity(scanStatistics.m_totalSizeToBackup));
    m_lastScannerReportTime = OBSPlugin::GetCurrentTimeInSeconds();
}

// Query the framework for the most recent FULL/INCREMENT copy of the protected object,
// retrying transient framework errors up to EXECEPTION_RETRY_TIMES.
// Returns false when no previous copy exists, on an unknown exception, or after
// exhausting all retries; on success lastCopy is filled in.
bool ObjectStorageBackupJob::QueryPreviousCopy(Copy& lastCopy)
{
    DBGLOG("Enter QueryPreviousCopy, jobId: %s", m_jobId.c_str());
    std::set<AppProtect::CopyDataType>
        dataTypes {AppProtect::CopyDataType::INCREMENT_COPY, AppProtect::CopyDataType::FULL_COPY};
    Application appInfo = m_backupJobPtr->protectObject;
    std::string copyId = m_backupJobPtr->copy.id;
    int retryCnt = 0;
    do {
        // Refresh the job keep-alive periodically so the long retry loop is not
        // mistaken for a hung job.
        if (retryCnt % MIDDLE_EXECEPTION_RETRY_TIMES == 0) {
            KeepJobAlive(m_jobId, m_subJobId);
        }
        try {
            JobService::QueryPreviousCopy(lastCopy, appInfo, dataTypes, copyId, m_jobId);
        } catch (AppProtectFrameworkException &e) {
            WARNLOG("QueryPreviousCopy failed, code: %d, retry: %d, jobId: %s", e.code, retryCnt, m_jobId.c_str());
            // "Not exist" is definitive — no point retrying.
            if (e.code == E_QUERIED_RESOURCE_NOT_EXIST) {
                ERRLOG("Exit QueryPreviousCopy, jobId: %s, last copy not exist!", m_jobId.c_str());
                return false;
            }
            // Transient framework error: back off and retry.
            Module::SleepFor(std::chrono::seconds(EXECEPTION_RETRY_INTERVAL));
            continue;
        } catch (...) {
            ERRLOG("unknown exception happened, QueryPreviousCopy failed!");
            return false;
        }
        // No exception: the query succeeded.
        DBGLOG("Exit QueryPreviousCopy success: %s for job: %s", lastCopy.id.c_str(), m_jobId.c_str());
        return true;
    } while (++retryCnt <= EXECEPTION_RETRY_TIMES);
    ERRLOG("Exit QueryPreviousCopy failed: for job: %s, after max retry & failed", m_jobId.c_str());
    return false;
}

bool ObjectStorageBackupJob::GetCtrlFile(std::string srcDir, std::string dstDir,
    std::vector<std::string>& srcFileList, const PrefixInfo& prefixInfo)
{
    if (NeedContinueCheckpoint() && m_checkPointRecord.errorCode[SCANSTATUS] == "done") {
        std::vector<std::string> checkFileList {};
        std::vector<std::string> scanList {};
        
        if (!findFilesInCachePath(checkFileList)) {
            return false;
        }
        std::string scanPath = PathJoin(m_cacheFsPath, "backup-job", "scan", "ctrl",
            GetScanGenFileRelativeDir(prefixInfo));
        
        GetFileListInDirectory(scanPath, scanList);

        for (const auto& filePath : scanList) {
            size_t prefixPos = filePath.rfind('/');
            if (prefixPos == std::string::npos) {
                continue;
            }
            std::string fileName = "Success_" + filePath.substr(prefixPos + 1);
            std::string successFileName = PathJoin(m_subCheckPath, fileName);
            if (IsFileExist(successFileName)) {
                HCP_Log(INFO, MODULE) << "Skip success controlfile: " << successFileName << HCPENDLOG;
                continue;
            }
            srcFileList.push_back(filePath);
        }
        OBSPlugin:RemoveFile(PathJoin(m_subCheckPath, "obs-statistics"));
    } else {
        if (!checkFilePathAndGetSrcFileList(srcDir, dstDir, srcFileList)) {
            return false;
        }
    }

    return true;
}


// Turn each valid control file produced by the scanner for this prefix into a backup
// sub job, copying the control file into the backup control directory and batching
// sub-job creation in groups of NUMBER10. Honours external abort requests.
// Returns false when sub-job initialization or creation fails.
bool ObjectStorageBackupJob::CreateSubTasksFromCtrlFile(
    const PrefixInfo& prefixInfo, const std::string& esn, uint32_t subTaskType)
{
    const std::string srcDir = m_scanControlPath + GetScanGenFileRelativeDir(prefixInfo);
    const std::string dstDir = m_backupControlPath + GetScanGenFileRelativeDir(prefixInfo);

    std::vector<std::string> srcFileList {};
    std::vector<SubJob> subJobList {};
    std::vector<std::string> ctrlFileList {};
    // Shared across invocations to throttle repeated create-subtask error reports.
    static int64_t lastCreateJobErrTime = 0;

    if (!GetCtrlFile(srcDir, dstDir, srcFileList, prefixInfo)) {
        return false;
    }

    // BUGFIX: size() is size_t — "%d" is undefined behavior on LP64; use "%zu".
    INFOLOG("Enter CreateSubTasksFromCtrlFile, NumOfCtrlFiles: %zu", srcFileList.size());

    for (uint32_t i = 0; i < srcFileList.size(); ++i) {
        if (IsAbortJob()) {
            INFOLOG("Exit Abort for taskid: %s, subtaskid: %s", m_jobId.c_str(), m_subJobId.c_str());
            return true;
        }

        std::string srcPath = srcFileList[i];
        if (!IsValidCtrlFile(subTaskType, srcPath)) {
            continue;
        }

        HCP_Log(INFO, MODULE) << "srcPath: " << srcPath << HCPENDLOG;

        // Strip the parent directory to get the bare control-file name.
        std::string ctrlFileParentPath = OBSPlugin::GetPathName(srcPath);

        std::string ctrlFile = srcPath.substr(ctrlFileParentPath.length() + NUMBER1,
            srcPath.length() - ctrlFileParentPath.length() - NUMBER1);

        std::string dstPath = OBSPlugin::PathJoin(dstDir, ctrlFile);

        HCP_Log(INFO, MODULE) << "dstPath: " << dstPath << HCPENDLOG;

        OBSPlugin::CopyFile(srcPath, dstPath);

        SubJob subJob;
        // The sub job carries the control-file path relative to the cache FS root.
        if (!InitSubJobInfo(prefixInfo, dstPath.substr(m_cacheFsPath.length(), std::string::npos), esn, subJob)) {
            return false;
        }
        subJobList.push_back(subJob);
        ctrlFileList.push_back(srcPath);

        // Flush sub-job creation in batches of NUMBER10.
        if (subJobList.size() % NUMBER10 != 0) {
            continue;
        }
        if (!CheckAndRetryCreateSubTask(subJobList, ctrlFileList, lastCreateJobErrTime, true,
            NeedContinueCheckpoint())) {
            HCP_Log(ERR, MODULE) << "Create copy sub task failed" << HCPENDLOG;
            return false;
        }
    }

    // Flush any remaining sub jobs that did not fill a whole batch.
    if (!CheckAndRetryCreateSubTask(subJobList, ctrlFileList, lastCreateJobErrTime, true, NeedContinueCheckpoint())) {
        HCP_Log(ERR, MODULE) << "Create copy sub task failed" << HCPENDLOG;
        return false;
    }

    return true;
}

bool ObjectStorageBackupJob::InitSubJobInfo(const PrefixInfo& prefixInfo, const std::string& ctrlFile,
    const std::string& esn, SubJob &subJob)
{
    std::string subTaskName;
    uint32_t subTaskType = 0;
    uint32_t subTaskPrio = 0;
    GetSubJobTypeByFileName(ctrlFile, subTaskName, subTaskType, subTaskPrio);
    ObjectStorageBackupSubJob subJobInfo;
    subJobInfo.m_SubTaskType = subTaskType;
    subJobInfo.esn = esn;
    subJobInfo.m_ControlFile = ctrlFile;
    subJobInfo.prefixInfo.emplace_back(prefixInfo);

    std::string subJobInfoStr;
    if (!Module::JsonHelper::StructToJsonString(subJobInfo, subJobInfoStr)) {
        HCP_Log(ERR, MODULE) << "Convert to json failed for subJob info: " << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "subJobInfoStr is : "  << subJobInfoStr << HCPENDLOG;
    subJob.__set_jobId(m_jobId);
    subJob.__set_jobName(subTaskName);
    subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subJob.__set_policy(ExecutePolicy::ANY_NODE);
    subJob.__set_jobInfo(subJobInfoStr);
    subJob.__set_jobPriority(subTaskPrio);
    subJob.__set_ignoreFailed(true);

    return true;
}

// Create the copy-metadata sub job for one prefix, retrying retriable creation errors
// indefinitely until success or an external abort. Returns false only when creation
// fails with a non-retriable error (serialization failure or a hard CreateSubTask error).
bool ObjectStorageBackupJob::CreateBackupCopyMetaTask(const PrefixInfo& prefixInfo, const std::string& esn)
{
    ObjectStorageBackupSubJob backupSubJob;
    backupSubJob.m_SubTaskType = SUBJOB_TYPE_COPYMETA_PHASE;
    backupSubJob.esn = esn;
    backupSubJob.prefixInfo.emplace_back(prefixInfo);
    std::string backupSubJobStr;
    if (!Module::JsonHelper::StructToJsonString(backupSubJob, backupSubJobStr)) {
        HCP_Log(ERR, MODULE) << "Exit CreateBackupJobCopyMetaTask failed!" << HCPENDLOG;
        return false;
    }

    // Lazily create the id generator used to make the sub-job name unique.
    if (m_idGenerator == nullptr) {
        InitIdGenerator();
    }
    const std::string subJobName = SUBJOB_TYPE_COPYMETA_JOBNAME + std::to_string(m_idGenerator->GenerateId());
    HCP_Log(INFO, MODULE) << "Create sub job " << subJobName
        << " for prefix sub dir " << GetScanGenFileRelativeDir(prefixInfo) << HCPENDLOG;

    SubJob subJob;
    subJob.__set_jobId(m_jobId);
    subJob.__set_jobName(subJobName);
    subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subJob.__set_policy(ExecutePolicy::ANY_NODE);
    subJob.__set_jobInfo(backupSubJobStr);
    subJob.__set_jobPriority(SUBJOB_TYPE_COPYMETA_PHASE_PRIO);
    // Copy-meta failures must fail the main job, unlike data-copy sub jobs.
    subJob.__set_ignoreFailed(false);
    do {
        // Abort ends the loop without error; the function still reports success.
        if (IsAbortJob()) {
            HCP_Log(WARN, MODULE) << "Exit received Abort for taskid: " << m_jobId << ", subtaskid: "
                << m_subJobId << HCPENDLOG;
            break;
        }
        int ret = CreateSubTask(subJob);
        if (ret == Module::SUCCESS) {
            break;
        } else if (ret == Module::RETRY) {
            HCP_Log(WARN, MODULE) << "Create subtask failed with retriable error"<< HCPENDLOG;
            SleepForCreateSubTaskError();
            continue;
        } else {
            HCP_Log(ERR, MODULE) << "Exit CreateBackupCopyMetaTask, Create subtask failed" << HCPENDLOG;
            return false;
        }
    } while (true);

    return true;
}

// Execute one data-copy sub job: run the backup engine against the sub job's control
// file, monitor it (with retry on a stuck engine), then publish statistics and status.
// Always returns Module::SUCCESS — per-sub-job failure is conveyed to the agent through
// the reported jobStatus, not the return value.
int ObjectStorageBackupJob::ExecuteDataCopySubJob(ObjectStorageBackupSubJob& subJob)
{
    INFOLOG("Enter ExecuteDataCopySubJob: %s, %s, controlFile: %s, type:%u, storageAbnormal:%s, esn:%s",
        m_jobId.c_str(), m_subJobId.c_str(), subJob.m_ControlFile.c_str(), subJob.m_SubTaskType,
        subJob.storageAbnormal.c_str(), subJob.esn.c_str());

    if (NeedContinueCheckpoint()) {
        CreateControlCheckpointFile(subJob);
    }

    // In a multi-node backup, an abnormal storage flag means this node must hand the
    // sub job over to another storage unit instead of running it.
    if (IsMultiNodeBackup() && (subJob.storageAbnormal == TRUE_STR)) {
        return SwitchStorageForMutiNodeBackup(subJob);
    }
    auto backupSubJob = subJob;

    // Aggregated incremental backups skip delete_ control files entirely.
    if (SkipDeleteStage(backupSubJob)) {
        return Module::SUCCESS;
    }

    BackupStats backupStatistics {};
    int jobProgress = 0;
    uint32_t retryCnt = 0;
    SubJobStatus::type jobStatus = SubJobStatus::FAILED;
    std::string jobLogLabel = "";

    m_nodeLevelTaskInfo.Insert(m_jobId);
    m_nodeLevelTaskInfo.IncrSubTasksCount();
    PrintJobInfo();
    // The control file stored in the sub job is relative to the cache FS root.
    backupSubJob.m_ControlFile = m_cacheFsPath + dir_sep + backupSubJob.m_ControlFile;
    // BUGFIX: initialize explicitly — if StartBackup() fails on the first iteration the
    // loop exits via break, previously leaving monitorRet indeterminate (latent hazard).
    MONITOR_BACKUP_RES_TYPE monitorRet = MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_FAILED;

    const time_t backupStartTime = OBSPlugin::GetCurrentTimeInSeconds();
    do {
        if (!StartBackup(backupSubJob)) {
            HCP_Log(ERR, MODULE) << "StartBackup failed" << HCPENDLOG;
            break;
        }
        monitorRet = MonitorBackup(backupStatistics, jobStatus, jobLogLabel, jobProgress);
        if (m_backup != nullptr) {
            m_backup->Destroy();
            m_backup.reset();
        }
        // NEEDRETRY means the engine was detected as stuck and aborted; retry a fresh run.
    } while (monitorRet == MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY && ++retryCnt < BACKUP_RETRY_CNT);
    const time_t backupEndTime = OBSPlugin::GetCurrentTimeInSeconds();

    UpdateBackupSubTaskStatistics(backupStatistics, m_cacheFsPath, backupStartTime, backupEndTime);

    m_dataSize = backupStatistics.noOfBytesCopied / NUMBER1024;
    ReportBackupStatus(jobStatus, jobLogLabel);
    m_nodeLevelTaskInfo.DecrSubTasksCount();

    if (NeedContinueCheckpoint()) {
        ChangeJobStatus(jobStatus, subJob);
    }

    return Module::SUCCESS;
}

// Update the checkpoint bookkeeping according to the final sub-job status: on failure the
// per-sub-job statistics and checkpoint files are removed and the error is recorded; on
// success the checkpoint file is renamed with a "Success_" marker and the record cached.
void ObjectStorageBackupJob::ChangeJobStatus(SubJobStatus::type jobStatus, ObjectStorageBackupSubJob& subJob)
{
    HCP_Log(INFO, MODULE) << "Object Backup SubjobStatus " << jobStatus << HCPENDLOG;
    if (jobStatus != SubJobStatus::COMPLETED) {
        std::string statsFilePath = m_cacheFsPath + "/obs-statistics/backup-stats-sub-" + m_subJobId + ".json";
        OBSPlugin::RemoveFile(statsFilePath);
        DeleteControlCheckpointFile(subJob);
        // BUGFIX: log message typo — was "Date backup fail".
        HCP_Log(INFO, MODULE) << "Data backup fail" << HCPENDLOG;
        RecordErrInfo(DATASTATUS);
    } else {
        // Mark the checkpoint as done so a restarted job skips this control file.
        std::string successName = "Success_" + OBSPlugin::GetFileName(subJob.checkpointCachePath);
        std::string successPath = PathJoin(OBSPlugin::GetPathName(subJob.checkpointCachePath), successName);
        INFOLOG("Success checkpoint path %s ", successPath.c_str());
        OBSPlugin::Rename(subJob.checkpointCachePath, successPath);
        WriteToCache(m_checkPointRecord);
    }
}

// Aggregated incremental backups do not process delete_ control files; report the sub
// job as immediately complete instead. Returns true when the stage should be skipped.
bool ObjectStorageBackupJob::SkipDeleteStage(const ObjectStorageBackupSubJob& backupSubJob)
{
    if (!IsFullBackup() && IsAggregate() && backupSubJob.m_ControlFile.find("delete_") != std::string::npos) {
        // BUGFIX: log message typo — was "Notd handle".
        INFOLOG("Not handling delete_ control file for Aggregated increment backup now");
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100);
        return true;
    }

    return false;
}

bool ObjectStorageBackupJob::StartBackup(const ObjectStorageBackupSubJob& backupSubJob)
{
    BackupParams backupParams {};
    FillBackupConfig(backupSubJob, backupParams);

    m_backup = FS_Backup::BackupMgr::CreateBackupInst(backupParams);
    if (m_backup == nullptr) {
        HCP_Log(ERR, MODULE) << "Create backup instance failed" << HCPENDLOG;
        return false;
    }

    if (m_backup->Enqueue(backupSubJob.m_ControlFile) != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "enqueue backup instance failed" << HCPENDLOG;
        return false;
    }
    if (m_backup->Start() != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "Start backup instance failed" << HCPENDLOG;
        return false;
    }

    return true;
}

// Assemble the full BackupParams for one sub job by delegating to the engine/phase/common
// fill helpers and then setting the scan-related fields. Call order matters: the common
// params are layered on top of the engine info.
void ObjectStorageBackupJob::FillBackupConfig(const ObjectStorageBackupSubJob& backupSubJob, BackupParams& backupParams)
{
    // Refresh the bucket-encoding switch map before engine parameters are built.
    ReadEncodeBucket(m_bucketPrefixMap, m_encodeSwitch);
    FillBackupEngineInfo(backupSubJob, backupParams);
    FillBackupConfigPhase(backupSubJob, backupParams);
    FillBackupCommonParams(backupParams, m_metaFsPathMap[backupSubJob.esn]);
    backupParams.commonParams.controlFile = OBSPlugin::GetFileName(backupSubJob.m_ControlFile);
    backupParams.backupType = IsFullBackup() ? BackupType::BACKUP_FULL : BackupType::BACKUP_INC;
    // The engine reads metadata from the "latest" scan directory of the first prefix.
    backupParams.scanAdvParams.metaFilePath =
        m_scanMetaPath + GetScanGenFileRelativeDir(backupSubJob.prefixInfo.front()) + "latest";
    backupParams.scanAdvParams.useXmetaFileHandle = false;
}

// Configure the source (object storage) and destination (POSIX) engine parameters:
// credentials/TLS/proxy settings, the bucket+prefix list, and thread/memory limits
// read from the plugin configuration.
void ObjectStorageBackupJob::FillBackupEngineInfo(const ObjectStorageBackupSubJob& backupSubJob,
    BackupParams& backupParams)
{
    // Materialize the (base64-encoded) certificate to disk so the engine can use it as a CA.
    std::string certContent = OBSPlugin::Base64Decode(m_authInfo.certification);
    CertManager certMgr(m_certFilePath, m_jobId);
    certMgr.SaveFile(certContent);
    backupParams.srcEngine = BackupIOEngine::OBJECTSTORAGE;
    ObjectBackupAdvanceParams objectBackupAdvanceParams;
    objectBackupAdvanceParams.authArgs.storageType = m_authInfo.getStorageType();
    objectBackupAdvanceParams.authArgs.verifyInfo.accessKey = m_authInfo.ak;
    objectBackupAdvanceParams.authArgs.verifyInfo.secretKey = m_authInfo.sk;
    objectBackupAdvanceParams.authArgs.verifyInfo.endPoint = m_authInfo.endPoint;
    objectBackupAdvanceParams.authArgs.verifyInfo.useHttps = m_authInfo.openHttps();
    objectBackupAdvanceParams.authArgs.verifyInfo.certHttps = certContent;
    objectBackupAdvanceParams.authArgs.verifyInfo.caPath = certMgr.GetCAPath();
    objectBackupAdvanceParams.authArgs.verifyInfo.caFile = certMgr.GetCAFile();
    objectBackupAdvanceParams.authArgs.verifyInfo.useProxy = m_authInfo.openProxy();
    objectBackupAdvanceParams.authArgs.verifyInfo.proxyHostName = m_authInfo.proxyHostName;
    objectBackupAdvanceParams.authArgs.verifyInfo.proxyUserName = m_authInfo.proxyUserName;
    objectBackupAdvanceParams.authArgs.verifyInfo.proxyUserPwd = m_authInfo.proxyUserPwd;
    // One ObsBucket entry per prefix; the encode flag is looked up per bucket.
    for (const auto& object : backupSubJob.prefixInfo) {
        ObsBucket objectStorageBucket;
        objectStorageBucket.encodeEnable = CheckEncode(m_authInfo.endPoint, object.bucketName, m_bucketPrefixMap,
            m_encodeSwitch);
        objectStorageBucket.bucketName = object.bucketName;
        objectStorageBucket.prefix.assign(object.subPrefixs.begin(), object.subPrefixs.end());
        objectBackupAdvanceParams.buckets.emplace_back(objectStorageBucket);
    }
    objectBackupAdvanceParams.threadNum = GetConfigIntKey(
        OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_SERVER_THREAD_NUM");
    objectBackupAdvanceParams.maxMemory = GetConfigIntKey(
        OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_SERVER_MAX_MEMORY_SIZE");
    objectBackupAdvanceParams.saveMeta = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_BACKUP_META") > 0;
    objectBackupAdvanceParams.excludeMeta =
            GetConfigStrKey(DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_EXCLUSE_META");

    backupParams.srcAdvParams = std::make_shared<ObjectBackupAdvanceParams>(objectBackupAdvanceParams);

    // Destination: write to the node's data filesystem via POSIX.
    backupParams.dstEngine = BackupIOEngine::POSIX;
    HostBackupAdvanceParams posixBackupAdvanceParams;
    posixBackupAdvanceParams.dataPath = m_dataFsPathMap[backupSubJob.esn];
    posixBackupAdvanceParams.threadNum = GetConfigIntKey(
        OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_SERVER_THREAD_NUM");
    posixBackupAdvanceParams.maxMemory = GetConfigIntKey(
        OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_SERVER_MAX_MEMORY_SIZE");
    backupParams.dstAdvParams = std::make_shared<HostBackupAdvanceParams>(posixBackupAdvanceParams);
}

// Map the sub-task type onto the engine phase (copy vs delete); other types leave
// backupParams.phase untouched.
void ObjectStorageBackupJob::FillBackupConfigPhase(const ObjectStorageBackupSubJob& backupSubJob,
    BackupParams& backupParams)
{
    const uint32_t taskType = backupSubJob.m_SubTaskType;
    if (taskType == OBSPlugin::SUBJOB_TYPE_DATACOPY_COPY_PHASE) {
        backupParams.phase = BackupPhase::COPY_STAGE;
        return;
    }
    if (taskType == OBSPlugin::SUBJOB_TYPE_DATACOPY_DELETE_PHASE) {
        backupParams.phase = BackupPhase::DELETE_STAGE;
    }
}

// Fill the engine's common parameters: job identity, meta path, sqlite behaviour and the
// buffer limits read from the plugin configuration (QPS key depends on the storage type).
void ObjectStorageBackupJob::FillBackupCommonParams(BackupParams& backupParams, const std::string& metaPath)
{
    backupParams.commonParams.jobId = m_jobId;
    backupParams.commonParams.subJobId = m_subJobId;
    backupParams.commonParams.reqID = m_subJobRequestId;
    backupParams.commonParams.metaPath = metaPath;
    backupParams.commonParams.writeDisable = false;
    backupParams.commonParams.writeMeta = false;
    backupParams.commonParams.useSubJobSqlite = true;
    backupParams.commonParams.failureRecordRootPath = m_failureRecordRoot;

    backupParams.commonParams.sqliteLocalPath = GetConfigStrKey(
        DME_OBS_CONFIG_SECTION, "OBS_SQLITE_DIR");
    // Alibaba Cloud uses a combined read/write QPS limit; other vendors use the GET limit.
    if (m_authInfo.getStorageType() == StorageType::ALI) {
        backupParams.commonParams.maxBufferCnt = GetConfigIntKey(
            OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_MAX_ALI_QPS_RW");
    } else {
        backupParams.commonParams.maxBufferCnt = GetConfigIntKey(
            OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_BACKUP_MAX_OBS_QPS_GET");
    }
    backupParams.commonParams.maxBufferSize = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION,
        "DME_OBS_BACKUP_MAX_TOTAL_BLOCK_BUFFER_SIZE");

    HCP_Log(INFO, MODULE) << "Backup params meta path:" << backupParams.commonParams.metaPath << HCPENDLOG;

    // Aggregated backups get their own format/threshold parameters; native backups
    // generate the sqlite index directly.
    if (IsAggregate()) {
        FillAggregateBackupCommonParams(backupParams);
    } else {
        backupParams.commonParams.backupDataFormat = BackupDataFormat::NATIVE;
        backupParams.commonParams.genSqlite = true;
    }
}

// Set the aggregation-specific common parameters (format, size thresholds from the
// protected-object advanced settings, and the aggregation thread count).
void ObjectStorageBackupJob::FillAggregateBackupCommonParams(BackupParams& backupParams)
{
    auto& common = backupParams.commonParams;
    common.backupDataFormat = BackupDataFormat::AGGREGATE;
    // Thresholds are stored as strings in the advanced parameters.
    common.maxAggregateFileSize = std::stoul(m_protectedOBS.advParms.m_maxSizeAfterAggregate);
    common.maxFileSizeToAggregate = std::stoul(m_protectedOBS.advParms.m_maxSizeToAggregate);
    common.aggregateThreadNum = GetConfigIntKey(DME_OBS_CONFIG_SECTION, "DME_OBS_AGGR_THREAD_POOL_CNT");
    HCP_Log(INFO, MODULE) <<" FillBackupCommonParams Aggregate Info,"
        << " maxAggregateFileSize: " << common.maxAggregateFileSize
        << " maxFileSizeToAggregate: " << common.maxFileSizeToAggregate
        << " aggregateThreadNum: " << common.aggregateThreadNum
        << HCPENDLOG;
}

// Poll the running backup instance until it reaches a terminal state, detecting a stuck
// engine (no statistics change for BACKUP_STUCK_TIME while still in progress) and
// requesting an abort-and-retry in that case. Out-params carry the final status/label/
// progress; backupStatistics accumulates the latest engine statistics.
ObjectStorageBackupJob::MONITOR_BACKUP_RES_TYPE ObjectStorageBackupJob::MonitorBackup(BackupStats& backupStatistics,
    SubJobStatus::type& jobStatus, std::string& jobLogLabel, int& jobProgress)
{
    HCP_Log(INFO, MODULE) << "Enter Monitor Backup" << HCPENDLOG;
    jobStatus = SubJobStatus::RUNNING;
    jobProgress = 0;
    jobLogLabel = "";
    BackupStats tmpStats;
    time_t statLastUpdateTime = OBSPlugin::GetCurrentTimeInSeconds();
    GetBackupFailDetails();

    do {
        m_backupStatus = m_backup->GetStatus();
        HCP_Log(INFO, MODULE) << "m_backupStatus:" << static_cast<int>(m_backupStatus) << HCPENDLOG;

        tmpStats = m_backup->GetStats();
        /* If every file has already been written but the task is still unfinished, the
           aggregation sqlite work is still running — no need to re-run the backup. */
        if (backupStatistics != tmpStats) {
            statLastUpdateTime = OBSPlugin::GetCurrentTimeInSeconds();
            INFOLOG("backup statistics last update time: %ld", statLastUpdateTime);
            backupStatistics = tmpStats;
        } else if (m_backupStatus == BackupPhaseStatus::INPROGRESS &&
            (tmpStats.noOfFilesCopied + tmpStats.noOfFilesFailed != tmpStats.noOfFilesToBackup) &&
            OBSPlugin::GetCurrentTimeInSeconds() - statLastUpdateTime >
            GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION, "BACKUP_STUCK_TIME")) {
            // BACKUP_STUCK_TIME defaults to 3000s (per original comment).
            // copied + failed != files-to-backup means work remains but nothing moved:
            // treat the engine as stuck and ask the caller to retry.
            HandleMonitorStuck(backupStatistics, jobStatus, jobLogLabel, jobProgress);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_NEEDRETRY;
        }
        UpdateBackupStatistics(backupStatistics);

        if (m_backupStatus == BackupPhaseStatus::COMPLETED) {
            HandleMonitorComplete(jobStatus, jobLogLabel, jobProgress);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_SUCCESS;
        } else if (m_backupStatus == BackupPhaseStatus::FAILED ||
            m_backupStatus == BackupPhaseStatus::FAILED_NOACCESS ||
            m_backupStatus == BackupPhaseStatus::FAILED_NOSPACE ||
            m_backupStatus == BackupPhaseStatus::FAILED_SEC_SERVER_NOTREACHABLE ||
            m_backupStatus == BackupPhaseStatus::FAILED_PROT_SERVER_NOTREACHABLE) {
            HandleMonitorFailed(jobStatus, jobLogLabel);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_FAILED;
        } else if (m_backupStatus == BackupPhaseStatus::ABORTED) {
            HandleMonitorAborted(jobStatus, jobLogLabel);
            return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_ABORTED;
        } else if (m_backupStatus == BackupPhaseStatus::ABORT_INPROGRESS) {
            jobStatus = SubJobStatus::ABORTING;
            jobLogLabel = "";
        }

        // Forward an external abort request to the engine; the loop then waits for ABORTED.
        if (IsAbortJob()) {
            INFOLOG("Abort Backup taskId: %s, subtask: %s", m_jobId.c_str(), m_subJobId.c_str());
            m_backup->Abort();
        }
        // Poll the status every 20 seconds.
        Module::SleepFor(std::chrono::seconds(EXECUTE_SUBTASK_MONITOR_DUR_IN_SEC));
    } while (true);

    HCP_Log(INFO, MODULE) << "Exit Monitor Backup" << HCPENDLOG;

    return MONITOR_BACKUP_RES_TYPE::MONITOR_BACKUP_RES_TYPE_SUCCESS;
}

// React to a stuck backup engine: abort it, fold everything that never completed into
// the failed counters, publish the statistics, and mark the sub job complete so the
// caller's retry logic can take over.
void ObjectStorageBackupJob::HandleMonitorStuck(BackupStats& backupStatistics,
    SubJobStatus::type& jobStatus, std::string& jobLogLabel, int& jobProgress)
{
    WARNLOG("backup statistic has not been update for 300s");
    if (m_backup->Abort() != BackupRetCode::SUCCESS) {
        HCP_Log(ERR, MODULE) << "backup Abort is failed" << HCPENDLOG;
    }
    // Everything still outstanding counts as failed in the published statistics.
    backupStatistics.noOfDirFailed += backupStatistics.noOfDirToBackup - backupStatistics.noOfDirCopied;
    backupStatistics.noOfFilesFailed += backupStatistics.noOfFilesToBackup - backupStatistics.noOfFilesCopied;
    UpdateBackupStatistics(backupStatistics);
    jobStatus = SubJobStatus::COMPLETED;
    jobProgress = OBSPlugin::PROGRESS100;
    jobLogLabel = "";
}

// Persist the per-prefix scan statistics and decide (under the shared-resource lock)
// whether this report cycle should carry a PM label or just a plain heartbeat.
// Returns false when the shared statistics resource cannot be updated.
bool ObjectStorageBackupJob::UpdateScanStatInfo(const PrefixInfo& prefixInfo,
    ObjectStorageNativeScanStatistics& scanStatistics)
{
    // Write this sub job's statistics so the main job can aggregate them for reporting.
    UpdateScannerSubTaskStatistics(prefixInfo, scanStatistics, m_scanStatisticsPath, m_subJobId);

    bool shouldReportToPM = false;
    if (!UpdateScanStatsResourceWithLock(scanStatistics, shouldReportToPM)) {
        return false;
    }
    if (shouldReportToPM) {
        ReportScanRunningStatus();
    } else {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
    }
    return true;
}

// Aggregate all sub-job scan statistics and report progress; attaches a PM label only
// once at least one file has been discovered. Returns false when aggregation fails.
bool ObjectStorageBackupJob::ReportScanRunningStatus()
{
    ObjectStorageNativeScanStatistics mainScanJobStatistics {};
    if (!CalcSubScanStats(m_scanStatisticsPath, false, mainScanJobStatistics)) {
        return false;
    }
    if (mainScanJobStatistics.m_totFiles == 0) {
        // Nothing scanned yet — plain heartbeat without a label.
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
        return true;
    }
    ReportJobDetailsWithLabelAndErrcode(
        std::make_tuple(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0),
        "object_storage_plugin_backup_scan_inprogress_label",
        0,
        std::to_string(mainScanJobStatistics.m_totFiles),
        FormatCapacity(mainScanJobStatistics.m_totalSize),
        std::to_string(mainScanJobStatistics.m_totFilesToBackup),
        FormatCapacity(mainScanJobStatistics.m_totalSizeToBackup));
    return true;
}

// Publish the current backup statistics and decide whether this cycle should also send
// a labeled report to the PM (at most once per BACKUP_REPORT_CIRCLE_TIME_IN_SEC across
// all nodes, coordinated through the UBC shared-resource lock).
// Returns false when the shared resource cannot be locked or read; a plain heartbeat is
// still sent in that case.
bool ObjectStorageBackupJob::UpdateBackupStatistics(BackupStats& backupStatistics)
{
    m_dataSize = backupStatistics.noOfBytesCopied / NUMBER1024;
    UpdateBackupSubTaskStatistics(backupStatistics, m_cacheFsPath, 0, 0);
    PrintBackupStatistics(backupStatistics, m_jobId, m_backupStatus, m_subJobId);

    // Node-level throttle: skip the shared-resource dance entirely when this node
    // is not allowed to send labeled reports right now.
    if (!m_nodeLevelTaskInfo.CanSendLogReportToPM(m_jobId)) {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
        return true;
    }

    ObjectStorageNativeBackupStats backupStatsResource {};
    bool bReportToPM = false;

    // UBC shared-resource lock, used to throttle label reporting across controllers.
    // Key: jobId + backup-stats-key.
    if (!LockBackupStatsResource(m_jobId)) {
        // Plugin restart can leave the UBC lock held (deadlock) — try to unlock it.
        UnlockBackupStatsResource(m_jobId);
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
        return false;
    }

    if (!GetBackupStatsResource(m_jobId, backupStatsResource)) {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
        UnlockBackupStatsResource(m_jobId);
        return false;
    }

    // 120s interval between labeled reports to the PM (shared across both controllers).
    if ((GetCurrentTimeInSeconds() - backupStatsResource.m_lastLogReportTime) >
        OBSPlugin::BACKUP_REPORT_CIRCLE_TIME_IN_SEC) {
        bReportToPM = true;
        backupStatsResource.m_lastLogReportTime = GetCurrentTimeInSeconds();
        UpdateBackupStatsResource(m_jobId, backupStatsResource);
    }
    UnlockBackupStatsResource(m_jobId);

    if (!bReportToPM) {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
    } else {
        ReportBackupRunningStatus();
    }

    return true;
}

bool ObjectStorageBackupJob::ReportBackupRunningStatus()
{
    ObjectStorageNativeBackupStats mainBackupJobStatistics {};

    std::vector<std::string> fileList {};
    if (!GetFileListInDirectory(m_statisticsPath, fileList)) {
        HCP_Log(ERR, MODULE) << "Get filelist for dir failed: " << m_statisticsPath << HCPENDLOG;
        return false;
    }
    for (std::size_t i = 0; i < fileList.size(); ++i) {
        ObjectStorageNativeBackupStats subBackupJobStatistics {};
        if (!ReadBackupStatsFromFile(fileList[i], subBackupJobStatistics)) {
            continue;
        }
        mainBackupJobStatistics.m_noOfFilesCopied += subBackupJobStatistics.m_noOfFilesCopied;
        mainBackupJobStatistics.m_noOfBytesCopied += subBackupJobStatistics.m_noOfBytesCopied;
    }

    // 上报备份进度，会打标签
    if (mainBackupJobStatistics.m_noOfBytesCopied != 0) {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_inprogress_label",
            0,
            std::to_string(mainBackupJobStatistics.m_noOfFilesCopied),
            FormatCapacity(mainBackupJobStatistics.m_noOfBytesCopied));
    } else {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
    }

    return true;
}

void ObjectStorageBackupJob::HandleMonitorComplete(
    SubJobStatus::type& jobStatus, std::string& jobLogLabel, int& jobProgress)
{
    // Backup phase finished cleanly: 100% progress, no label needed.
    HCP_Log(INFO, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_COMPLETED " << HCPENDLOG;
    jobStatus = SubJobStatus::COMPLETED;
    jobProgress = OBSPlugin::PROGRESS100;
    jobLogLabel = "";
}

void ObjectStorageBackupJob::HandleMonitorFailed(SubJobStatus::type& jobStatus, std::string& jobLogLabel)
{
    // Backup phase failed: mark FAILED and attach the failure label.
    HCP_Log(ERR, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_FAILED " << HCPENDLOG;
    jobLogLabel = "object_storage_plugin_backup_data_fail_label";
    jobStatus = SubJobStatus::FAILED;
}

void ObjectStorageBackupJob::HandleMonitorAborted(SubJobStatus::type& jobStatus, std::string& jobLogLabel)
{
    // Backup phase aborted: mark ABORTED; aborts carry no label.
    HCP_Log(ERR, MODULE) << "Monitor Backup - BACKUP_PHASE_STATUS_ABORTED " << HCPENDLOG;
    jobLogLabel = "";
    jobStatus = SubJobStatus::ABORTED;
}

void ObjectStorageBackupJob::GetBackupFailDetails()
{
    std::unordered_set<FailedRecordItem, FailedRecordItemHash> failedRecordItem = m_backup->GetFailedDetails();
    if (!failedRecordItem.empty()) {
        auto it = failedRecordItem.begin();
        m_backErrMsg = it->errMsg;
        HCP_Log(ERR, MODULE) << "m_backErrMsg" << m_backErrMsg.c_str() << HCPENDLOG;
    }
    HCP_Log(DEBUG, MODULE) << "No Erorr Message" << HCPENDLOG;
}

void ObjectStorageBackupJob::ReportBackupStatus(const SubJobStatus::type& jobStatus, const std::string& jobLogLabel)
{
    // Success path: plain 100% completion report, no error code required.
    if (jobStatus == SubJobStatus::COMPLETED) {
        ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100);
        return;
    }

    // Map the internal backup-phase status onto the externally visible code.
    int64_t errCode;
    switch (m_backupStatus) {
        case BackupPhaseStatus::FAILED_NOACCESS:
            errCode = ObsErrorCode::ERROR_BACKUP_FAILED_NOACCESS_ERROR;
            break;
        case BackupPhaseStatus::FAILED_NOSPACE:
            errCode = ObsErrorCode::ERROR_BACKUP_FAILED_NOSPACE_ERROR;
            break;
        case BackupPhaseStatus::FAILED_PROT_SERVER_NOTREACHABLE:
            errCode = ObsErrorCode::ERROR_OBS_BACKUP_PROTECTED_SERVER_NOT_REACHABLE;
            break;
        case BackupPhaseStatus::FAILED_SEC_SERVER_NOTREACHABLE:
            errCode = ObsErrorCode::ERROR_OBS_BACKUP_SECONDARY_SERVER_NOT_REACHABLE;
            break;
        default:
            errCode = ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR;
            break;
    }

    const bool attachEngineMessage =
        !m_backErrMsg.empty() && errCode == ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR;
    if (attachEngineMessage) {
        // Generic internal error but a concrete engine message exists: attach it.
        ReportJobDetailsWithDetailAndErrcode(std::make_tuple(JobLogLevel::TASK_LOG_ERROR, jobStatus,
            OBSPlugin::PROGRESS0), jobLogLabel, errCode, m_backErrMsg);
    } else {
        // Otherwise report the label plus storage-status extend information.
        ReportJobDetailsWithLabelAndExtendInfo(std::make_tuple(JobLogLevel::TASK_LOG_ERROR,
            jobStatus, OBSPlugin::PROGRESS0), jobLogLabel, errCode, GetStorageStatusExtend(errCode));
    }
}

int ObjectStorageBackupJob::ExecuteTeardownSubJob()
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);

    ObjectStorageLastCopyDetails newBackupCopy;
    HCP_Log(INFO, MODULE) << "Enter ExecuteTeardownSubJobInner" << HCPENDLOG;

    PrintJobInfo();
    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        return Module::FAILED;
    }

    FillBackupCopyDetails(newBackupCopy);

    // write backup-copy-meta.json to root of meta repo
    HCP_Log(DEBUG, MODULE) << "Save LastCopyDetails to path:" << m_backupCopyInfoFilePath << HCPENDLOG;
    if (!WriteBackupCopyToFile(m_backupCopyInfoFilePath, newBackupCopy)) {
        HCP_Log(ERR, MODULE) << "write root json file failed" << HCPENDLOG;
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        return Module::FAILED;
    }

    CalculateJobStats();
    ReportBackupCompletionStatus();
    return Module::SUCCESS;
}

int ObjectStorageBackupJob::ExecuteScanTeardownSubJob()
{
    // Consolidate scan statistics at scan teardown. On checkpoint-enabled jobs
    // the phase outcome is persisted so a resumed job can skip finished work.
    if (!CalculateScanJobStats()) {
        HCP_Log(ERR, MODULE) << "CalculateScanJobStats failed, jobId:" << m_backupJobPtr->jobId << HCPENDLOG;
        if (NeedContinueCheckpoint()) {
            RecordErrInfo(SCANSTATUS);  // persist the failed-phase marker
        }
        return Module::FAILED;
    }

    HCP_Log(INFO, MODULE) << "CalculateScan success" << HCPENDLOG;
    if (NeedContinueCheckpoint()) {
        m_checkPointRecord.errorCode[SCANSTATUS] = "done";  // phase completed
    }
    WriteToCache(m_checkPointRecord);
    return Module::SUCCESS;
}

bool ObjectStorageBackupJob::CalculateScanJobStats()
{
    // Build (or, on checkpoint resume, reload) the aggregated main-job scan
    // statistics file and report scan completion to PM.
    ObjectStorageNativeScanStatistics mainJobStats {};

    if (NeedContinueCheckpoint() && m_checkPointRecord.errorCode[SCANSTATUS] == "done") {
        // Scan phase already finished in a previous run: reuse its statistics.
        // (Renamed from 'm_lastScanPath' — the m_ prefix is reserved for members.)
        const std::string lastScanPath = m_scanStatisticsPath + "/scan-stats-main-" + m_jobId + ".json";
        if (!JsonFileTool::ReadFromFile(lastScanPath, mainJobStats)) {
            HCP_Log(ERR, MODULE) << "Read from file failed: " << lastScanPath << HCPENDLOG;
            return false;
        }
        ReportScannerCompleteStatus(mainJobStats);
        return true;
    }

    if (!CalcSubScanStats(m_scanStatisticsPath, true, mainJobStats)) {
        return false;
    }

    // Persist the aggregate so a checkpoint resume can read it back above.
    std::string filePath = m_scanStatisticsPath + "/scan-stats-main-" + m_backupJobPtr->jobId + ".json";
    if (!JsonFileTool::WriteToFile(mainJobStats, filePath)) {
        HCP_Log(ERR, MODULE) << "Write to file failed: " << filePath << HCPENDLOG;
        return false;
    }
    ReportScannerCompleteStatus(mainJobStats);
    return true;
}

void ObjectStorageBackupJob::FillBackupCopyDetails(ObjectStorageLastCopyDetails& newBackupCopy)
{
    // Snapshot this backup's settings into the last-copy record so the next
    // (incremental) backup can compare against them.
    newBackupCopy.m_isBackupAcl = m_protectedOBS.advParms.m_isBackupAcl;
    newBackupCopy.m_multiNodeBackupSwitch = m_protectedOBS.advParms.m_multiNodeBackupSwitch;
    newBackupCopy.m_obsProtectSubObjectList = m_protectedOBS.obsProtectSubObjectList;
    newBackupCopy.m_lastBackupTime = m_lastBackupTime;
}

bool ObjectStorageBackupJob::WriteBackupCopyToFile(
    const std::string& metaFileFullPath, ObjectStorageLastCopyDetails &backupCopy)
{
    // Serialize FIRST: the previous version opened (and truncated) the target
    // file before converting, so a JSON conversion failure destroyed the
    // existing backup-copy-meta content.
    std::string jsonFileContent {};
    if (!Module::JsonHelper::StructToJsonString(backupCopy, jsonFileContent)) {
        HCP_Log(ERR, MODULE) << "Write " << WIPE_SENSITIVE(metaFileFullPath)
            << " failed, ERR: Json covert failed" << HCPENDLOG;
        return false;
    }

    // Open with a few retries; transient repo-path hiccups are expected.
    std::ofstream writeFd {};
    uint32_t retryCnt = 0;
    do {
        writeFd.open(metaFileFullPath.c_str(), std::ios::out);
        if (writeFd.is_open()) {
            break;
        }
        writeFd.close();  // reset stream state before retrying
        Module::SleepFor(std::chrono::seconds(NUMBER1));
    } while (++retryCnt < NUMBER3);

    if (!writeFd.is_open()) {
        char errmsg[NUMBER256];
        strerror_r(errno, errmsg, NUMBER256);
        HCP_Log(ERR, MODULE) << "Write " << WIPE_SENSITIVE(metaFileFullPath)
            << " failed, ERR: " << errmsg << HCPENDLOG;
        return false;
    }

    HCP_Log(INFO, MODULE) << "write backup copy file, content = " << jsonFileContent << HCPENDLOG; // TO-DO remove
    writeFd << jsonFileContent;
    writeFd.close();
    // A full disk or a disconnected repo surfaces here, not at open time.
    if (writeFd.fail()) {
        HCP_Log(ERR, MODULE) << "Write " << WIPE_SENSITIVE(metaFileFullPath)
            << " failed, ERR: stream write error" << HCPENDLOG;
        return false;
    }

    return true;
}

bool ObjectStorageBackupJob::CalculateJobStats()
{
    // Merge every per-subjob statistics file into one main-job statistics
    // file, consuming (deleting) each sub file as it is folded in.
    std::vector<std::string> statsFiles {};
    if (!OBSPlugin::GetFileListInDirectory(m_statisticsPath, statsFiles)) {
        HCP_Log(ERR, MODULE) << "Get filelist for dir failed: " << m_statisticsPath << HCPENDLOG;
        return false;
    }

    ObjectStorageNativeBackupStats mainJobStats {};
    // Seed min/max so the first file's times always win.
    mainJobStats.m_backupStartTime = std::numeric_limits<time_t>::max();
    mainJobStats.m_backupEndTime = 0;

    std::size_t processed = 0;
    for (const auto& statsFile : statsFiles) {
        if (processed++ % NUMBER50 == 0) {
            /* Since this loop may take too much time, report progress to framework, every 50 files */
            ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);
        }
        ObjectStorageNativeBackupStats subJobStats {};
        ReadBackupStatsFromFile(statsFile, subJobStats);
        mainJobStats.m_noOfFilesToBackup += subJobStats.m_noOfFilesToBackup;
        mainJobStats.m_noOfBytesToBackup += subJobStats.m_noOfBytesToBackup;
        mainJobStats.m_noOfFilesToDelete += subJobStats.m_noOfFilesToDelete;
        mainJobStats.m_noOfFilesCopied += subJobStats.m_noOfFilesCopied;
        mainJobStats.m_noOfBytesCopied += subJobStats.m_noOfBytesCopied;
        mainJobStats.m_noOfFilesDeleted += subJobStats.m_noOfFilesDeleted;
        mainJobStats.m_noOfFilesFailed += subJobStats.m_noOfFilesFailed;
        mainJobStats.m_noOfSrcRetryCount += subJobStats.m_noOfSrcRetryCount;
        mainJobStats.m_noOfDstRetryCount += subJobStats.m_noOfDstRetryCount;
        mainJobStats.m_backupStartTime = std::min(mainJobStats.m_backupStartTime, subJobStats.m_backupStartTime);
        mainJobStats.m_backupEndTime = std::max(mainJobStats.m_backupEndTime, subJobStats.m_backupEndTime);
        // Remove the consumed sub-stats file so a rerun does not double count.
        OBSPlugin::RemoveFile(statsFile);
    }

    const std::string mainStatsPath = m_statisticsPath + "/backup-stats-main-" + m_backupJobPtr->jobId + ".json";
    WriteBackupStatsToFile(mainStatsPath, mainJobStats);
    return true;
}

bool ObjectStorageBackupJob::ReportBackupCompletionStatus()
{
    ObjectStorageNativeBackupStats backupStatistics {};
    std::string filePath = m_statisticsPath + "/backup-stats-main-" + m_jobId + ".json";
    ReadBackupStatsFromFile(filePath, backupStatistics);

    /* As we report this from teardown-subjob or postjob, set datasize to 0. SO that UBC do not consider this size
      for speed calc */
    if (backupStatistics.m_noOfFilesFailed != 0) {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_WARNING, SubJobStatus::FAILED, OBSPlugin::PROGRESS100),
            "object_storage_plugin_backup_data_completed_with_warn_label",
            0,
            std::to_string(backupStatistics.m_noOfFilesCopied),
            FormatCapacity(backupStatistics.m_noOfBytesCopied),
            std::to_string(backupStatistics.m_noOfFilesFailed));
    } else {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100),
            "object_storage_plugin_backup_data_completed_label",
            0,
            std::to_string(backupStatistics.m_noOfFilesCopied),
            FormatCapacity(backupStatistics.m_noOfBytesCopied));
    }
    return true;
}

bool ObjectStorageBackupJob::DeleteControlCheckpointFile(ObjectStorageBackupSubJob& subJob)
{
    // Remove the per-control-file checkpoint marker once its subjob finished.
    if (!NeedContinueCheckpoint()) {
        return true;  // checkpointing disabled: nothing to clean up
    }

    const std::string& markerPath = subJob.checkpointCachePath;
    if (markerPath.empty()) {
        return false;
    }

    HCP_Log(INFO, MODULE) << "DeleteControlCheckpointFile path " << markerPath << HCPENDLOG;
    if (!OBSPlugin::RemoveFile(markerPath)) {
        HCP_Log(ERR, MODULE) << "DeleteControlCheckpointFile failed" << HCPENDLOG;
        return false;
    }
    HCP_Log(INFO, MODULE) << "DeleteControlCheckpointFile already" << HCPENDLOG;
    return true;
}

bool ObjectStorageBackupJob::CreateControlCheckpointFile(ObjectStorageBackupSubJob& subJob)
{
    // Create (if absent) an empty checkpoint marker named after the subjob's
    // control file, and remember the marker path on the subjob.
    const std::string& ctrlPath = subJob.m_ControlFile;
    const size_t lastSlashPos = ctrlPath.rfind('/');
    // npos + 1 wraps to 0, so a path without '/' uses the whole string.
    const std::string ctrlname = ctrlPath.substr(lastSlashPos + 1);
    const std::string subCheckPath = PathJoin(m_subCheckPath, ctrlname);
    // Fixed log message: it previously said "ctrlname:" but printed the path.
    HCP_Log(INFO, MODULE) << "checkpoint marker path: " << subCheckPath << HCPENDLOG;

    if (!fs::is_regular_file(subCheckPath)) {
        CreateDirectory(m_subCheckPath);
        std::ofstream jsonFile(subCheckPath);
        if (!jsonFile.is_open()) {
            // Creation failure is an error (was wrongly logged at INFO level).
            HCP_Log(ERR, MODULE) << "subCheckPath create fail" << HCPENDLOG;
            return false;
        }
        jsonFile.close();
    }

    subJob.checkpointCachePath = subCheckPath;
    return true;
}

int ObjectStorageBackupJob::RecordErrInfo(uint32_t number)
{
    // Mark the given phase (e.g. SCANSTATUS / METASTATUS) as failed in the
    // checkpoint record and persist the record to the cache repository.
    HCP_Log(INFO, MODULE) << "Err code: " << number << HCPENDLOG;
    m_checkPointRecord.errorCode[number] = "false";
    return WriteToCache(m_checkPointRecord) ? Module::SUCCESS : Module::FAILED;
}

// Copy-meta subjob: copy (optionally compressed) scanner meta files for one
// prefix into the meta repository, keeping the job alive from this thread
// while a worker thread performs the copy.
int ObjectStorageBackupJob::ExecuteCopyMetaSubJob(ObjectStorageBackupSubJob& subJob)
{
    ABORT_ENDTASK(m_logSubJobDetails, m_logResult, m_logDetailList, m_logDetail, 0, 0);
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0);

    // Config switch decides whether meta files get tar+pigz compressed.
    m_isCompressMeta = GetConfigIntKey(OBSPlugin::DME_OBS_CONFIG_SECTION, "DME_OBS_COMPRESS_META_DATA");
    HCP_Log(INFO, MODULE) << "m_isCompressMeta: " << m_isCompressMeta << HCPENDLOG;
    
    PrintJobInfo();
    const std::string prefixSubStr = GetScanGenFileRelativeDir(subJob.prefixInfo.front());
    const std::string metaPath = m_metaFsPathMap[subJob.esn];
    HCP_Log(INFO, MODULE) << "Begin Copy Meta file! : " << prefixSubStr
        <<", metaPath:"<< metaPath << ",esn:"<< subJob.esn << HCPENDLOG;
    // The worker clears isCopying / sets isZipSuccess via its deferred lambda.
    // NOTE(review): both bools are shared across threads without atomics or a
    // mutex — consider std::atomic<bool>; confirm intended visibility.
    bool isCopying = true;
    bool isZipSuccess = true;
    std::thread monitorCopyThread = std::thread(
        &ObjectStorageBackupJob::CopyMetaFileToMetaRepo,
        this, prefixSubStr, metaPath, std::ref(isCopying), std::ref(isZipSuccess));

    // Poll until the worker finishes, sending aliveness reports so the
    // framework does not time the subjob out; then join the worker.
    while (isCopying) {
        HCP_Log(INFO, MODULE) << "Wait for " << prefixSubStr << " copy finish!" << HCPENDLOG;
        SendJobReportForAliveness();
        std::this_thread::sleep_for(std::chrono::seconds(GENERATE_SUBTASK_MONITOR_DUR_IN_SEC));
    }
    monitorCopyThread.join();

    if (!isZipSuccess) {
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        if (NeedContinueCheckpoint()) {
            // Record the meta-phase failure and drop partial meta output so a
            // resumed job redoes this phase cleanly.
            RecordErrInfo(METASTATUS);
            OBSPlugin::Remove(m_metaFsPath);
        }
        return Module::FAILED;
    }

    HCP_Log(INFO, MODULE) << "Copy Meta sub job finish" << HCPENDLOG;
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100);
    
    if (NeedContinueCheckpoint()) {
        RecordMetaPerfix(subJob);
    }
    return Module::SUCCESS;
}

void ObjectStorageBackupJob::RecordMetaPerfix(ObjectStorageBackupSubJob& subJob)
{
    // Persist an empty per-prefix marker file so a checkpoint-resumed job can
    // skip prefixes whose meta copy has already completed.
    const std::string prefixHash = GetScanPrefixHash(subJob.prefixInfo.front());
    // A hash of "0" marks a bucket-level entry; use the bucket name instead.
    const std::string markerName =
        (prefixHash == "0") ? subJob.prefixInfo.front().bucketName : prefixHash;
    const std::string prefixFile = PathJoin(m_subCheckMetaPath, markerName);

    std::ofstream outfile(prefixFile);
    // Check the stream directly: the previous existence re-check could report
    // success for a pre-existing file even when this open actually failed.
    if (!outfile.is_open()) {
        ERRLOG("prefixFile create fail %s", prefixFile.c_str());
        return;
    }
    outfile.close();

    INFOLOG("prefixFile create success %s", prefixFile.c_str());
}

// Worker-thread body for ExecuteCopyMetaSubJob: copies scan control files,
// scan statistics and (optionally compressed) meta files for one prefix into
// the meta repository.
// @param isCopying   cleared on every exit path (via the deferred lambda) so
//                    the monitoring thread stops polling.
// @param isZipSuccess set from the final value of ret on exit.
// NOTE(review): shell commands embed repository paths unquoted; paths with
// spaces or shell metacharacters would break — confirm repo paths are always
// shell-safe.
int ObjectStorageBackupJob::CopyMetaFileToMetaRepo(
    const std::string& prefixSubStr, const std::string& metaPath, bool& isCopying, bool& isZipSuccess)
{
    HCPTSP::getInstance().reset(m_subJobRequestId);
    HCP_Log(INFO, MODULE) << "Enter CopyMetaFileToMetaRepo, prefixSubStr: " << prefixSubStr << HCPENDLOG;
    int ret = Module::SUCCESS;
    // shared_ptr with a custom deleter acts as a scope guard: the lambda runs
    // on every return path, publishing the outcome to the monitoring thread.
    std::shared_ptr<void> defer(nullptr, [&](...) {
        isCopying = false;
        isZipSuccess = (ret == Module::SUCCESS);
    });

    const std::string srcDir = m_scanMetaPath + prefixSubStr + "/latest/";
    const std::string targetDir = metaPath + "/filemeta" + prefixSubStr;

    if (!RecurseCreateDirectory(targetDir)) {
        HCP_Log(ERR, MODULE) << "CreateMetaDirectory Failed: " << prefixSubStr << HCPENDLOG;
        return Module::FAILED;
    }

    // Keep the scan-generated control files and statistics in the meta repo.
    std::vector<std::string> paramList {};
    std::string copyCtrlCmd = "cp -r " + m_backupControlPath + " " + metaPath;
    std::string copyScanStatCmd = "cp -r " + m_scanStatisticsPath + " " + metaPath;
    std::string copyCmd = copyCtrlCmd + " && " + copyScanStatCmd + " ";
    HCP_Log(INFO, MODULE) << "run cmd : " << copyCmd << HCPENDLOG;
    ret = OBSPlugin::RunShellCmd(copyCmd, paramList);
    if (ret != Module::SUCCESS) {
        return ret;
    }

    // Uncompressed mode: plain copy of the meta files.
    if (!m_isCompressMeta) {
        ret = CopyUnCompressMeta(srcDir, targetDir);
        return ret;
    }

    // Compressed mode: four tar|pigz archives (dircache/filecache/meta/xmeta).
    // NOTE(review): archive names concatenate directly onto targetDir with no
    // '/' inserted — presumably prefixSubStr supplies the trailing separator;
    // verify against GetScanGenFileRelativeDir.
    std::string dirCacheZipFileName = targetDir + "metafile_DIRCACHE.gz";
    std::string fCacheZipFileName = targetDir + "metafile_FILECACHE.gz";
    std::string metaZipFilename = targetDir + "metafile_META.gz";
    std::string xmetaZipFileName = targetDir + "metafile_XMETA.gz";

    // Remove stale archives so a retry does not mix with partial output.
    CheckAndDeleteFiles(dirCacheZipFileName);
    CheckAndDeleteFiles(fCacheZipFileName);
    CheckAndDeleteFiles(metaZipFilename);
    CheckAndDeleteFiles(xmetaZipFileName);

    std::string moveToDirCmd = "cd " + srcDir + ";";
    std::string zipDirCacheFilesCmd = "tar -cf - dircache* metafile_count.txt scanner_status.txt | pigz -1k > "
        + dirCacheZipFileName + " && ";
    std::string zipFCacheFilesCmd = "tar -cf - filecache_*  | pigz -1k > " + fCacheZipFileName + " && ";
    std::string zipMetaFilesCmd = "tar -cf - meta_file_*  | pigz -1k > " + metaZipFilename + " && ";
    std::string zipXMetaFilesCmd = "tar -cf - xmeta_file_*  | pigz -1k > " + xmetaZipFileName + " ";

    std::string execCmd = moveToDirCmd + zipDirCacheFilesCmd + zipFCacheFilesCmd + zipMetaFilesCmd + zipXMetaFilesCmd;

    HCP_Log(INFO, MODULE) << "run cmd : " << execCmd << HCPENDLOG;
    // NOTE(review): compares against literal 0 here but Module::SUCCESS above —
    // presumably equivalent; confirm and unify.
    ret = OBSPlugin::RunShellCmd(execCmd, paramList);
    if (ret != 0) {
        return ret;
    }

    HCP_Log(INFO, MODULE) << "Copy meta file finish , set isCopying to false." << HCPENDLOG;
    return Module::SUCCESS;
}

int ObjectStorageBackupJob::PostJobInner()
{
    HCP_Log(INFO, MODULE) << "start post " << HCPENDLOG;
    if (!InitJobInfo()) {
        HCP_Log(ERR, MODULE) << "InitJobInfo failed" << HCPENDLOG;
        RemoveCacheDirectories();
        DeleteSharedResources(m_jobId);
        DeleteStorageGeneralResources(m_jobId);
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::BACKUP_PARAMETER_CHECK_FAILED_ERROR_CODE);
        m_nodeLevelTaskInfo.Erase(m_jobId);
        return Module::FAILED;
    }

    PrintJobInfo();
    ReportAverageSpeed();
    MergeBackupFailureRecords();

    if (!GetSharedResources(m_jobId, m_generalInfo, m_scanStats, m_backupStats)) {
        HCP_Log(ERR, MODULE) << "GetSharedResources failed" << HCPENDLOG;
        RemoveCacheDirectories();
        DeleteSharedResources(m_jobId);
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        m_nodeLevelTaskInfo.Erase(m_jobId);
        return Module::FAILED;
    }

    if (!PostReportCopyAdditionalInfo()) {
        HCP_Log(ERR, MODULE) << "PostReportCopyAdditionalInfo failed" << HCPENDLOG;
        RemoveCacheDirectories();
        DeleteSharedResources(m_jobId);
        ReportJobDetailsWithLabelAndErrcode(
            std::make_tuple(JobLogLevel::TASK_LOG_ERROR, SubJobStatus::FAILED, OBSPlugin::PROGRESS0),
            "object_storage_plugin_backup_data_fail_label",
            ObsErrorCode::ERROR_AGENT_INTERNAL_ERROR);
        m_nodeLevelTaskInfo.Erase(m_jobId);
        return Module::FAILED;
    }

    DeleteSharedResources(m_jobId);
    DeleteStorageGeneralResources(m_jobId);
    RemoveCacheDirectories();

    Remove(m_backupObjectInfoFilePath);
    Remove(m_certFilePath);

    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::COMPLETED, OBSPlugin::PROGRESS100);
    m_nodeLevelTaskInfo.Erase(m_jobId);
    ResourceManager::GetInstance().Erase(m_jobId);

    return Module::SUCCESS;
}

void ObjectStorageBackupJob::RemoveCacheDirectories()
{
    // Drop the cache-repo working directories, unless a bucket-log incremental
    // backup or a failed checkpoint-enabled job still needs them for resume.
    INFOLOG("RemoveCacheDirectories cacheFsPath: %s", m_cacheFsPath.c_str());
    const bool keepForResume = IsBucketLogInc() ||
        (NeedContinueCheckpoint() && m_jobResult != AppProtect::JobResult::type::SUCCESS);
    if (keepForResume) {
        HCP_Log(INFO, MODULE) << "dont delete " << HCPENDLOG;
        HCP_Log(WARN, MODULE) << "Bucket log inc backup job exec failed, jobid " << m_jobId << HCPENDLOG;
        return;
    }

    OBSPlugin::Remove(m_cacheFsPath);
    OBSPlugin::Remove(PathJoin(GetPathName(m_subCheckPath)));
    if (NeedContinueCheckpoint()) {
        OBSPlugin::Remove(m_subCheckPath);
    }
}

// Report the copy's additional info (backed-up object list, repository
// addresses) to UBC after a successful job. Returns true without doing
// anything when the job did not succeed; false on any report failure.
bool ObjectStorageBackupJob::PostReportCopyAdditionalInfo()
{
    if (m_jobResult == AppProtect::JobResult::type::SUCCESS) {
        // Load the per-copy object info collected during backup.
        BackupObjectInfosInCopy backupObjectInfosInCopy;
        if (!JsonFileTool::ReadFromFile(m_backupObjectInfoFilePath, backupObjectInfosInCopy)) {
            HCP_Log(ERR, MODULE) << "Read backup object info from file failed" << HCPENDLOG;
            return false;
        }
        backupObjectInfosInCopy.pathSuffix = m_backupJobPtr->copy.id;
        backupObjectInfosInCopy.bucketLogIncBackupInfo = m_generalInfo.bucketLogIncBackupInfo;

        Copy image;
        image.__set_id(m_backupJobPtr->copy.id); // copy ID
        image.__set_formatType(m_backupJobPtr->copy.formatType);
        HCP_Log(INFO, MODULE) << "m_backupJobPtr->copy.formatType:" << m_backupJobPtr->copy.formatType << HCPENDLOG;

        // Serialize the object info into the copy's extendInfo field.
        std::string extendInfo;
        if (!Module::JsonHelper::StructToJsonString(backupObjectInfosInCopy, extendInfo)) {
            ERRLOG("Exit ReportCopyAdditionalInfo Failed,aggCopyExtendInfo json trans failed");
            return false;
        }
        INFOLOG("image set extendinfo %s", extendInfo.c_str());
        image.__set_extendInfo(extendInfo);
 
        // Build the data- and meta-repository addresses reported to UBC.
        std::vector<StorageRepository> repositories;
        BuildCopyRepositories(repositories);
        if (!SetRepoAbnormalFlag(backupObjectInfosInCopy.objectInfos, repositories)) {
            ERRLOG("Set repository abnormal flag Failed");
            return false;
        }
        // Mark whether each repository is healthy.
        image.__set_repositories(repositories);
 
        // Retry the report; once retryCnt reaches MIDDLE_NORMAL_RETRY_TIMES,
        // start keep-alive so UBC does not time the job out while retrying.
        int retryCnt = 0;
        ActionResult returnValue;
        do {
            if (retryCnt == MIDDLE_NORMAL_RETRY_TIMES) {
                KeepJobAlive(m_jobId, m_subJobId);
            }
            JobService::ReportCopyAdditionalInfo(returnValue, m_jobId, image);
            if (returnValue.code == MP_SUCCESS) {
                DBGLOG("Exit ReportCopyAdditionalInfo, success report image: %s", WIPE_SENSITIVE(image).c_str());
                return true;
            }
            Module::SleepFor(std::chrono::seconds(NORMAL_RETRY_INTERVAL));
            WARNLOG("ReportCopyAdditionalInfo failed, image: %s, retry: %d", WIPE_SENSITIVE(image).c_str(), retryCnt);
        } while (++retryCnt <= NORMAL_RETRY_TIMES);
        ERRLOG("Exit ReportCopyAdditionalInfo, failed report image: %s after maxRetry", WIPE_SENSITIVE(image).c_str());
        return false;
    }

    return true;
}

void ObjectStorageBackupJob::BuildCopyRepositories(std::vector<StorageRepository>& repositories)
{
    for (const auto& metaFs : m_multiMetaFsList) {
        StorageRepository metaStorageRep;
        metaStorageRep.__set_id(metaFs.id); // 文件系统ID
        metaStorageRep.__set_repositoryType(RepositoryDataType::META_REPOSITORY);
        metaStorageRep.__set_isLocal(metaFs.isLocal);
        const std::string metaRemotePath = IsAggregate() ? (metaFs.remotePath + "/" + m_backupJobPtr->copy.id) :
            metaFs.remotePath;
        metaStorageRep.__set_remotePath(metaRemotePath);
        metaStorageRep.__set_remoteHost(metaFs.remoteHost);
        metaStorageRep.__set_protocol(metaFs.protocol);
        metaStorageRep.__set_extendInfo(metaFs.extendInfo);
        repositories.emplace_back(metaStorageRep);
    }

    for (const auto& dataFs : m_multiDataFsList) {
        StorageRepository dataStorageRep;
        dataStorageRep.__set_id(dataFs.id);
        dataStorageRep.__set_repositoryType(RepositoryDataType::DATA_REPOSITORY);
        dataStorageRep.__set_isLocal(dataFs.isLocal);
        const std::string dataRemotePath = IsAggregate() ? (dataFs.remotePath + "/" + m_backupJobPtr->copy.id) :
            dataFs.remotePath;
        dataStorageRep.__set_remotePath(dataRemotePath);
        dataStorageRep.__set_remoteHost(dataFs.remoteHost);
        dataStorageRep.__set_protocol(dataFs.protocol);
        dataStorageRep.__set_extendInfo(dataFs.extendInfo);
        repositories.emplace_back(dataStorageRep);
    }
}

void ObjectStorageBackupJob::ReportAverageSpeed()
{
    ObjectStorageNativeBackupStats backupStatistics {};
    ReadBackupStatsFromFile(m_statisticsPath + "/backup-stats-main-" + m_jobId + ".json", backupStatistics);
    const auto diffTime = backupStatistics.m_backupEndTime - backupStatistics.m_backupStartTime;
    HCP_Log(INFO, MODULE) << "backup end time: " << backupStatistics.m_backupEndTime
        << " backup start time: " << backupStatistics.m_backupStartTime
        << " total data size: " << backupStatistics.m_noOfBytesCopied << HCPENDLOG;
    if (diffTime > 0) {
        m_averageSpeed = backupStatistics.m_noOfBytesCopied / diffTime / NUMBER1024;
    }
    ReportJobDetails(JobLogLevel::TASK_LOG_INFO, SubJobStatus::RUNNING, OBSPlugin::PROGRESS0); // 上报任务平均速度
}