/*
* This file is a part of the open-eBackup project.
* This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
* If a copy of the MPL was not distributed with this file, You can obtain one at
* http://mozilla.org/MPL/2.0/.
*
* Copyright (c) [2024] Huawei Technologies Co.,Ltd.
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
*/
#include "BuildIndexServiceJob.h"
#include <algorithm>
#include <unordered_map>
#include <cstring>
#include <boost/filesystem.hpp>
#include <sys/types.h>
#include <sys/stat.h>
#include "ClientInvoke.h"
#include "ApplicationProtectPlugin_types.h"
#include "ApplicationProtectBaseDataType_types.h"
#include "ApplicationProtectFramework_types.h"
#include "JsonTransUtil.h"
#include "common/CleanMemPwd.h"
#include "system/System.hpp"
#include "PluginConstants.h"
#include "PluginUtilities.h"
#include "common/EnvVarManager.h"

using namespace std;
using namespace AppProtect;
using namespace Module;
using namespace PluginUtils;

namespace {
    constexpr auto MODULE = "NASIndex";  // module tag passed to HCP_Log
    constexpr auto REPOINDEX = 0;  // index of the single entry in a service-IP list
    constexpr uint32_t NUM_10 = 10;
    constexpr auto RFI = "rfi";  // cache-repo sub-directory holding generated RFI files
    const int INDEX_REPORT_INTERVAL = 45;  // seconds between RUNNING reports while waiting on a worker thread
    // host protocol codes (NFS / CIFS / both); not referenced in this chunk -- presumably matched
    // against host extend info elsewhere in the file. TODO confirm
    constexpr int HOST_PROTOCOL_TYPE_NFS = 1;
    constexpr int HOST_PROTOCOL_TYPE_CIFS = 2;
    constexpr int HOST_PROTOCOL_TYPE_NFS_CIFS = 3;
    // well-known scanner meta/cache file names
    // NOTE(review): spelled DIRECACHE_..., while SetScanHashType() uses DIRCACHE_FILE_NAME
    // (defined elsewhere) -- verify this constant is not an orphaned typo.
    const std::string DIRECACHE_FILE_NAME = "dircache";
    const std::string FILECACHE_FILE_NAME_0 = "filecache_0";
    const std::string META_FILE_NAME_0 = "meta_file_0";
    const std::string XMETA_FILE_NAME_0 = "xmeta_file_0";
    const std::string META_FILE_COUNT_NAME = "metafile_count.txt";
    // const std::string KRB5CCNAMEPREFIX = "/DataBackup/ProtectClient/ProtectClient-E/tmp/tkt_";
    // const std::string KRB5CONFIGPOSTFIX = ".conf";
}

std::shared_ptr<AppProtect::BuildIndexJob> BuildIndexServiceJob::GetJobInfoBody()
{
    return dynamic_pointer_cast<AppProtect::BuildIndexJob>(GetJobInfo()->GetJobInfo());
}

// Scrub repository credentials from memory on teardown so secrets do not
// linger in freed heap pages.
// Fix: also wipe m_metaRepo / m_preMetaRepo -- the original scrubbed only the
// cache/pre/cur/index repos and left the meta repositories' credentials behind.
BuildIndexServiceJob::~BuildIndexServiceJob()
{
    auto wipeRepoSecrets = [](const std::shared_ptr<StorageRepository>& repo) {
        if (repo) {
            CleanMemoryPwd(repo->auth.authPwd);
            CleanMemoryPwd(repo->auth.extendInfo);
        }
    };
    wipeRepoSecrets(m_cacheRepo);
    wipeRepoSecrets(m_preRepo);
    wipeRepoSecrets(m_curRepo);
    wipeRepoSecrets(m_indexRepo);
    wipeRepoSecrets(m_metaRepo);
    wipeRepoSecrets(m_preMetaRepo);
}

// Build-index jobs have no backup-type precheck: mark the job finished and
// report success unconditionally.
int BuildIndexServiceJob::CheckBackupJobType()
{
    SetJobToFinish();
    return SUCCESS;
}

// Framework entry point for the prerequisite phase: run the phase body, then
// always flag the job as finished before returning its result.
EXTER_ATTACK int BuildIndexServiceJob::PrerequisiteJob()
{
    const int innerRet = PrerequisiteJobInner();
    SetJobToFinish();
    return innerRet;
}

// Framework entry point for sub-job generation: run the phase body, then
// always flag the job as finished before returning its result.
EXTER_ATTACK int BuildIndexServiceJob::GenerateSubJob()
{
    const int innerRet = GenerateSubJobInner();
    SetJobToFinish();
    return innerRet;
}

// Framework entry point for sub-job execution: run the phase body, then
// always flag the job as finished before returning its result.
EXTER_ATTACK int BuildIndexServiceJob::ExecuteSubJob()
{
    const int innerRet = ExecuteSubJobInner();
    SetJobToFinish();
    return innerRet;
}

// Framework entry point for the post phase: run the phase body, then always
// flag the job as finished before returning its result.
EXTER_ATTACK int BuildIndexServiceJob::PostJob()
{
    const int innerRet = PostJobInner();
    SetJobToFinish();
    return innerRet;
}

// No prerequisite work is needed for a build-index job.
int BuildIndexServiceJob::PrerequisiteJobInner() const
{
    return SUCCESS;
}

// Create the single business sub job that carries the whole index build and
// report the generation phase as completed.
// Fix: removed the redundant SetJobToFinish() call at the end -- the caller
// GenerateSubJob() invokes it again immediately after this function returns.
int BuildIndexServiceJob::GenerateSubJobInner()
{
    m_indexPara = GetJobInfoBody();
    HCP_Log(INFO, MODULE) << "Generate sub task for build index task, task id is " << m_indexPara->jobId << HCPENDLOG;
    // one business sub job, runnable on any node
    SubJob subJob {};
    subJob.__set_jobId(m_indexPara->jobId);
    subJob.__set_jobType(SubJobType::BUSINESS_SUB_JOB);
    subJob.__set_jobName("BuildIndex");
    subJob.__set_policy(ExecutePolicy::ANY_NODE);
    vector<SubJob> vec {};
    vec.push_back(subJob);

    // NOTE(review): the ActionResult of AddNewJob is not inspected -- confirm
    // the framework surfaces registration failures through another channel.
    ActionResult result {};
    JobService::AddNewJob(result, vec);
    // report this generation phase as completed
    SubJobDetails subJobDetails {};
    subJobDetails.__set_jobId(m_indexPara->jobId);
    subJobDetails.__set_jobStatus(SubJobStatus::COMPLETED);
    string description = "Generate sub task for build index task successfully";
    LogDetail logDetail {};
    vector<LogDetail> logDetails {};
    logDetail.__set_description(description);
    logDetails.push_back(logDetail);
    subJobDetails.__set_logDetail(logDetails);
    JobService::ReportJobDetails(result, subJobDetails);
    HCP_Log(INFO, MODULE) << "Finish to generate sub job , main task id is " << m_indexPara->jobId << HCPENDLOG;
    return SUCCESS;
}

// Business sub-job body: identify the repositories attached to the job, then
// run the homogeneous (NasFileSystem) or heterogeneous (NasShare) index flow
// selected by IdentifyRepos().
int BuildIndexServiceJob::ExecuteSubJobInner()
{
    m_indexPara = GetJobInfoBody();
    // Set m_jobId to the main task id and m_subJobId to the sub task id.
    SetMainJobId(m_indexPara->jobId);
    SetSubJobId();
    // Classify cache repo, pre data repo, cur data repo (and meta/index repos).
    int ret = IdentifyRepos();
    if (ret != SUCCESS) {
        HCP_Log(ERR, MODULE) << "Bad Identify Repos" << HCPENDLOG;
        ReportJob(SubJobStatus::FAILED);
        return FAILED;
    }
    if (IsAbortJob()) {
        // NOTE(review): returns FAILED without reporting ABORTED/FAILED --
        // confirm the framework reconciles aborted sub jobs elsewhere.
        ERRLOG("Job aborted, skip scanner.");
        return FAILED;
    }
    if (m_indexType == IndexType::HOMO_INDEX_FULL ||
        m_indexType == IndexType::HOMO_INDEX_INC) {
        ret = ProcessHomoIndex();
        if (ret != SUCCESS) {
            ReportJob(SubJobStatus::FAILED);
            HCP_Log(ERR, MODULE) << "Bad set up scanner" << HCPENDLOG;
            return FAILED;
        }
        // NOTE(review): unlike the hetro path below, success here returns
        // without ReportJob(SubJobStatus::COMPLETED) -- verify completion is
        // reported elsewhere (e.g. via a scanner callback).
        return SUCCESS;
    }
    // Hand the storage repository to the scanner for heterogeneous indexing.
    ret = ProcessHetroIndex();
    if (ret != SUCCESS) {
        ReportJob(SubJobStatus::FAILED);
        HCP_Log(ERR, MODULE) << "Bad set up scanner" << HCPENDLOG;
        return FAILED;
    }
    ReportJob(SubJobStatus::COMPLETED);
    return SUCCESS;
}

// No post-phase work is needed for a build-index job.
int BuildIndexServiceJob::PostJobInner() const
{
    return SUCCESS;
}

// 识别cache repo , pre data repo , cur data repo
int BuildIndexServiceJob::IdentifyRepos()
{
    m_indexPara = GetJobInfoBody();
    for (unsigned int i = 0; i < m_indexPara->copies.size(); i++) {
        for (auto repo : m_indexPara->copies[i].repositories) {
            if (IdentifyRepo(repo) != SUCCESS) {
                return FAILED;
            }
        }
    }
    for (auto repo : m_indexPara->repositories) {
        if (IdentifyRepo(repo) != SUCCESS) {
            return FAILED;
        }
    }
    if (m_indexPara->indexProtectObject.subType == "NasFileSystem") {
        m_indexType = m_preRepo != nullptr ? IndexType::HOMO_INDEX_INC : IndexType::HOMO_INDEX_FULL;
    } else if (m_indexPara->indexProtectObject.subType == "NasShare") {
        m_indexType = m_preRepo != nullptr ? IndexType::HETRO_INDEX_INC : IndexType::HETRO_INDEX_FULL;
    }
    if (m_curRepo == nullptr || m_cacheRepo == nullptr || m_indexRepo == nullptr) {
        HCP_Log(ERR, MODULE) << "repo init not complete" << HCPENDLOG;
        return FAILED;
    }
    if (m_cacheRepo->path.size() == 0 || m_indexRepo->path.size() == 0) {
        ERRLOG("m_cacheRepo->path.size: %d, m_indexRepo->path.size: %d",
            m_cacheRepo->path.size(), m_indexRepo->path.size());
        return FAILED;
    }
    return SUCCESS;
}

// Store one repository into the member slot matching its repository type.
// DATA and META repos additionally parse their extend info to distinguish the
// current copy from the previous copy.
// Fix: removed the unconditional m_metaRepo assignment that preceded the META
// extend-info parse -- it clobbered the current meta repo with a previous-copy
// meta repo whenever the previous one was identified after the current one.
// @return SUCCESS, or FAILED when extend info cannot be parsed.
int BuildIndexServiceJob::IdentifyRepo(StorageRepository& repo)
{
    HCP_Log(INFO, MODULE) << "repo type: " << repo.repositoryType <<HCPENDLOG;
    PrintRepo(repo);
    if (repo.repositoryType == RepositoryDataType::type::DATA_REPOSITORY) {
        StorageRepositoryExtendInfo extendInfo {};
        if (!JsonHelper::JsonStringToStruct(repo.extendInfo, extendInfo)) {
            HCP_Log(ERR, MODULE) << "Data storage repository extend info is invaild." << HCPENDLOG;
            return FAILED;
        }
        if (extendInfo.isCurrentCopyRepo) {
            m_curRepo = std::make_shared<StorageRepository>(repo);
            m_curRepoExtendInfo = std::make_shared<StorageRepositoryExtendInfo>(extendInfo);
            HCP_Log(INFO, MODULE) << "set cur repo complete" << HCPENDLOG;
        } else {
            m_preRepo = std::make_shared<StorageRepository>(repo);
            m_preRepoExtendInfo = std::make_shared<StorageRepositoryExtendInfo>(extendInfo);
            HCP_Log(INFO, MODULE) << "set pre repo complete" << HCPENDLOG;
        }
    } else if (repo.repositoryType == RepositoryDataType::type::CACHE_REPOSITORY) {
        m_cacheRepo = std::make_shared<StorageRepository>(repo);
        HCP_Log(INFO, MODULE) << "set cache repo complete" << HCPENDLOG;
    } else if (repo.repositoryType == RepositoryDataType::type::INDEX_REPOSITORY) {
        m_indexRepo = std::make_shared<StorageRepository>(repo);
        HCP_Log(INFO, MODULE) << "set index repo complete" << HCPENDLOG;
    } else if (repo.repositoryType == RepositoryDataType::type::META_REPOSITORY) {
        StorageRepositoryExtendInfo extendInfo {};
        if (!JsonHelper::JsonStringToStruct(repo.extendInfo, extendInfo)) {
            HCP_Log(ERR, MODULE) << "Meta storage repository extend info is invalid." << HCPENDLOG;
            return FAILED;
        }
        if (extendInfo.isCurrentCopyRepo) {
            m_metaRepo = std::make_shared<StorageRepository>(repo);
            HCP_Log(INFO, MODULE) << "set cur meta repo complete!" << HCPENDLOG;
        } else {
            m_preMetaRepo = std::make_shared<StorageRepository>(repo);
            HCP_Log(INFO, MODULE) << "set pre meta repo complete!" << HCPENDLOG;
        }
    } else {
        HCP_Log(ERR, MODULE) << "Receive invalid repo type : " << repo.repositoryType << HCPENDLOG;
    }
    return SUCCESS;
}

// scaner相关设置
int BuildIndexServiceJob::ProcessHetroIndex()
{
    bool isFullIndex = (m_preRepo == nullptr);
    string curDcahcePath = m_metaRepo->path[0] + dir_sep + METAFILE_PARENT_DIR;
    if (!CheckDcacheExist(curDcahcePath, true)) {
        // do not have dcache, report fail
        ERRLOG("dcahce doesn't exist, %s", curDcahcePath.c_str());
        return FAILED;
    }
    string prevDcachePath = isFullIndex ? "" : m_preMetaRepo->path[0] + dir_sep + METAFILE_PARENT_DIR;
    if (!isFullIndex && !CheckDcacheExist(prevDcachePath, false)) {
        ERRLOG("dcache doesn't exist, %s", prevDcachePath.c_str());
        return FAILED;
    }

    // unzip and prepare latest and previous dir
    string metaFilePath = m_metaRepo->path[0] + dir_sep + METAFILE_PARENT_DIR + dir_sep + METAFILE_ZIP_NAME;
    string preMetaFilePath = isFullIndex ? "" : prevDcachePath + dir_sep + METAFILE_ZIP_NAME;
    string workDir = m_cacheRepo->path[0];

    m_isPreparing = true;
    thread unzipThread(&BuildIndexServiceJob::PrepareForGenerateRfi, this, preMetaFilePath, metaFilePath);
    time_t lastReportTime = PluginUtils::GetCurrentTimeInSeconds();
    while (m_isPreparing) {
        sleep(1);
        time_t currentTime = PluginUtils::GetCurrentTimeInSeconds();
        if ((currentTime - lastReportTime) <= INDEX_REPORT_INTERVAL) {
            continue;
        }
        lastReportTime = currentTime;
        ReportJob(SubJobStatus::RUNNING);
    }
    unzipThread.join();
    INFOLOG("Finish prepare %s, %s", preMetaFilePath.c_str(), metaFilePath.c_str());
    // dcache generate rfi
    ScanConfig scanConfig {};
    FillScanConfigForGenerateRfi(scanConfig, isFullIndex);

    if (!StartScanner(scanConfig)) {
        ERRLOG("Start Scanner Failed!");
        ReportJob(SubJobStatus::FAILED);
        return Module::FAILED;
    }
    
    MonitorScanner();
    if (m_scanner != nullptr) {
        m_scanner->Destroy();
    }
    if (static_cast<int>(m_scanStatus) < 0) {
        ERRLOG("Scan Failed! %d", static_cast<int>(m_scanStatus));
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

// Detect whether a packed dcache/metafile set exists under metaFilePath and
// record its on-disk layout version for the given copy:
//   v1 -- a single legacy metafile zip; v2 -- four split .gz archives.
// @return true when either layout is found; false (and an error log) otherwise.
bool BuildIndexServiceJob::CheckDcacheExist(string& metaFilePath, bool isCur)
{
    const string legacyMetaZip = metaFilePath + dir_sep + METAFILE_ZIP_NAME;
    DBGLOG("CheckDcacheExist %s", legacyMetaZip.c_str());
    if (PluginUtils::IsFileExist(legacyMetaZip)) {
        INFOLOG("old version metafile exist!");
        if (isCur) {
            INFOLOG("cur copy meta version 1");
            m_curMetaFileVersion = MetaFileVersion::METAFILEVERSION_1;
        } else {
            INFOLOG("pre copy meta version 1");
            m_preMetaFileVersion = MetaFileVersion::METAFILEVERSION_1;
        }
        return true;
    }
    // version-2 layout: all four split archives must be present
    const std::vector<string> splitArchives {
        metaFilePath + "/metafile_DIRCACHE.gz",
        metaFilePath + "/metafile_FILECACHE.gz",
        metaFilePath + "/metafile_META.gz",
        metaFilePath + "/metafile_XMETA.gz"
    };
    const bool allPresent = std::all_of(splitArchives.begin(), splitArchives.end(),
        [](const string& archive) { return PluginUtils::IsFileExist(archive); });
    if (allPresent) {
        INFOLOG("metafile exist");
        if (isCur) {
            INFOLOG("cur copy meta version 2");
            m_curMetaFileVersion = MetaFileVersion::METAFILEVERSION_2;
        } else {
            INFOLOG("pre copy meta version 2");
            m_preMetaFileVersion = MetaFileVersion::METAFILEVERSION_2;
        }
        return true;
    }
    ERRLOG("meta file not exist!");
    return false;
}

// Create a scanner instance from scanConfig and start it.
// On a start failure the instance is destroyed before returning.
// @return true when the scanner was created and started successfully.
bool BuildIndexServiceJob::StartScanner(ScanConfig& scanConfig)
{
    INFOLOG("Enter StartScanner: %s, %s", scanConfig.jobId.c_str(), scanConfig.subJobId.c_str());
    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (!m_scanner) {
        ERRLOG("Start Scan failed!");
        return false;
    }
    if (m_scanner->Start() == SCANNER_STATUS::SUCCESS) {
        INFOLOG("Leave Start Scanner!");
        return true;
    }
    ERRLOG("Start scanner instance failed!");
    m_scanner->Destroy();
    return false;
}

void BuildIndexServiceJob::MonitorScanner()
{
    INFOLOG("Enter Monitor Scanner");
    SCANNER_TASK_STATUS scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS;
    SubJobStatus::type jobStatus = SubJobStatus::FAILED;

    do {
        m_scanStatus = m_scanner->GetStatus();
        FillMonitorScannerVarDetails(scanTaskStatus, jobStatus);
        if (scanTaskStatus != SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_INPROGRESS) {
            break;
        }

        if (IsAbortJob()) {
            INFOLOG("Scanner - Abort is invoked for jobId : %s, subjobId : %s", m_indexPara->jobId.c_str(),
                m_subJobInfo->subJobId.c_str());
            m_scanner->Abort();
            ReportJob(SubJobStatus::ABORTING);
            break;
        }
        ReportJob(SubJobStatus::RUNNING);
        sleep(SLEEP_TEN_SECONDS);
    } while (true);
}

// Translate the scanner status held in m_scanStatus into the monitor's task
// status and sub-job status. Statuses not listed leave both outputs untouched
// (the monitor then keeps treating the scan as in progress).
void BuildIndexServiceJob::FillMonitorScannerVarDetails(SCANNER_TASK_STATUS& scanTaskStatus,
    SubJobStatus::type& jobStatus)
{
    switch (m_scanStatus) {
        case SCANNER_STATUS::COMPLETED:
            HCP_Log(INFO, MODULE) << "Scan completed" << HCPENDLOG;
            jobStatus = SubJobStatus::COMPLETED;
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_SUCCESS;
            break;
        case SCANNER_STATUS::FAILED:
            HCP_Log(ERR, MODULE) << "Scan failed" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ABORT_IN_PROGRESS:
            HCP_Log(ERR, MODULE) << "Scan abort in progress" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTING;
            break;
        case SCANNER_STATUS::ABORTED:
            HCP_Log(ERR, MODULE) << "Scan aborted" << HCPENDLOG;
            jobStatus = SubJobStatus::ABORTED;
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_ABORTED;
            break;
        case SCANNER_STATUS::SCAN_READ_COMPLETED:
        case SCANNER_STATUS::CTRL_DIFF_IN_PROGRESS:
            jobStatus = SubJobStatus::RUNNING;
            break;
        case SCANNER_STATUS::SECONDARY_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as sec nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::PROTECTED_SERVER_NOT_REACHABLE:
            HCP_Log(ERR, MODULE) << "Scan failed as protected nas server is not reachable" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        case SCANNER_STATUS::ERROR_INC_TO_FULL:
            HCP_Log(ERR, MODULE) << "Scan failed as to change INC to FULL Backup" << HCPENDLOG;
            jobStatus = SubJobStatus::FAILED;
            scanTaskStatus = SCANNER_TASK_STATUS::SCANNER_TASK_STATUS_FAILED;
            break;
        default:
            break;
    }
}

// Homogeneous (NasFileSystem) index entry: dispatches to the incremental flow
// when the index type is HOMO_INDEX_INC; otherwise performs a full scan of the
// current copy, archives the meta files to the meta repo on a worker thread
// (reporting progress while waiting), then runs the scanner in RFI mode.
int BuildIndexServiceJob::ProcessHomoIndex()
{
    if (m_indexType == IndexType::HOMO_INDEX_INC) {
        return ProcessHomoIncIndex();
    }
    PluginUtils::CreateDirectory(PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST));
    // Full index
    if (ProcessHomoScan(true) != Module::SUCCESS) {
        ERRLOG("Process Full Scan Failed!");
        return Module::FAILED;
    }
    // CopyMetaFileToMetaRepo clears m_isPreparing when it finishes; poll it
    // and emit RUNNING reports so the job is not treated as hung meanwhile.
    // NOTE(review): m_isPreparing is shared with the worker thread --
    // presumably std::atomic; confirm, otherwise this is a data race.
    m_isPreparing = true;
    thread copyThread(&BuildIndexServiceJob::CopyMetaFileToMetaRepo, this, true);
    time_t lastReportTime = PluginUtils::GetCurrentTimeInSeconds();
    while (m_isPreparing) {
        sleep(1);
        time_t currentTime = PluginUtils::GetCurrentTimeInSeconds();
        if ((currentTime - lastReportTime) <= INDEX_REPORT_INTERVAL) {
            continue;
        }
        lastReportTime = currentTime;
        ReportJob(SubJobStatus::RUNNING);
    }
    copyThread.join();
    PluginUtils::CreateDirectory(PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + RFI));
    // zip and move to meta repo && generate rfi and report
    ScanConfig scanConfig {};
    FillScanConfigForGenerateRfi(scanConfig, true);

    if (!StartScanner(scanConfig)) {
        ERRLOG("Start scanner Failed!");
        return FAILED;
    }

    MonitorScanner();
    if (m_scanner != nullptr) {
        m_scanner->Destroy();
    }
    // negative scanner status values denote failure
    if (static_cast<int>(m_scanStatus) < 0) {
        ERRLOG("Scan Failed! %d", static_cast<int>(m_scanStatus));
        return FAILED;
    }
    return SUCCESS;
}

// 检查CIFS共享
bool BuildIndexServiceJob::CheckCifsConnectivity(const std::string& ip)
{
    INFOLOG("Enter check Cifs Connectivity");
    bool ret = InitNasShareInfo();
    if (!ret) {
        ERRLOG("Init Nas Share Info Failed!");
        return false;
    }

    std::vector<Module::SmbVersion> cifsVersion {
        SmbVersion::VERSION0311,
        SmbVersion::VERSION0302,
        SmbVersion::VERSION0300,
        SmbVersion::VERSION0210,
        SmbVersion::VERSION0202
    };

    vector<std::string> m_serviceIPList = {ip};
    for (auto version : cifsVersion) {
        if (CheckSmbConnection(m_serviceIPList[REPOINDEX], version)) {
            INFOLOG("check ip : %s success!", m_serviceIPList[REPOINDEX].c_str());
            return true;
        }
    }
    ERRLOG("check Smb Connection faild");
    return false;
}

bool BuildIndexServiceJob::InitNasShareInfo()
{
    INFOLOG("Enter Init Nas Share Info");
    /* Protected NAS Share details */
    std::string remoteName = m_curRepo->remoteName;
    m_nasShare.sharePath = remoteName[0] == '/' ? remoteName : "/" + remoteName;
    INFOLOG("m_nasSharePath: %s", m_nasShare.sharePath.c_str());
    m_nasShare.auth = m_curRepo->auth;
    if (!Module::JsonHelper::JsonStringToStruct(m_indexPara->indexProtectObject.extendInfo, m_nasShare.nasShareExt)) {
        HCP_Log(ERR, MODULE) << "JsonStringToStruct failed." << HCPENDLOG;
        return false;
    }
    return true;
}

// Attempt one SMB handshake to serviceIP with the given dialect, using the
// NAS share credentials prepared by InitNasShareInfo().
// Fix: return the bool literal 'false' instead of the MP_FALSE macro from a
// bool-returning function (consistency with every other return in it).
// @return true when both context init and connect succeed.
bool BuildIndexServiceJob::CheckSmbConnection(std::string serviceIP, Module::SmbVersion version)
{
    INFOLOG("enter check smb connection");
    // kerberos cache/config paths are keyed by the job id so parallel jobs do
    // not collide -- NOTE(review): confirm these files are cleaned up elsewhere.
    std::string agentHomePath = Module::EnvVarManager::GetInstance()->GetAgentHomePath();
    std::string krb5CcacheFile = agentHomePath + KRB5CCNAMEPREFIX + m_jobId;
    std::string krb5ConfigFile = agentHomePath + KRB5CONFIGPREFIX + m_jobId + KRB5CONFIGPOSTFIX;

    // only password-style auth types are supported; they all map to NTLMSSP
    Module::SmbAuthType smbRmtAuthType;
    if (m_nasShare.auth.authType == AuthType::type::NO_AUTHENTICATION ||
        m_nasShare.auth.authType == AuthType::type::OS_PASSWORD ||
        m_nasShare.auth.authType == AuthType::type::APP_PASSWORD) {
        smbRmtAuthType = Module::SmbAuthType::NTLMSSP;
    } else {
        HCP_Log(INFO, MODULE) << "Wrong authType for cifs share: " << m_nasShare.auth.authType << HCPENDLOG;
        return false;
    }
    bool smbEncryption = (m_nasShare.nasShareExt.m_encryption == SMB_ENCRYPTION) ? true : false;
    Module::SmbContextArgs smbContextArgs = {
        m_nasShare.nasShareExt.m_domainName,
        serviceIP,
        m_nasShare.sharePath,
        m_nasShare.auth.authkey,
        m_nasShare.auth.authPwd,
        krb5CcacheFile,
        krb5ConfigFile,
        smbEncryption,
        false,
        ONE_MINUTE,
        smbRmtAuthType,
        version
    };
    INFOLOG("SmbContextArgs.domain: %s", smbContextArgs.domain.c_str());
    INFOLOG("SmbContextArgs.server: %s", smbContextArgs.server.c_str());
    INFOLOG("SmbContextArgs.share: %s", smbContextArgs.share.c_str());

    Module::SmbContextWrapper rmtSmb(smbContextArgs);
    if (!rmtSmb.Init()) {
        ERRLOG("smb init failed");
        return false;
    }

    if (!rmtSmb.SmbConnect()) {
        INFOLOG("connect failed version 0x%X\n", static_cast<int>(version));
        INFOLOG("smb connect failed");
        return false;
    }
    INFOLOG("smb connect success");
    return true;
}

bool BuildIndexServiceJob::GetValideServiceIP()
{
    bool ret = false;
    if (m_curRepo->protocol == RepositoryProtocolType::type::CIFS) {
        std::vector<std::string> ipList;
        for (const auto& host : m_curRepo->remoteHost) {
            ipList = {host.ip};
            INFOLOG("ADD ip route : %s", host.ip.c_str());
            ret = PluginUtils::OperateIpsRule(ipList, "ADD");
            ret = CheckCifsConnectivity(host.ip);
            if (ret) {
                m_serviceIP = host.ip;
                break;
            }
            ret = PluginUtils::OperateIpsRule(ipList, "DELETE");
        }
    } else {
        INFOLOG("repo protocol is not CIFS");
        ret = true;
    }
    return ret;
}

// Full scan of one copy (current or previous) of a homogeneous file system:
// resolve a reachable service IP, install the SMB ip rule for the duration of
// the scan (removed on every exit path by the deleter guard), build the scan
// config, seed the scan root and drive the scanner to completion.
// @param isCur  true = scan the current copy, false = scan the previous copy
int BuildIndexServiceJob::ProcessHomoScan(bool isCur)
{
    INFOLOG("Enter ProcessHomoScan");
    if (!GetValideServiceIP()) {
        ERRLOG("get valid service ip failed");
        return Module::FAILED;
    }
    // scope guard: the SMB ip rule is deleted whenever this function returns
    std::shared_ptr<void> defer(nullptr, [&](...) {
        DeleteIpsRuleForSmb();
    });

    // CIFS-backed file systems need an explicit route before scanning
    if (!AddIpsRuleForSmb()) {
        ERRLOG("Add ip rule failed!");
        return Module::FAILED;
    }
    // A rescan must reuse the hash algorithm (CRC/SHA_1) of the previous run;
    // FillScanConfigForScan takes care of detecting it.
    ScanConfig scanConfig {};
    FillScanConfigForScan(scanConfig, isCur);

    m_scanner = ScanMgr::CreateScanInst(scanConfig);
    if (m_scanner == nullptr) {
        ERRLOG("Start scanner failed!");
        return Module::FAILED;
    }

    // Seed the scan root: NFS mounts are entered at ".", CIFS shares at "".
    const auto& scanRepo = isCur ? m_curRepo : m_preRepo;
    if (scanRepo->protocol == RepositoryProtocolType::type::NFS) {
        m_scanner->Enqueue(".");
    } else if (scanRepo->protocol == RepositoryProtocolType::type::CIFS) {
        m_scanner->Enqueue("");
    }

    if (m_scanner->Start() != SCANNER_STATUS::SUCCESS) {
        ERRLOG("Start scanner instance failed!");
        return Module::FAILED;
    }

    INFOLOG("Start Scanner Success!");
    MonitorScanner();
    INFOLOG("Scan Finish!");
    if (m_scanner != nullptr) {
        m_scanner->Destroy();
    }
    if (static_cast<int>(m_scanStatus) < 0) {
        ERRLOG("Scan Failed! %d", static_cast<int>(m_scanStatus));
        return Module::FAILED;
    }
    return Module::SUCCESS;
}

bool BuildIndexServiceJob::AddIpsRuleForSmb()
{
    std::vector<std::string> ipList = {m_serviceIP};
    return PluginUtils::OperateIpsRule(ipList, "ADD");
}

bool BuildIndexServiceJob::DeleteIpsRuleForSmb()
{
    std::vector<std::string> ipList = {m_serviceIP};
    return PluginUtils::OperateIpsRule(ipList, "DELETE");
}

// Incremental homogeneous index: make sure the meta files of both the previous
// and the current copy are available in the cache work dir -- scanning
// whichever copy has no dcache archive yet -- then generate the RFI diff via
// ProcessHomoIncIndex2().
int BuildIndexServiceJob::ProcessHomoIncIndex()
{
    // Scan previous when it has no dcache; scan cur when it has no dcache.
    // pre meta path: metaRepo + copyid + metafile.zip
    // cur meta path: metaRepo + copyid + metafile.zip
    PluginUtils::CreateDirectory(PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST));
    // NOTE(review): m_preMetaRepo / m_preRepoExtendInfo are dereferenced without
    // null checks and IdentifyRepos() does not validate them -- confirm they are
    // always present for HOMO_INDEX_INC jobs.
    string prevMetaFileName = m_preMetaRepo->path[0] + dir_sep + m_preRepoExtendInfo->copyId + dir_sep +
        METAFILE_ZIP_NAME;
    string curMetaFileName = m_metaRepo->path[0] + dir_sep + m_curRepoExtendInfo->copyId + dir_sep + METAFILE_ZIP_NAME;
    bool prevDcacheExist = PluginUtils::IsFileExist(prevMetaFileName);
    bool curDcacheExist = PluginUtils::IsFileExist(curMetaFileName);

    // Prepare the PREVIOUS copy first; the worker clears m_isPreparing when done
    // and records failures in m_result.
    m_isPreparing = true;
    m_result = Module::SUCCESS;
    thread prepareThread(&BuildIndexServiceJob::PrepareForHomoIncIndex, this, prevDcacheExist, false);
    time_t lastReportTime = PluginUtils::GetCurrentTimeInSeconds();
    while (m_isPreparing) {
        sleep(1);
        time_t currentTime = PluginUtils::GetCurrentTimeInSeconds();
        if ((currentTime - lastReportTime) <= INDEX_REPORT_INTERVAL) {
            continue;
        }
        lastReportTime = currentTime;
        ReportJob(SubJobStatus::RUNNING);
    }
    prepareThread.join();
    if (m_result != Module::SUCCESS) {
        return Module::FAILED;
    }

    // move meta latest to previous
    // NOTE(review): these two paths concatenate META + LATEST/PREVIOUS without a
    // dir_sep, unlike META + dir_sep + LATEST above -- verify the LATEST/PREVIOUS
    // constants carry a leading separator, otherwise this is a path bug.
    string latestMetaPath = m_cacheRepo->path[0] + dir_sep + META + LATEST;
    string previousMetaPath = m_cacheRepo->path[0] + dir_sep + META + PREVIOUS;
    INFOLOG("rename dir : %s, %s", latestMetaPath.c_str(), previousMetaPath.c_str());
    PluginUtils::RenameDir(latestMetaPath, previousMetaPath);
    // Now prepare the CURRENT copy the same way.
    m_isPreparing = true;
    m_result = Module::SUCCESS;
    thread prepareThread2(&BuildIndexServiceJob::PrepareForHomoIncIndex, this, curDcacheExist, true);
    while (m_isPreparing) {
        sleep(1);
        time_t currentTime = PluginUtils::GetCurrentTimeInSeconds();
        if ((currentTime - lastReportTime) <= INDEX_REPORT_INTERVAL) {
            continue;
        }
        lastReportTime = currentTime;
        ReportJob(SubJobStatus::RUNNING);
    }
    prepareThread2.join();
    if (m_result != Module::SUCCESS) {
        return Module::FAILED;
    }
    return ProcessHomoIncIndex2();
}

int BuildIndexServiceJob::ProcessHomoIncIndex2()
{
    // generate rfi.
    PluginUtils::CreateDirectory(PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + RFI));
    // zip and move to meta repo && generate rfi and report
    ScanConfig scanConfig {};
    FillScanConfigForGenerateRfi(scanConfig, false);

    if (!StartScanner(scanConfig)) {
        ERRLOG("Start scanner Failed!");
        return FAILED;
    }

    MonitorScanner();
    if (m_scanner != nullptr) {
        m_scanner->Destroy();
    }
    if (static_cast<int>(m_scanStatus) < 0) {
        ERRLOG("Scan Failed! %d", static_cast<int>(m_scanStatus));
        return FAILED;
    }
    return SUCCESS;
}

// Worker-thread body: when the copy has no dcache archive yet, scan it and
// archive the resulting meta files to the meta repo; otherwise unzip the
// existing archive into the cache work dir. Records scan failures in m_result
// and clears m_isPreparing when done so the caller's polling loop exits.
// @param dcacheExist  whether the copy's metafile zip already exists
// @param isCur        true = current copy, false = previous copy
void BuildIndexServiceJob::PrepareForHomoIncIndex(bool dcacheExist, bool isCur)
{
    if (!dcacheExist) {
        if (ProcessHomoScan(isCur) != Module::SUCCESS) {
            ERRLOG("ProcessHomoScan failed! isCur: %d", isCur);
            m_result = Module::FAILED;
        }
        // NOTE(review): the meta files are archived even when the scan above
        // failed -- confirm this best-effort copy is intentional.
        CopyMetaFileToMetaRepo(isCur);
    } else {
        INFOLOG("Prev dcache exists!");
        UnzipToWorkDir(isCur);
    }
    m_isPreparing = false;
}

// Zip the cache repo's meta/latest directory and store the archive in the meta
// repository under the copy id, then clear m_isPreparing so the caller's
// polling loop stops. zip failures are logged only -- the caller cannot
// observe them.
// NOTE(review): the shell command embeds unquoted paths; paths containing
// spaces or shell metacharacters would break or be interpreted by the shell.
// Confirm repository paths are always shell-safe.
void BuildIndexServiceJob::CopyMetaFileToMetaRepo(bool isCur)
{
    vector<string> output;
    vector<string> errOutput;
    // pick the meta repo / extend info of the copy being processed
    auto metaRepoPtr = isCur ? m_metaRepo : m_preMetaRepo;
    auto extendInfo = isCur ? m_curRepoExtendInfo : m_preRepoExtendInfo;
    string metaRepoPath = PluginUtils::StandardPath(metaRepoPtr->path[0] + dir_sep + extendInfo->copyId);
    INFOLOG("create dir : %s", metaRepoPath.c_str());
    PluginUtils::CreateDirectory(metaRepoPath);

    // zip flags: -q quiet, -r recurse, -j junk (flatten) directory names
    string cmd = "zip -qrj " + metaRepoPath + dir_sep + METAFILE_ZIP_NAME + " " +
        m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST + "/*";
    INFOLOG("zip cmd : %s", cmd.c_str());
    int ret = runShellCmdWithOutput(INFO, MODULE, 0, cmd, { }, output, errOutput);
    if (ret != 0) {
        ERRLOG("run shell cmd failed! %s", cmd.c_str());
        for (auto msg : errOutput) {
            ERRLOG("errmsg : %s", msg.c_str());
        }
    }
    // signal the caller's wait loop that preparation finished
    m_isPreparing = false;
}

// Extract the given copy's metafile zip from the meta repository into the
// cache repo's meta/latest directory via the system `unzip` tool. Failures
// are logged only; the caller cannot observe them.
// NOTE(review): the shell command embeds unquoted paths -- confirm repository
// paths can never contain spaces or shell metacharacters.
void BuildIndexServiceJob::UnzipToWorkDir(bool isCur) const
{
    vector<string> output;
    vector<string> errOutput;
    // pick the meta repo / extend info of the copy being restored
    auto metaRepoPtr = isCur ? m_metaRepo : m_preMetaRepo;
    auto extendInfo = isCur ? m_curRepoExtendInfo : m_preRepoExtendInfo;
    // NOTE(review): metaRepoPath is computed but never used below
    string metaRepoPath = metaRepoPtr->path[0] + dir_sep + extendInfo->copyId;
    string targetPath = m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST;
    string metaFilePath = metaRepoPtr->path[0] + dir_sep + extendInfo->copyId + dir_sep +
        METAFILE_ZIP_NAME;
    string cmd = "unzip -d " + targetPath + " " + metaFilePath;
    INFOLOG("unzip cmd : %s", cmd.c_str());
    int ret = runShellCmdWithOutput(INFO, MODULE, 0, cmd, { }, output, errOutput);
    if (ret != 0) {
        ERRLOG("run shell cmd failed! %s", cmd.c_str());
        for (auto msg : errOutput) {
            ERRLOG("errmsg : %s", msg.c_str());
        }
    }
}

// For an incremental scan the hash algorithm of the previous run must be
// reused: inspect the previous dircache (when present) and select SHA_1 for
// dcache versions newer than 2.0, CRC otherwise.
// Fixes: std::stoi on the dcache version string throws on malformed input --
// the original would terminate the process; it now falls back to CRC and
// still closes the file. stoi("2.0") was also re-parsed on every call (it is
// just the major version 2), and the CTRL_FILE_RETCODE enum is now cast to
// int before being passed through the %d varargs log.
void BuildIndexServiceJob::SetScanHashType(ScanConfig& scanConfig) const
{
    string prevDcacheFile = scanConfig.metaPath + PREVIOUS + dir_sep + DIRCACHE_FILE_NAME;
    DBGLOG("prevDcacheFile path: %s", prevDcacheFile.c_str());
    if (!PluginUtils::IsFileExist(prevDcacheFile)) {
        INFOLOG("prevDcacheFile not exist");
        return;
    }
    INFOLOG("prevDcacheFile exists");
    std::shared_ptr<DirCacheParser> prevDcacheObj = CreateDcacheObj(prevDcacheFile);
    if (prevDcacheObj == nullptr) {
        ERRLOG("dcache obj create fail");
        return;
    }
    // parse the major version defensively; malformed versions default to 0 (CRC path)
    int prevMajorVersion = 0;
    try {
        prevMajorVersion = std::stoi(prevDcacheObj->GetVersion());
    } catch (const std::exception& e) {
        ERRLOG("invalid dcache version, fall back to CRC: %s", e.what());
    }
    constexpr int LEGACY_MAJOR_VERSION = 2;  // dcache versions > 2.0 switched from CRC to SHA_1
    if (prevMajorVersion > LEGACY_MAJOR_VERSION) {
        INFOLOG("preVersion is newer than 2.0, use SHA_1");
        scanConfig.scanHashType = SCAN_HASH_TYPE::SHA_1;
    } else {
        INFOLOG("preVersion is 2.0, use CRC");
        scanConfig.scanHashType = SCAN_HASH_TYPE::CRC;
    }
    // close file
    CTRL_FILE_RETCODE ret = prevDcacheObj->Close(CTRL_FILE_OPEN_MODE::READ);
    if (ret != CTRL_FILE_RETCODE::SUCCESS) {
        ERRLOG("Close dcache control file failed, errno: %d", static_cast<int>(ret));
    }
}

std::shared_ptr<DirCacheParser> BuildIndexServiceJob::CreateDcacheObj(const std::string& fname) const
{
    DBGLOG("Create Dcache obj with only read : %s", fname.c_str());
    std::shared_ptr<DirCacheParser> dirCacheObj = nullptr;
    dirCacheObj = std::make_shared<DirCacheParser>(fname);
    if (dirCacheObj == nullptr) {
        ERRLOG("Create scanner dircache instance failed filename: %s", fname.c_str());
        return nullptr;
    }

    CTRL_FILE_RETCODE ret = dirCacheObj->Open(CTRL_FILE_OPEN_MODE::READ);
    if (ret != CTRL_FILE_RETCODE::SUCCESS) {
        ERRLOG("Open dcache control file failed");
        return nullptr;
    }
    return dirCacheObj;
}

void BuildIndexServiceJob::FillScanConfigForScan(ScanConfig& scanConfig, bool isCur)
{
    // Populate the scanner configuration for the metadata scan phase.
    // isCur selects between the current and the previous copy's repository.
    INFOLOG("Enter FillScanConfig for scan");

    // Small helpers so each config lookup below stays on one line.
    auto cfgString = [](const std::string& key) {
        return Module::ConfigReader::getString(DME_NAS_CONFIG_SECTION, key);
    };
    auto cfgInt = [](const std::string& key) {
        return Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION, key);
    };

    scanConfig.jobId = m_indexPara->jobId;
    isCur ? FillScanConfigBaseOnProtocol(scanConfig) : FillScanConfigBaseOnProtocolPrev(scanConfig);

    scanConfig.scanType = ScanJobType::FULL;
    scanConfig.usrData = (void*)this;
    scanConfig.lastBackupTime = 0;
    scanConfig.useLastBackupTime = false;

    /* Path */
    scanConfig.metaPath = m_cacheRepo->path[0] + dir_sep + META;
    scanConfig.metaPathForCtrlFiles = m_cacheRepo->path[0] + dir_sep + CTRL;

    // Must run after metaPath is set: it inspects the previous dircache there.
    SetScanHashType(scanConfig);

    /* Scanner callbacks */
    scanConfig.scanResultCb = ScannerCtrlFileCallBack;
    scanConfig.scanHardlinkResultCb = ScannerHardLinkCallBack;
    scanConfig.mtimeCtrlCb = BackupDirMTimeCallBack;
    scanConfig.deleteCtrlCb = BackupDelCtrlCallBack;

    /* Tuning knobs read from the DME NAS config section */
    scanConfig.maxCommonServiceInstance = 1;
    scanConfig.scanCtrlMaxDataSize = cfgString("DME_NAS_SCAN_CTRL_MAX_DATASIZE");
    scanConfig.scanCtrlMinDataSize = cfgString("DME_NAS_SCAN_CTRL_MIN_DATASIZE");
    scanConfig.scanCtrlFileTimeSec = cfgInt("DME_NAS_SCAN_CTRL_FILE_TIME_SEC");
    scanConfig.scanCtrlMaxEntriesFullBkup = cfgInt("DME_NAS_SCAN_CTRL_MAX_ENTRIES_FULLBKUP");
    scanConfig.scanCtrlMaxEntriesIncBkup = cfgInt("DME_NAS_SCAN_CTRL_MAX_ENTRIES_INCRBKUP");
    scanConfig.scanCtrlMinEntriesFullBkup = cfgInt("DME_NAS_SCAN_CTRL_MIN_ENTRIES_FULLBKUP");
    scanConfig.scanCtrlMinEntriesIncBkup = cfgInt("DME_NAS_SCAN_CTRL_MIN_ENTRIES_INCRBKUP");
    scanConfig.scanMetaFileSize = cfgInt("DME_NAS_SCAN_DEFAULT_META_FILE_SIZE");
    scanConfig.scanCheckPointEnable = cfgInt("DME_NAS_SCAN_CHECKPOINT_ENABLED");
}

void BuildIndexServiceJob::ScannerCtrlFileCallBack(void *usrData, const string &controlFilePath)
{
    // Scanner control-file callback stub: only traces the path.
    // (void)-cast instead of the old self-assignment, which trips
    // -Wself-assign and differs from the idiom used by the other callbacks.
    (void)usrData;
    DBGLOG("Callback Received for control File path: %s", controlFilePath.c_str());
}

void BuildIndexServiceJob::ScannerHardLinkCallBack(void *usrData, const string &controlFilePath)
{
    // Hard-link control-file callback stub: only traces the path.
    // (void)-cast replaces the old self-assignment used to silence the
    // unused-parameter warning.
    (void)usrData;
    DBGLOG("Callback Received for control File path: %s", controlFilePath.c_str());
}

void BuildIndexServiceJob::BackupDirMTimeCallBack(void *usrData, const string &controlFilePath)
{
    // Directory-mtime control-file callback stub: only traces the path.
    // (void)-cast replaces the old self-assignment used to silence the
    // unused-parameter warning.
    (void)usrData;
    DBGLOG("Callback Received for control File path: %s", controlFilePath.c_str());
}

void BuildIndexServiceJob::BackupDelCtrlCallBack(void *usrData, const string &controlFilePath)
{
    // Delete-control-file callback stub: only traces the path.
    // (void)-cast replaces the old self-assignment used to silence the
    // unused-parameter warning.
    (void)usrData;
    DBGLOG("Callback Received for control File path: %s", controlFilePath.c_str());
}

void BuildIndexServiceJob::FillScanConfigBaseOnProtocol(ScanConfig& scanConfig)
{
    // Configure the scan IO engine for the current copy's repository:
    // NFS shares go through libnfs, everything else through libsmb2.
    if (m_curRepo->protocol != RepositoryProtocolType::type::NFS) {
        scanConfig.scanIO = IOEngine::LIBSMB2;
        scanConfig.smb.server = m_serviceIP;
        scanConfig.smb.share = m_curRepo->remoteName;
        scanConfig.smb.user = m_curRepo->auth.authkey;
        scanConfig.smb.password = m_curRepo->auth.authPwd;
        INFOLOG("Scan smb share: %s - %s, user: %s", scanConfig.smb.server.c_str(),
            scanConfig.smb.share.c_str(), scanConfig.smb.user.c_str());
        return;
    }
    scanConfig.scanIO = IOEngine::LIBNFS;
    scanConfig.nfs.m_serverIp = m_curRepo->remoteHost[REPOINDEX].ip;
    scanConfig.nfs.m_serverPath = m_curRepo->remotePath;
    scanConfig.nfs.m_nasServerCheckSleepTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_SLEEP_TIME");
    scanConfig.nfs.maxOpendirReqCount = NUMBER8;
    INFOLOG("Scan nfs share: %s - %s", scanConfig.nfs.m_serverIp.c_str(), scanConfig.nfs.m_serverPath.c_str());
}

void BuildIndexServiceJob::FillScanConfigBaseOnProtocolPrev(ScanConfig& scanConfig)
{
    // Configure the scan IO engine for the PREVIOUS copy's repository:
    // NFS shares go through libnfs, everything else through libsmb2.
    if (m_preRepo->protocol != RepositoryProtocolType::type::NFS) {
        scanConfig.scanIO = IOEngine::LIBSMB2;
        scanConfig.smb.server = m_serviceIP;
        scanConfig.smb.share = m_preRepo->remoteName;
        scanConfig.smb.user = m_preRepo->auth.authkey;
        scanConfig.smb.password = m_preRepo->auth.authPwd;
        INFOLOG("Scan smb share: %s - %s, user: %s", scanConfig.smb.server.c_str(),
            scanConfig.smb.share.c_str(), scanConfig.smb.user.c_str());
        return;
    }
    scanConfig.scanIO = IOEngine::LIBNFS;
    scanConfig.nfs.m_serverIp = m_preRepo->remoteHost[REPOINDEX].ip;
    scanConfig.nfs.m_serverPath = m_preRepo->remotePath;
    scanConfig.nfs.m_nasServerCheckSleepTime = Module::ConfigReader::getInt(DME_NAS_CONFIG_SECTION,
        "DME_NAS_BACKUP_NAS_SERVER_CHECK_SLEEP_TIME");
    scanConfig.nfs.maxOpendirReqCount = NUMBER8;
    INFOLOG("Scan nfs share: %s - %s", scanConfig.nfs.m_serverIp.c_str(), scanConfig.nfs.m_serverPath.c_str());
}

void BuildIndexServiceJob::PrintRepo(const StorageRepository& repo)
{
    // Dump a repository's key fields to the log for troubleshooting.
    HCP_Log(INFO, MODULE) << "Enter PrintRepo!" << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "repo type: " << repo.repositoryType << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "repo isLocal: " << repo.isLocal << HCPENDLOG;
    // Iterate by const reference to avoid copying each path string.
    for (const auto& tmp : repo.path) {
        HCP_Log(INFO, MODULE) << "repo path: " << tmp << HCPENDLOG;
    }
    HCP_Log(INFO, MODULE) << "repo protocol: " << repo.protocol << HCPENDLOG;
    // Note: the duplicate "repo type" line that was logged here was removed.
    HCP_Log(INFO, MODULE) << "repo remotePath: " << repo.remotePath << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "repo remoteName: " << repo.remoteName << HCPENDLOG;
    HCP_Log(INFO, MODULE) << "repo extendInfo: " << repo.extendInfo << HCPENDLOG;
}

void BuildIndexServiceJob::ReportJob(SubJobStatus::type status)
{
    // Push a status-only progress report (0/0 progress) to the agent.
    SubJobDetails details;
    ActionResult reportResult;
    LogDetail detail{};
    std::vector<LogDetail> detailList;
    AddLogDetail(detail, "", JobLogLevel::TASK_LOG_INFO);
    REPORT_LOG2AGENT(details, reportResult, detailList, detail, 0, 0, status);
}

void BuildIndexServiceJob::PrepareForGenerateRfi(string preMetaFilePath, string curMetaFilePath)
{
    // Unpack the current (and, if present, previous) copy's metafile archives
    // into the cache work directories before RFI generation.
    // On any unzip failure the sub job is reported FAILED and preparation stops.
    DBGLOG("Enter PrepareForGenerateRfi: %s, %s", preMetaFilePath.c_str(), curMetaFilePath.c_str());
    bool ret = (m_curMetaFileVersion == MetaFileVersion::METAFILEVERSION_1)
        ? UnzipCurMetafileToWorkDirV1(curMetaFilePath)
        : UnzipCurMetafileToWorkDirV2(curMetaFilePath);
    if (!ret) {
        // Fix: the old code reported FAILED here but fell through and kept
        // unzipping the previous metafile; bail out once the job has failed
        // (and still clear the preparing flag so waiters don't hang).
        ReportJob(SubJobStatus::FAILED);
        m_isPreparing = false;
        return;
    }

    // No previous copy: nothing more to unpack.
    if (preMetaFilePath.empty()) {
        m_isPreparing = false;
        return;
    }

    ret = (m_preMetaFileVersion == MetaFileVersion::METAFILEVERSION_1)
        ? UnzipPreMetafileToWorkDirV1(preMetaFilePath)
        : UnzipPreMetafileToWorkDirV2(preMetaFilePath);
    if (!ret) {
        ReportJob(SubJobStatus::FAILED);
    }
    m_isPreparing = false;
}

bool BuildIndexServiceJob::UnzipCurMetafileToWorkDirV1(const std::string& metaFilePath)
{
    // 先删掉cache/meta
    PluginUtils::Remove(m_cacheRepo->path[0] + dir_sep + META);
    // unzip curMetaFilePath to workDir/latest
    string workDir = PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST);
    string rfiDir = PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + RFI);
    PluginUtils::CreateDirectory(workDir);
    PluginUtils::CreateDirectory(rfiDir);
    string curMetaFileZipFileName = m_metaRepo->path[0] + dir_sep + METAFILE_PARENT_DIR +
        dir_sep + METAFILE_ZIP_NAME;
    string cmd = "unzip -d " + workDir + " " + curMetaFileZipFileName;
    INFOLOG("the unzip cmd is : %s", cmd.c_str());
    return CheckUnzipMetafileSucceed(cmd, workDir);
}

bool BuildIndexServiceJob::UnzipCurMetafileToWorkDirV2(const std::string& metaFilePath)
{
    string execCmd;
    string workDir = PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST);
    string rfiDir = PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + RFI);
    PluginUtils::CreateDirectory(workDir);
    PluginUtils::CreateDirectory(rfiDir);
    string dirCacheZipFileName = m_metaRepo->path[0] + "/filemeta/metafile_DIRCACHE.gz";
    string fCacheZipFileName = m_metaRepo->path[0] + "/filemeta/metafile_FILECACHE.gz";
    string metaZipFilename  = m_metaRepo->path[0] + "/filemeta/metafile_META.gz";
    string xmetaZipFileName = m_metaRepo->path[0] + "/filemeta/metafile_XMETA.gz";
    execCmd = PrepareUnZipCommand(workDir, dirCacheZipFileName, fCacheZipFileName, metaZipFilename,
        xmetaZipFileName);
    INFOLOG("the unzip cmd is : %s", execCmd.c_str());
    return CheckUnzipMetafileSucceed(execCmd, workDir);
}

bool BuildIndexServiceJob::UnzipPreMetafileToWorkDirV1(const std::string& metaFilePath)
{
    // unzip preMetaFilePath to workDir/previous
    string workDir = PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + META + dir_sep + PREVIOUS);
    PluginUtils::CreateDirectory(workDir);
    string preMetaFileZipFileName = m_preMetaRepo->path[0] + dir_sep + METAFILE_PARENT_DIR +
        dir_sep + METAFILE_ZIP_NAME;
    string cmd = "unzip -d " + workDir + " " + preMetaFileZipFileName;
    INFOLOG("the unzip cmd is : %s", cmd.c_str());
    return CheckUnzipMetafileSucceed(cmd, workDir);
}

bool BuildIndexServiceJob::UnzipPreMetafileToWorkDirV2(const std::string& metaFilePath)
{
    string execCmd;
    string workDir = PluginUtils::StandardPath(m_cacheRepo->path[0] + dir_sep + META + dir_sep + PREVIOUS);
    PluginUtils::CreateDirectory(workDir);
    string dirCacheZipFileName = m_preMetaRepo->path[0] + "/filemeta/metafile_DIRCACHE.gz";
    string fCacheZipFileName = m_preMetaRepo->path[0] + "/filemeta/metafile_FILECACHE.gz";
    string metaZipFilename  = m_preMetaRepo->path[0] + "/filemeta/metafile_META.gz";
    string xmetaZipFileName = m_preMetaRepo->path[0] + "/filemeta/metafile_XMETA.gz";
    execCmd = PrepareUnZipCommand(workDir, dirCacheZipFileName, fCacheZipFileName, metaZipFilename,
        xmetaZipFileName);
    INFOLOG("the unzip cmd is : %s", execCmd.c_str());
    return CheckUnzipMetafileSucceed(execCmd, workDir);
}

bool BuildIndexServiceJob::CheckUnzipMetafileSucceed(const std::string& execCmd, const std::string& workDir)
{
    // Run the extraction command with retries; succeed only when the expected
    // metafiles actually appear in workDir (the shell can exit 0 without
    // extracting anything).
    vector<string> output;
    vector<string> errOutput;
    int retryCnt = 0;
    do {
        // Fix: clear BOTH buffers each attempt; the old code only cleared
        // errOutput, so stale stdout accumulated across retries.
        output.clear();
        errOutput.clear();
        int ret = Module::runShellCmdWithOutput(INFO, MODULE, 0, execCmd, {}, output, errOutput);
        if (ret != 0) {
            ERRLOG("unzip failed, ret: %d, retry: %d", ret, retryCnt);
            for (const auto& msg : errOutput) {
                ERRLOG("errmsg: %s", msg.c_str());
            }
            sleep(1);
            continue;
        }

        // Command exited 0 but verify the files were really extracted.
        if (!CheckMetafileExists(workDir)) {
            ERRLOG("unzip failed though ret code is zero, retry: %d", retryCnt);
            sleep(1);
            continue;
        }
        return true;
    } while (++retryCnt <= NORMAL_RETRY_TIMES);
    return false;
}

bool BuildIndexServiceJob::CheckMetafileExists(const std::string& workDir) const
{
    if (PluginUtils::IsFileExist(workDir + dir_sep + DIRECACHE_FILE_NAME) &&
        PluginUtils::IsFileExist(workDir + dir_sep + FILECACHE_FILE_NAME_0) &&
        PluginUtils::IsFileExist(workDir + dir_sep + META_FILE_NAME_0) &&
        PluginUtils::IsFileExist(workDir + dir_sep + XMETA_FILE_NAME_0) &&
        PluginUtils::IsFileExist(workDir + dir_sep + META_FILE_COUNT_NAME)) {
        return true;
    }
    ERRLOG("unzip metafile failed");
    return false;
}

std::string BuildIndexServiceJob::PrepareUnZipCommand(const std::string &workDir, const std::string &dcache,
    const std::string &fcache, const std::string &meta, const std::string &xmeta)
{
    // Build a single shell command that cd's into workDir and extracts the
    // four pigz tarballs in sequence; "&&" chaining aborts on the first
    // failed extraction.
    std::string execCmd = "cd " + workDir + ";";
    execCmd += "tar -I pigz -xf " + dcache + " -C . &&";
    execCmd += "tar -I pigz -xf " + fcache + " -C . &&";
    execCmd += "tar -I pigz -xf " + meta + " -C . &&";
    execCmd += "tar -I pigz -xf " + xmeta + " -C . ";
    return execCmd;
}

void BuildIndexServiceJob::FillScanConfigForGenerateRfi(ScanConfig& scanConfig, bool isFullScan)
{
    // Populate the scanner configuration for the RFI generation phase:
    // the scanner diffs the latest dcache against the previous one
    // (scanType RFI_GEN) and emits RFI files via the callbacks set below.
    INFOLOG("Enter FillScanConfig");
    scanConfig.jobId = m_indexPara->jobId;
    scanConfig.subJobId = m_subJobInfo->subJobId;
    scanConfig.copyId = m_curRepoExtendInfo->copyId;
    scanConfig.scanType = ScanJobType::RFI_GEN;
    scanConfig.scanIO = IOEngine::DEFAULT;
    scanConfig.lastBackupTime = 0;

    /* config meta path */
    scanConfig.metaPath = m_cacheRepo->path[0] + dir_sep + META;
    scanConfig.metaPathForCtrlFiles = m_cacheRepo->path[0] + dir_sep + RFI;
    // The two dcache snapshots to compare: current copy vs previous copy.
    scanConfig.curDcachePath = m_cacheRepo->path[0] + dir_sep + META + dir_sep + LATEST;
    scanConfig.prevDcachePath = m_cacheRepo->path[0] + dir_sep + META + dir_sep + PREVIOUS;
    scanConfig.indexPath = m_indexRepo->path[0];
    scanConfig.maxOpendirReqCount = MAX_OPEN_DIR_REQ_COUNT;
    scanConfig.generatorIsFull = isFullScan;

    // Thread count / throughput limits (hard-coded here, unlike the scan
    // phase which reads them from the DME NAS config section).
    scanConfig.maxCommonServiceInstance = 1;
    scanConfig.scanCtrlMaxDataSize = to_string(ONE_GB);
    scanConfig.scanCtrlMinDataSize = to_string(HALF_GB);
    scanConfig.scanCtrlFileTimeSec = SCAN_CTRL_FILE_TIMES_SEC;
    scanConfig.scanCtrlMaxEntriesFullBkup = SCAN_CTRL_MAX_ENTRIES_FULL_BACKUP;
    scanConfig.scanCtrlMaxEntriesIncBkup = SCAN_CTRL_MAX_ENTRIES_INCBKUP;
    scanConfig.scanCtrlMinEntriesFullBkup = SCAN_CTRL_MIN_ENTRIES_FULL_BKUP;
    scanConfig.scanCtrlMinEntriesIncBkup = SCAN_CTRL_MIN_ENTRIES_INC_BKUP;
    scanConfig.scanMetaFileSize = ONE_GB;
    scanConfig.maxWriteQueueSize = SCAN_CTRL_MAX_QUEUE_SIZE;
    // rfiCtrlCb is the callback that reports each generated RFI file to the agent.
    scanConfig.scanResultCb = GeneratedCopyCtrlFileCb;
    scanConfig.scanHardlinkResultCb = GeneratedHardLinkCtrlFileCb;
    scanConfig.rfiCtrlCb = GenerateRfiCtrlFileCb;
    HCP_Log(INFO, MODULE) << "EXIT FillScanConfig" << HCPENDLOG;
}

void BuildIndexServiceJob::GeneratedCopyCtrlFileCb(void* /* usrData */, string ctrlFile)
{
    // Copy control-file callback: nothing to do beyond tracing the file name.
    DBGLOG("GeneratedCopyCtrlFileCb: %s", ctrlFile.c_str());
}

void BuildIndexServiceJob::GeneratedHardLinkCtrlFileCb(void *usrData, string ctrlFile)
{
    // Hard-link control-file callback: nothing to do beyond tracing.
    (void)usrData;
    // Fix: log tag was misspelled "GenreateHardlinkCtrlFileCb", which broke
    // log greps for this callback's name.
    DBGLOG("GeneratedHardLinkCtrlFileCb: %s", ctrlFile.c_str());
}

void BuildIndexServiceJob::GenerateRfiCtrlFileCb(void* /* usrData */, RfiCbStruct cbParam)
{
    // Callback invoked by the scanner each time an RFI zip file is produced.
    // Reports the file (and final job status) to the agent via thrift.
    INFOLOG("rfi cb : jobId - %s, subjobId - %s, copyId - %s, rfiFileName - %s, isComplete - %d, isFailed %d",
        cbParam.jobId.c_str(), cbParam.subJobId.c_str(), cbParam.copyId.c_str(), cbParam.rfiZipFileName.c_str(),
        cbParam.isComplete, cbParam.isFailed);
    ActionResult result;
    LogDetail logDetail;
    logDetail.__set_timestamp(std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::system_clock::now().time_since_epoch()).count());
    logDetail.__set_level(JobLogLevel::TASK_LOG_INFO);
    // NOTE(review): this "success" description is set unconditionally and is
    // therefore also attached to the FAILED report below — confirm intended.
    logDetail.__set_description("generate a rfi file success");
    std::vector<LogDetail> logDetailList;
    logDetailList.push_back(logDetail);
    SubJobDetails subJobDetails;
    subJobDetails.__set_jobId(cbParam.jobId);
    subJobDetails.__set_subJobId(cbParam.subJobId);
    subJobDetails.__set_logDetail(logDetailList);
    // On failure: report FAILED immediately, without any RFI extend info.
    if (cbParam.isFailed) {
        subJobDetails.__set_jobStatus(SubJobStatus::FAILED);
        JobService::ReportJobDetails(result, subJobDetails);
        return;
    }
    if (cbParam.isComplete) {
        subJobDetails.__set_progress(PROGRESS_COMPLETE);
        subJobDetails.__set_jobStatus(SubJobStatus::COMPLETED);
    } else {
        subJobDetails.__set_progress(0);
        subJobDetails.__set_jobStatus(SubJobStatus::RUNNING);
    }
    // Attach the generated RFI file name (keyed by copy id) as JSON extend info.
    string extendInfo {};
    RfiGeneratationParam param;
    param.copyId = cbParam.copyId;
    param.rfiFiles.push_back(cbParam.rfiZipFileName);
    JsonHelper::StructToJsonString(param, extendInfo);
    INFOLOG("Report RFI struct: %s", extendInfo.c_str());
    subJobDetails.__set_extendInfo(extendInfo);
    JobService::ReportJobDetails(result, subJobDetails);
    return;
}