#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import stat
import time
import shutil
import re
import datetime
import sys
import hashlib

from common.common_models import SubJobDetails, LogDetail
from common.exception.common_exception import ErrCodeException
from common.const import SubJobStatusEnum, RepositoryDataTypeEnum, SubJobPriorityEnum, ParamConstant
from common.number_const import NumberConst
from ndmp.comm.const import SubJobType, NDMPCode, BackupType, LogLevel, CopyFormat, NdmpCopyStatus, NdmpLabel, \
    ErrorCode, Constant, NdmpFileAttrIndex
from ndmp.comm.log import log
from ndmp.comm.utils import report_job_details, format_capacity, remove_file
from ndmp.comm.share_resource_manager import ShareResourceManager
from ndmp.schemas.ndmp_schemas import ResultInfo, SubJob, TaskStats
from ndmp.service.client.ndmp_client import NdmpClientInterfaceS
from ndmp.service.ndmp_service_base import NDMPServiceBase
from ndmp.service.backup.parse_backup_param import BackupParam


class NDMPBackupService(NDMPServiceBase):
    def __init__(self, req_id: str, job_id: str, sub_id: str):
        """Initialize the NDMP backup service from the request parameter file.

        :param req_id: request id used to locate the input parameter file
        :param job_id: main backup job id
        :param sub_id: current sub job id
        """
        super().__init__(req_id, job_id, sub_id)
        # All backup parameters are parsed out of the request parameter file.
        self.param = BackupParam(self.req_id)
        self.backup_type = self.param.get_backup_type()
        self.filter_info = self.param.get_filter_info()
        self.copy_id = self.param.get_copy_id()
        self.task_status = SubJobStatusEnum.RUNNING.value
        # Dump level of the produced copy; filled in during the backup flow.
        self.backup_level = ""
        # User-specified paths inside the file system; empty list means the
        # whole file system is backed up by a single sub job.
        self.file_path_list: list = self.param.get_file_path()
        # Bytes already folded into the shared task-status file (see
        # update_task_stat); used to turn cumulative sizes into deltas.
        self.last_process_size = 0
        self.copy_format = self.param.get_copy_format()
        self.files_cnt = 0
        self.label_dict = self.init_label_dict()
        self.start_time = int(time.time())

    def init_label_dict(self):
        """Build the error-code -> report-label mapping, extending the base class mapping
        with the generic backup-failed entry for internal errors.
        """
        label_dict = super().init_label_dict()
        failed_entry = {
            "label": NdmpLabel.EXECUTE_BACKUP_FAILED_LABEL,
            "param": [self.sub_id],
        }
        label_dict[NdmpCopyStatus.INTERNAL_ERROR] = failed_entry
        return label_dict

    def allow_backup_in_local_node(self):
        """Check whether the current node may run the backup.

        Always allows it: a default (success) ResultInfo is reported back.
        """
        log.info(f"allow_backup_in_local_node success, job id:{self.job_id}.")
        result = ResultInfo().dict(by_alias=True)
        self.report_result(result)
        return True

    def check_backup_job_type(self):
        """
        Check whether this incremental backup must be converted to a full one.

        When no usable base copy exists (or the differential chain has reached
        dump level 9), an ERROR_INCREMENT_TO_FULL result is reported so that
        the framework re-issues a full backup.

        :return: True when the job type check finished (success or converted),
                 False when the incremental backup can proceed as requested.
        """

        def check_copy_is_not_exist():
            """Return True when no usable base copy exists for this backup type."""
            param = self.param.get_meta_info()
            log.info(f"meta_info,{param}")
            try:
                # Consistency fix: compare against the enum *values*, matching
                # the FULL_BACKUP check below and format_transfer_level.
                if self.backup_type == BackupType.PERMANENT_INCREMENTAL_BACKUP.value:
                    if param["fullBackup"]["copyId"]:
                        return False
                elif self.backup_type == BackupType.DIFFERENTIAL_INCREMENTAL_BACKUP.value:
                    trans_level = param["diffBackup"]["copyLevel"]
                    # NDMP dump levels run 0-9; at level 9 the diff chain can
                    # no longer be extended, forcing a new full backup.
                    if param["fullBackup"]["copyId"] and (trans_level == "" or int(trans_level) < 9):
                        return False
            except KeyError:
                # Bug fix: the logger has no "err" method; use error().
                log.error("key not exist")
            return True

        log.info(f"check_backup_job_type start, job id:{self.job_id}.")
        response = ResultInfo()
        if self.backup_type == BackupType.FULL_BACKUP.value:
            self.report_result(response.dict(by_alias=True))
            return True
        if check_copy_is_not_exist():
            response = ResultInfo(code=NDMPCode.FAILED.value, bodyErr=ErrorCode.ERROR_INCREMENT_TO_FULL,
                                  message='Can not apply this type backup job')
            log.info(f"change backup_type to full")
            self.report_result(response.dict(by_alias=True))
            return True
        self.report_result(response.dict(by_alias=True))
        return False

    def backup_prerequisite_check_last_job_stat(self):
        """Collect ids from leftover abort/fail marker files and delete the markers.

        :return: list of ids extracted from the marker file names
        """
        stale_ids = []
        result_dir = ParamConstant.RESULT_PATH
        marker_patterns = ('ndmp_(.*)_abort', 'ndmp_(.*)_back_fail')
        for file_name in os.listdir(result_dir):
            for pattern in marker_patterns:
                matched = re.findall(pattern, file_name)
                if not matched:
                    continue
                stale_ids.append(matched[0])
                os.remove(os.path.join(result_dir, file_name))
        return stale_ids

    def backup_prerequisite(self):
        """Backup prerequisite task: remove data directories left behind by
        previously aborted/failed jobs, then report success.
        """
        log.info(f"Enter backup_prerequisite job id:{self.job_id}.")
        stale_ids = self.backup_prerequisite_check_last_job_stat()
        data_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        for stale_id in stale_ids:
            shutil.rmtree(os.path.join(data_path, stale_id), ignore_errors=True)
        log.info(f"backup_prerequisite success, job id:{self.job_id}.")
        self.report_result(ResultInfo().dict(by_alias=True))
        return True

    def backup_prerequisite_progress(self):
        """Report the prerequisite sub job as 100% completed."""
        log.info(f"backup_prerequisite_progress success, job id:{self.job_id}.")
        detail = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                               taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True)
        self.report_result(detail)
        return True

    def backup_gen_sub_job(self):
        """
        Split the backup job into sub jobs.

        When the user specified file-system paths, one sub job per path is
        generated (see generate_sub_task) to drive a multi-stream backup;
        otherwise a single backup sub job plus a trailing "queryCopy" sub job
        (lower priority, so it runs after the backup) is reported.
        """
        log.info(f"backup_gen_sub_job start, job id:{self.job_id}.")
        if self.file_path_list:
            # User specified file-system paths: generate one sub job per path.
            self.generate_sub_task()
        else:
            # No path specified: one sub job backs up the whole file system.
            response = [
                SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, jobName=str(self.job_id),
                       ignoreFailed=False).dict(by_alias=True),
                SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, jobName="queryCopy",
                       jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4).dict(by_alias=True)
            ]
            self.report_result(response)
        return True

    def generate_sub_task(self):
        """Generate backup sub jobs from file_path_list — at most 10 paths per
        priority level — plus a final "queryCopy" sub job at the lowest priority.
        """
        sub_jobs = []
        priority = 1
        batch_size = 10
        for batch_start in range(0, len(self.file_path_list), batch_size):
            # Up to 10 paths share the same priority level.
            for path in self.file_path_list[batch_start:batch_start + batch_size]:
                log.info(f"Add new sub job with path: {path}")
                # TODO: once multi-channel is supported, rotate destination ips
                # across sub jobs instead of always using the single dst ip.
                ip = self.param.get_dst_ip()
                job_info = json.dumps({"path": path, "ip": ip})
                sub_jobs.append(
                    SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                           jobName=str(self.job_id), jobPriority=priority, ignoreFailed=False,
                           jobInfo=job_info).dict(by_alias=True))
            priority += 1
        # Query-copy sub job runs last, after all backup sub jobs.
        sub_jobs.append(
            SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value, jobName="queryCopy",
                   jobPriority=priority).dict(by_alias=True))
        # The framework only accepts generated sub jobs reported in one call.
        self.report_result(sub_jobs)

    def check_filter(self):
        """
        Validate the user-supplied filter string.

        Limits: total length <= 1024 characters, at most 32 comma-separated
        entries, each entry <= 255 characters.

        :return: (ok, message) tuple; ok is False when any limit is exceeded.
        """
        log.info(f"check backup filter info, {self.filter_info}")
        # `not self.filter_info` already covers the empty string; the extra
        # len() == 0 test in the old code was redundant.
        if not self.filter_info:
            return True, "The filter_info is empty."
        if len(self.filter_info) > 1024:
            return False, "The total length of the filter_info more than 1024 characters."
        filter_items = self.filter_info.split(",")
        if len(filter_items) > 32:
            return False, "The number of the filter_info more than 32."
        if any(len(item) > 255 for item in filter_items):
            return False, "The length of a single filter_info more than 255 characters."
        return True, "The filter_info check success."

    def backup(self):
        """
        Execute the backup sub job.

        IP rules for the source and destination are added before the transfer
        and are now always removed afterwards, even when backup_inner raises —
        the old code skipped the "del" calls on exception and leaked the rules.

        :return: True on success, False on failure.
        """
        try:
            self.req_opt_ip_rule(self.param.get_src_ip(), "add")
            self.req_opt_ip_rule(self.param.get_dst_ip(), "add")
            try:
                ret = self.backup_inner()
            finally:
                # Leak fix: guarantee rule cleanup on both paths.
                self.req_opt_ip_rule(self.param.get_src_ip(), "del")
                self.req_opt_ip_rule(self.param.get_dst_ip(), "del")
        except Exception as e:
            log.error(f"Error occurs in backup: {e}", exc_info=True)
            self.report_failed(NdmpCopyStatus.INTERNAL_ERROR)
            return False
        return ret

    def backup_inner(self):
        """
        Run the NDMP backup and poll its progress until it finishes, fails or
        is aborted, reporting progress every two minutes.

        :return: True when the flow ran to the end (the final task result is
                 reported via report_* calls), False on early failure.
        """
        log.info(f"backup_job start, job id:{self.job_id}.")
        check_result, message = self.check_filter()
        if not check_result:
            log.error(f"execute backup_job_check_filter failed.")
            self.report_failed(NdmpCopyStatus.INTERNAL_ERROR)
            return False

        self.report_backup_start()
        backup_info = self.format_auth_info()
        ret = self.ndmp_client.backup(backup_info)
        if ret < 0:
            log.error(f"execute backup_job failed.")
            self.report_failed(ret)
            return False
        self.start_time = int(time.time())
        last_report_time = self.start_time
        process_size = 0
        while True:
            ndmp_status = self.ndmp_client.get_task_info()
            code = ndmp_status.status
            process_size = int(ndmp_status.process_bytes)
            finish_flag = self.check_backup_status(code)
            if self.abort_inner():
                break
            if finish_flag:
                self.files_cnt = int(ndmp_status.files_cnt)
                break
            self.update_task_stat(process_size)
            current_time = int(time.time())
            if current_time - last_report_time > 120:  # report progress every 2 minutes
                self.report_progress(process_size, self.task_status)
                last_report_time = current_time
                log.info(f"backup_speed: {self.speed}, query code: {code}, query size: {process_size}, "
                         f"remain_bytes: {ndmp_status.remain_bytes}")
            time.sleep(1)
        if self.task_status == SubJobStatusEnum.COMPLETED.value:
            self.write_tmp_file_for_back_succ()
            self.update_task_stat(process_size)
            self.report_sub_job_complete(process_size)
        else:
            self.write_tmp_file_for_back_fail()
            msg = self.read_log_message_file_for_back_failure()
            if msg:
                self.report_sub_job_err_msg(msg)
            self.report_failed(NdmpCopyStatus.INTERNAL_ERROR)
        self.ndmp_client.destroy()  # stop the client's monitor thread
        data_root_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        log_msg_file = os.path.join(data_root_path, f"{self.copy_id}", "BACKUP_LOG_MESSAGE")
        # Bug fix: the log message file is not guaranteed to exist (e.g. on the
        # success path); an unguarded os.remove raised FileNotFoundError here.
        if os.path.exists(log_msg_file):
            os.remove(log_msg_file)
        return True

    def read_log_message_file_for_back_failure(self):
        """Collect unique Warning/Error lines from the backup log message file.

        Best-effort: returns the concatenated lines, or "" when the file is
        missing or unreadable (the error is logged, not raised).
        """
        data_root_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        log_msg_file = os.path.join(data_root_path, f"{self.copy_id}", "BACKUP_LOG_MESSAGE")
        markers = ("Warning:", "Error:", "ERROR:")
        collected = set()
        try:
            with os.fdopen(os.open(log_msg_file, os.O_RDONLY, stat.S_IRUSR),
                           'r', encoding=Constant.UTF_8) as log_file:
                for line in log_file.readlines():
                    if any(marker in line for marker in markers):
                        collected.add(line)
        except Exception as e:
            log.error(e, exc_info=True)
        return "".join(collected)

    def write_tmp_file_for_back_succ(self):
        """Drop a marker file telling the post job that the backup succeeded."""
        marker = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_back_succ")
        if os.path.exists(marker):
            return
        open_flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
        open_modes = stat.S_IWUSR | stat.S_IRUSR
        with os.fdopen(os.open(marker, open_flags, open_modes), 'w', encoding=Constant.UTF_8) as marker_file:
            marker_file.write("backup succ!")

    def write_tmp_file_for_back_fail(self):
        """Drop a marker file telling the post job that the backup failed."""
        marker = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_back_fail")
        if os.path.exists(marker):
            return
        open_flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
        open_modes = stat.S_IWUSR | stat.S_IRUSR
        with os.fdopen(os.open(marker, open_flags, open_modes), 'w', encoding=Constant.UTF_8) as marker_file:
            marker_file.write("backup fail!")

    def update_task_stat(self, process_size):
        """
        Read the shared task-status file from the cache repository, fold in the
        newly copied bytes and file count, recompute the speed, and write the
        status back.

        :param process_size: cumulative bytes copied so far by this sub job
        """
        cache_repository_path = self.param.get_repository_path(RepositoryDataTypeEnum.CACHE_REPOSITORY.value)
        cache_path = os.path.join(cache_repository_path, Constant.TASK_STATUS_FILE)
        read_resource_manager = ShareResourceManager(cache_path, "r")
        json_str = read_resource_manager.read()
        read_resource_manager.close()
        # An empty json_str means the cache file was only just created.
        if not json_str:
            log.info("First calculate stats!")
            task_status = TaskStats(copySize=process_size, startTime=self.start_time).dict(by_alias=True)
        else:
            try:
                task_status = json.loads(json_str)
            except json.JSONDecodeError:
                log.info(f"json_str: {json_str}")
                task_status = TaskStats(copySize=self.last_process_size, startTime=self.start_time).dict(by_alias=True)
            # process_size is cumulative, so subtract what was already
            # accounted for by a previous call.
            total_size = task_status["copySize"] + process_size - self.last_process_size
            task_status["copySize"] = total_size
            start_time = task_status["startTime"]
            current_time = int(time.time())
            # Speed in KB/s over the whole task lifetime (guard division by zero).
            self.speed = (total_size / 1024) // (current_time - start_time) if (current_time - start_time) != 0 else 0
        # NOTE(review): assumes the persisted dict always carries "fileCnt"
        # (and "copySize"/"startTime" above) — verify against TaskStats.
        task_status["fileCnt"] += self.files_cnt
        task_status_json_str = json.dumps(task_status)
        update_resource_manager = ShareResourceManager(cache_path, "w+")
        update_resource_manager.update(task_status_json_str)
        update_resource_manager.close()
        self.last_process_size = process_size  # remember what has been accounted for

    def report_backup_start(self):
        """
        Report that the backup sub job has started (5% progress, RUNNING).
        """
        log_detail = LogDetail(logInfo=NdmpLabel.START_BACKUP_LABEL, logInfoParam=[self.sub_id],
                               logLevel=LogLevel.INFO.value)
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id,
                                        progress=NumberConst.FIVE, logDetail=[log_detail],
                                        taskStatus=SubJobStatusEnum.RUNNING.value).dict(by_alias=True)
        # NOTE(review): the other report_* methods in this class pass
        # self.job_id as the second argument of report_job_details; here
        # self.sub_id is passed — confirm which id the helper expects.
        report_job_details(self.req_id, self.sub_id, sub_job_details)

    def report_progress(self, process_size, task_status):
        """
        Report intermediate backup progress (fixed at 50%) with current speed.

        :param process_size: bytes copied so far
        :param task_status: SubJobStatusEnum value to report
        """
        log.info("report_backup_process")
        capacity = format_capacity(process_size)
        log_detail = LogDetail(logInfo=NdmpLabel.EXECUTE_BACKUP_LABEL,
                               logInfoParam=[self.sub_id, capacity],
                               logLevel=LogLevel.INFO)
        # NOTE(review): sibling methods use the dataSize= alias and pass KB
        # (size / 1024); here data_size= with raw bytes is used — confirm the
        # SubJobDetails field name and expected unit.
        sub_job_details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, logDetail=[log_detail],
                                        speed=self.speed, data_size=process_size, taskStatus=task_status,
                                        progress=NumberConst.FIFTY)
        report_job_details(self.req_id, self.job_id, sub_job_details.dict(by_alias=True))

    def report_sub_job_complete(self, size):
        """Report the backup sub job as completed with size, speed and file count.

        :param size: total bytes copied by this sub job
        """
        log.info(f"Report sub job complete, job: {self.job_id}, subJob: {self.sub_id}")
        capacity = format_capacity(size)
        log_detail = LogDetail(logInfo=NdmpLabel.PLUGIN_BACKUP_SUBJOB_SUCCESS_LABEL,
                               logInfoParam=[self.sub_id, str(self.files_cnt), capacity], logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                logDetail=[log_detail], dataSize=int(size / 1024), speed=self.speed,
                                taskStatus=SubJobStatusEnum.COMPLETED.value)
        report_job_details(self.req_id, self.job_id, details.dict(by_alias=True))
    
    def report_sub_job_err_msg(self, msg):
        """Report the sub job as failed, carrying the collected error message.

        :param msg: concatenated Warning/Error lines from the backup log
        """
        # Typo fix: the log message used to read "dailed".
        log.info(f"Report sub job failed, job: {self.job_id}, subJob: {self.sub_id}")
        log_detail = LogDetail(logInfo=NdmpLabel.NDMP_PLUGIN_BACKUP_PROCESS_MSG_LABEL,
                               logInfoParam=[self.sub_id, msg], logLevel=LogLevel.ERROR)
        report_job_details(self.req_id, self.job_id,
                           SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                         logDetail=[log_detail], speed=self.speed,
                                         taskStatus=SubJobStatusEnum.FAILED.value).dict(by_alias=True))

    def report_backup_complete(self):
        """Report the whole backup job as completed using the stats aggregated
        in the cache repository, then delete the temporary stats file.
        """
        cache_repository_path = self.param.get_repository_path(RepositoryDataTypeEnum.CACHE_REPOSITORY.value)
        # Temporary stats file created while the backup sub jobs were running.
        cache_path = os.path.join(cache_repository_path, Constant.TASK_STATUS_FILE)
        stats_reader = ShareResourceManager(cache_path, "r")
        raw_stats = stats_reader.read()
        stats_reader.close()
        try:
            stats = json.loads(raw_stats)
        except json.JSONDecodeError:
            log.info(f"json_str: {raw_stats}")
            stats = TaskStats(copySize=0, startTime=int(time.time())).dict(by_alias=True)
        file_cnt = stats.get("fileCnt", 0)
        total_size = stats.get("copySize", 0)
        capacity_str = format_capacity(total_size)
        log.info(f"Report backup success, job: {self.job_id}, copy files: {file_cnt}, size: {capacity_str}")
        log_detail = LogDetail(logInfo=NdmpLabel.EXECUTE_BACKUP_SUCCESS_LABEL,
                               logInfoParam=[str(file_cnt), capacity_str], logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                logDetail=[log_detail], dataSize=int(total_size / 1024),
                                taskStatus=SubJobStatusEnum.COMPLETED.value)
        report_job_details(self.req_id, self.job_id, details.dict(by_alias=True))
        # Drop the temporary stats file now that the totals have been reported.
        remove_file(cache_path)

    def report_failed(self, error_code):
        """Report the sub job as failed with the label mapped from error_code.

        :param error_code: NDMP error code used to look up the report label
        """
        log.error(f"backup failed, job id: {self.job_id}, error code: {error_code}")
        # Look up the report label and its parameters for this error code,
        # falling back to the generic backup-failed label.
        label_entry = self.label_dict.get(error_code, {})
        label = label_entry.get("label", NdmpLabel.EXECUTE_BACKUP_FAILED_LABEL)
        label_params = label_entry.get("param", None)
        log_detail = LogDetail(logInfo=label, logInfoParam=label_params, logLevel=LogLevel.ERROR, logDetail=0)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                logDetail=[log_detail],
                                taskStatus=SubJobStatusEnum.FAILED.value)
        report_job_details(self.req_id, self.job_id, details.dict(by_alias=True))

    def check_backup_status(self, code):
        """Map an NDMP copy status code onto self.task_status.

        :param code: NdmpCopyStatus value reported by the client
        :return: True when the task reached a terminal state, False while
                 still initializing or running
        """
        if code == NdmpCopyStatus.INIT:
            self.task_status = SubJobStatusEnum.INITIALIZING.value
            return False
        if code in (NdmpCopyStatus.PROCESSING, NdmpCopyStatus.COMPLETE):
            self.task_status = SubJobStatusEnum.RUNNING.value
            return False
        if code == NdmpCopyStatus.ABORT:
            self.task_status = SubJobStatusEnum.ABORTED.value
        elif code == NdmpCopyStatus.FINISH:
            self.task_status = SubJobStatusEnum.COMPLETED.value
        elif code == NdmpCopyStatus.INTERNAL_ERROR and self.copy_format == 1:
            # NetApp sources report INTERNAL_ERROR here; treated as success for now.
            log.warning("This is backup from NetApp, temporarily considered successful")
            self.task_status = SubJobStatusEnum.COMPLETED.value
        else:
            self.task_status = SubJobStatusEnum.FAILED.value
        return True

    def format_auth_info(self):
        """Assemble the NdmpClientInterfaceS structure with encoded auth,
        address, path and transfer-level information for the client call.
        """
        src_auth = self.param.get_src_auth()
        src_pwd = self.param.get_src_pwd()
        src_ip = self.param.get_src_ip()
        dst_auth = self.param.get_dst_auth()
        dst_pwd = self.param.get_dst_pwd()
        dst_ip = self.param.get_dst_ip()
        level = self.format_transfer_level()
        src_port = self.param.get_port()
        src_path, dst_path = self.format_path()
        back_file_path = dst_path
        exclude = self.param.get_filter_info()
        dump = self.copy_format
        # A sub-job-specific destination ip (multi-stream case) takes precedence.
        sub_job_ip = self.param.get_sub_job_info().get("ip", "")
        if sub_job_ip:
            dst_ip = sub_job_ip
        return NdmpClientInterfaceS(src_auth.encode(), dst_auth.encode(), src_ip.encode(), dst_ip.encode(),
                                    level.encode(), src_pwd.encode(), dst_pwd.encode(), src_path.encode(),
                                    dst_path.encode(), back_file_path.encode(), exclude.encode(), dump, src_port)

    def format_path(self):
        """Return (source_path, remote_path), appending the sub-job directory
        (a directory inside the file system) to both paths when present.
        """
        src_path = self.param.get_src_path()
        dst_path = self.format_dts_path()
        sub_path = self.param.get_sub_job_info().get("path", "")
        if not sub_path:
            return src_path, dst_path
        log.info(f"Sub path isn't empty, concat {sub_path}")
        return os.path.join(src_path, sub_path), os.path.join(dst_path, sub_path)

    def format_dts_path(self):
        """
        Return the remote (destination) path according to the copy format.

        DIRECTORY-format copies go into <data repository>/<copy id>, created on
        demand; every other format (including SNAPSHOT) targets the destination
        file system under the default vStore prefix.
        """
        if self.copy_format == CopyFormat.DIRECTORY:
            path = os.path.join(
                self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value), self.copy_id)
            log.info(f"creat_path,{path}")
            if not os.path.exists(path):
                os.mkdir(path)
            return path
        # The old code had a redundant SNAPSHOT elif returning the exact same
        # value as the fallthrough; both cases are collapsed here.
        default_user_pre = "/System_vStore"
        return default_user_pre + self.param.get_dst_path()

    def format_transfer_level(self):
        """Return the NDMP transfer (dump) level as a string: "0" for full
        backups and any unrecognized type, "1" for differential incremental.
        """
        level_by_type = {
            BackupType.FULL_BACKUP.value: "0",
            BackupType.DIFFERENTIAL_INCREMENTAL_BACKUP.value: "1",
        }
        return level_by_type.get(self.backup_type, "0")

    def backup_post_job(self):
        """
        Post job: on success persist the backup meta info (and generate the RFI
        index); on failure/abort clean up the partially written copy data and
        remove the marker files.
        """
        abort_file = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_abort")
        succ_file = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_back_succ")
        fail_file = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_back_fail")
        log.info(f"backup_post_job start, job id:{self.job_id}.")
        if os.path.exists(succ_file):
            # Record the backup level / copy ids in the meta repository.
            self.write_meta_info()
            os.remove(succ_file)
        else:
            path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
            # NOTE(review): copy data elsewhere in this class lives under
            # <data repo>/<copy_id>; joining with job_id here looks suspicious —
            # confirm which directory actually holds the failed copy data.
            copy_path = os.path.join(path, self.job_id)
            self.clear_back_copies_file(copy_path)
            if os.path.exists(abort_file):
                os.remove(abort_file)
            if os.path.exists(fail_file):
                os.remove(fail_file)
        response = ResultInfo()
        self.report_result(response.dict(by_alias=True))
        self.report_backup_post_complete()
        return True

    def backup_post_job_progress(self):
        """Report the post job as 100% completed."""
        log.info(f"backup_post_job_progress success, job id:{self.job_id}.")
        detail = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                               taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True)
        self.report_result(detail)
        return True

    def abort_backup_job(self):
        """
        Abort the running backup: drop an abort marker file for the backup sub
        job to pick up (see abort_inner), then wait up to 120 seconds for its
        success acknowledgement file, and finally report the result.
        """
        log.info(f'execute abort_job, job_id:{self.job_id},sub_job_id:{self.sub_id}')
        abort_file = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_abort")
        if not os.path.exists(abort_file):
            open_flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
            open_modes = stat.S_IWUSR | stat.S_IRUSR
            with os.fdopen(os.open(abort_file, open_flags, open_modes), 'w', encoding=Constant.UTF_8) as m_file:
                m_file.write("abort backup!")
            log.info("--debug-- abort_start")
        success_file_path = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_abort_success")
        # Poll once per second for the acknowledgement, 120s at most.
        deadline = int(time.time()) + 120
        while int(time.time()) < deadline:
            if os.path.exists(success_file_path):
                os.remove(success_file_path)
                log.info(f"abort success, job id: {self.job_id}")
                break
            time.sleep(1)
        log.info("--debug-- abort_complete")
        response = ResultInfo().dict(by_alias=True)
        log.info(f"report abort,{response}")
        self.report_result(response)
        return True


    def clear_back_copies_file(self, path):
        """Best-effort removal of the backup copy directory at *path*.

        :param path: directory holding the (partial) copy data to delete
        """
        log.info(f"job aborted, rm back file:{path}")
        # ignore_errors: the directory may already be gone or partially removed.
        shutil.rmtree(path, ignore_errors=True)

    def abort_inner(self):
        """
        Check for a plugin-issued abort request and, if present, ask the NDMP
        client to abort the transfer.

        On a successful abort an acknowledgement marker file is written for
        abort_backup_job to consume, and task_status is set to ABORTED.

        :return: True when the task was aborted here, otherwise False.
        """
        file_path = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_abort")
        success_file_path = os.path.join(ParamConstant.RESULT_PATH, f"ndmp_{self.job_id}_abort_success")
        if os.path.exists(file_path):
            conf = self.format_auth_info()
            ret = self.ndmp_client.abort(conf)
            log.info(f"--debug-- abort_ret,{ret}")
            if ret == NDMPCode.SUCCESS.value:
                flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
                modes = stat.S_IWUSR | stat.S_IRUSR
                # Acknowledge the abort so abort_backup_job stops waiting.
                with os.fdopen(os.open(success_file_path, flags, modes), 'w', encoding=Constant.UTF_8) as m_file:
                    m_file.write("abort backup success!")
                self.task_status = SubJobStatusEnum.ABORTED.value
                log.info("--debug-- abort_process")
                return True
        return False

    def query_backup_copy(self):
        """
        Report the finished backup job and then report the generated copy
        (copy id, data repository path and backup level) to the framework.
        """
        log.info(f"start to query backup copy, job_id:{self.job_id}, backType:{self.backup_type}")
        # Report the whole backup job as complete first.
        self.report_backup_complete()
        copy_info = {}
        try:
            copy_info['id'] = self.copy_id
            repositories_infos = self.param.get_repositories()
            repositories = []
            # Directory-format copies live in a per-copy sub directory.
            path_suffix = f"/{self.copy_id}" if self.copy_format == CopyFormat.DIRECTORY else ""
            for repository in repositories_infos:
                repository_type = int(repository['repositoryType'])
                if repository_type == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                    repository['remotePath'] = f"{repository['remotePath']}" + path_suffix
                    repositories.append(repository)
                    break
            copy_info['repositories'] = repositories
            backup_level = ""
            # Consistency fix: compare against the enum *values* like the rest
            # of this class (e.g. format_transfer_level), not the members.
            if self.backup_type == BackupType.FULL_BACKUP.value:
                backup_level = "0"
            elif self.backup_type == BackupType.CUMULATIVE_INCREMENTAL_BACKUP.value:
                backup_level = "1"
            elif self.backup_type == BackupType.DIFFERENTIAL_INCREMENTAL_BACKUP.value:
                backup_level = self.param.get_backup_level()
            copy_info['extendInfo'] = {'level': backup_level, 'copy_id': self.copy_id}
            log.info(f"repositories,{repositories}")
        except Exception as e:
            # Typo fix: "occured" -> "occurred".
            log.error(f"error occurred in query_backup_copy {e}", exc_info=True)
        self.report_result(copy_info)

    def write_meta_info(self):
        """
        Persist the backup chain information (full/increment/diff copy ids and
        the diff backup level) into the meta repository after a successful
        backup, then generate the RFI index file.
        """
        meta_path = self.param.get_repository_path(RepositoryDataTypeEnum.META_REPOSITORY.value)
        meta_info_file = os.path.join(meta_path, "ndmp_backup_info")
        param = self.param.get_meta_info()
        try:
            # Consistency fix: compare against the enum *values* like the
            # FULL_BACKUP checks elsewhere in this class.
            if self.backup_type == BackupType.FULL_BACKUP.value:
                # A new full copy resets the differential chain.
                param["fullBackup"]["copyId"] = self.copy_id
                param["diffBackup"]["copyId"] = ""
                param["diffBackup"]["copyLevel"] = ""
            elif self.backup_type == BackupType.PERMANENT_INCREMENTAL_BACKUP.value:
                param["incrementBackup"].append({"copyId": self.copy_id})
            elif self.backup_type == BackupType.DIFFERENTIAL_INCREMENTAL_BACKUP.value:
                param["diffBackup"]["copyId"] = self.copy_id
                log.info(f"level,{self.backup_level}")
                param["diffBackup"]["copyLevel"] = self.backup_level
        except KeyError:
            # Bug fix: the logger has no "err" method; use error().
            log.error("key not found")
        flags = os.O_WRONLY | os.O_CREAT
        modes = stat.S_IWUSR | stat.S_IRUSR
        with os.fdopen(os.open(meta_info_file, flags, modes), 'w') as file:
            # O_CREAT without O_TRUNC keeps old content: truncate explicitly so
            # a shorter meta info leaves no trailing garbage. The explicit
            # file.close() inside the with-block was redundant and is removed.
            file.seek(0)
            file.truncate()
            log.info(f"copy_info,{param}")
            file.write(json.dumps(param))
        self.generate_meta_rfi()


    def generate_meta_rfi(self):
        """
        Produce the RFI (raw file-system index) file in the meta repository.

        When the backup left an index.tmp next to the copy data it is simply
        moved into place; otherwise the index is rebuilt from the temporary
        file-attribute cache. Failures are reported via report_backup_post_faild.
        """
        data_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        index_tmp = os.path.join(data_path, self.copy_id, "index.tmp")

        log.info(f"Entering rfi file, cur_copy_id:{self.copy_id}, index_path:{index_tmp}")
        rfi_file = "index_" + self.copy_id + ".txt"
        meta_path = self.param.get_repository_path(RepositoryDataTypeEnum.META_REPOSITORY.value)
        rfi_path = os.path.join(meta_path, rfi_file)

        if os.path.exists(index_tmp):
            try:
                shutil.move(index_tmp, rfi_path)
                self.report_backup_post_complete()
            except Exception as e:
                log.error(e, exc_info=True)
                self.report_backup_post_faild()
        else:
            # No ready-made index: write the header, then rebuild the index
            # from the cached file attributes.
            self.init_rfi_file_header(rfi_path)
            try:
                file_attr_dict = self.back_get_tmp_dir_file()
                parent_dict, dir_dict = self.index_get_dir_name_and_parent_node(file_attr_dict)
                self.write_rfi_with_status(rfi_path, file_attr_dict, parent_dict, dir_dict, "new")
            except Exception as e:
                log.error(e, exc_info=True)
                self.report_backup_post_faild()
    
    def index_get_dir_name_and_parent_node(self, file_attr_dict):
        """
        Scan the FILE_CACHE_TMP listing and build the index helper maps.

        :param file_attr_dict: node id -> file attribute list (file type is at
            NdmpFileAttrIndex.INDEX_TYPE)
        :return: (parent_dict, dir_dict) where parent_dict maps node id ->
            parent node id and dir_dict maps directory node id -> name
        """
        dir_dict = dict()
        parent_dict = dict()
        data_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        tmp_file = os.path.join(self.copy_id, "FILE_CACHE_TMP")
        cache_file = os.path.join(data_path, tmp_file)
        flags = os.O_RDONLY
        modes = stat.S_IRUSR
        with os.fdopen(os.open(cache_file, flags, modes), 'r', encoding=Constant.UTF_8) as dir_file:
            # One JSON object per line; read lazily to keep memory flat.
            while True:
                line = dir_file.readline()
                if not line:
                    break
                one_line = json.loads(line.strip())
                file_name = one_line.get("name")
                node_id = one_line.get("nodeId")
                # Every Linux directory lists "." and ".."; skip them and any
                # entry without attributes — no index entry is needed.
                if (file_name == '.' or file_name == '..'
                    or not file_attr_dict.get(node_id)):
                    continue
                file_type = file_attr_dict[node_id][NdmpFileAttrIndex.INDEX_TYPE]
                parent_dict[node_id] = one_line.get("parentId")
                if file_type == "d":
                    dir_dict[node_id] = one_line.get("name")
        return parent_dict, dir_dict

    def init_rfi_file_header(self, file):
        """Create the RFI file and write its single-line JSON header."""
        header = ('{"title": "Raw File-system Index Database","version": "2.0","time":"'
                  + self.get_timestamp() + '"}\n')
        open_flags = os.O_WRONLY | os.O_CREAT
        open_modes = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR
        with os.fdopen(os.open(file, open_flags, open_modes), 'w',
                       encoding=Constant.UTF_8) as header_file:
            header_file.write(header)
        
    def get_timestamp(self):
        """Return the current UTC time as a string: epoch seconds + 6-digit microseconds.

        Fixes two defects of the previous implementation:
        - time.mktime() interprets its struct_time as *local* time, so feeding
          it a UTC timetuple skewed the result by the host's UTC offset;
          an aware datetime's timestamp() yields the true epoch seconds.
        - The seconds and microseconds were read from two separate now() calls
          and could straddle a second boundary; a single snapshot is consistent.
        """
        now = datetime.datetime.now(datetime.timezone.utc)
        return f"{int(now.timestamp())}{now.microsecond:06d}"

    def back_get_tmp_dir_file(self):
        """Load NODE_CACHE_TMP into a dict mapping nodeId -> [fhInfo, type, mtime, size].

        Periodically reports RUNNING so the framework does not time the
        sub-job out while a large cache file is being scanned.
        """
        file_attr_dict = dict()
        last_report = int(time.time())
        data_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        cache_file = os.path.join(data_path, self.copy_id, "NODE_CACHE_TMP")
        with os.fdopen(os.open(cache_file, os.O_RDONLY, stat.S_IRUSR), 'r',
                       encoding=Constant.UTF_8) as cache:
            for raw_line in cache:
                record = json.loads(raw_line)
                file_attr_dict[record.get("nodeId")] = [
                    record.get("fhInfo"), record.get("type"),
                    record.get("mtime"), record.get("size")
                ]
                # Heartbeat roughly every two minutes.
                if int(time.time()) - last_report > 120:
                    self.report_backup_post_running()
                    last_report = int(time.time())
        return file_attr_dict

    def write_rfi_with_status(self, rfi_path, file_attr_dict, parent_dict, dir_dict, status):
        """Stream FILE_CACHE_TMP entries into the RFI file, one JSON line each.

        Output is flushed in 100000-line batches to bound memory usage, a
        RUNNING heartbeat is reported every ~2 minutes, and COMPLETED is
        reported once the whole cache has been written.
        """
        log.debug(f"Entering status")
        last_report = int(time.time())
        data_path = self.param.get_repository_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        node_file = os.path.join(data_path, self.copy_id, "FILE_CACHE_TMP")
        buffered = ""
        buffered_lines = 0
        with os.fdopen(os.open(node_file, os.O_RDONLY, stat.S_IRUSR), 'r',
                       encoding=Constant.UTF_8) as cache:
            for raw_line in cache:
                entry = json.loads(raw_line.strip())
                name = entry.get("name")
                node_id = entry.get("nodeId")
                # "." and ".." always exist in a Linux directory listing and
                # need no index entry; also skip nodes without attributes.
                if name in ('.', '..') or not file_attr_dict.get(node_id):
                    continue
                buffered += self.write_rfi_file_info_line(
                    entry, file_attr_dict, dir_dict, parent_dict, status)
                buffered_lines += 1
                if buffered_lines >= 100000:
                    self.write_rfi_file_info(buffered, rfi_path)
                    buffered = ""
                    buffered_lines = 0
                if int(time.time()) - last_report > 120:
                    self.report_backup_post_running()
                    last_report = int(time.time())
        self.write_rfi_file_info(buffered, rfi_path)
        self.report_backup_post_complete()

    def write_rfi_file_info_line(self, one_line, file_attr_dict, dir_dict: dict, parent_dict: dict, status):
        """Render one RFI JSON line for a cache entry; return '' for skipped nodes."""
        node_id = one_line.get("nodeId")
        if node_id == '2':
            # Node 2 is the filesystem root; it gets no index entry.
            return ""
        parent_id = one_line.get("parentId")
        if node_id == parent_id:
            # A self-parented node is malformed; skip it.
            return ""
        attrs = file_attr_dict[node_id]
        full_path = self.get_file_full_path(node_id, dir_dict, parent_dict, one_line.get("name"))
        # Hash over mtime + size + inode + file handle identifies this version of the file.
        digest_src = (attrs[NdmpFileAttrIndex.INDEX_MTIME] + attrs[NdmpFileAttrIndex.INDEX_SIZE]
                      + node_id + attrs[NdmpFileAttrIndex.INDEX_FH])
        document = {
            "path": full_path,
            "mtime": attrs[NdmpFileAttrIndex.INDEX_MTIME],
            "size": attrs[NdmpFileAttrIndex.INDEX_SIZE],
            "inode": node_id,
            "id": attrs[NdmpFileAttrIndex.INDEX_FH],
            "type": attrs[NdmpFileAttrIndex.INDEX_TYPE],
            "status": status,
            "hashCode": hashlib.md5(digest_src.encode(Constant.UTF_8)).hexdigest()
        }
        return json.dumps(document, ensure_ascii=False) + "\n"

    def get_file_full_path(self, node_id, dir_dict, parent_dict, file_name):
        """Reconstruct the absolute path of a node by walking its parent chain.

        The walk stops at node id '2' (the root) or at the first ancestor
        that is not a known directory.
        """
        if not parent_dict.get(node_id):
            return "/" + file_name
        segments = [file_name]
        ancestor = parent_dict[node_id]
        while ancestor != '2' and dir_dict.get(ancestor):  # '2' marks the root node
            segments.append(dir_dict[ancestor])
            ancestor = parent_dict[ancestor]
        return "/" + "/".join(reversed(segments))

    def write_rfi_file_info(self, str_line, rfi_path):
        """Append an already-rendered batch of RFI lines to rfi_path."""
        log.debug(f"Entering write rfi file:{rfi_path}, str len:{len(str_line)}")
        open_flags = os.O_CREAT | os.O_WRONLY
        open_modes = stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
        with os.fdopen(os.open(rfi_path, open_flags, open_modes), 'a',
                       encoding=Constant.UTF_8) as out_file:
            out_file.write(str_line)

    def _report_backup_post(self, outcome: str, status: int):
        """Report the post-backup sub-job state to the job framework.

        Shared by the three public report_* wrappers, which previously
        duplicated this body verbatim.

        :param outcome: word used in the log line ("success"/"failed"/"running")
        :param status: SubJobStatusEnum value to report
        """
        log.info(f"backup_post_job_progress {outcome}, job id:{self.job_id}.")
        # progress is reported as 100 in every state, mirroring the original behaviour.
        report_job_details(self.req_id, self.job_id,
                           SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id,
                                         progress=100,
                                         taskStatus=status).dict(by_alias=True))

    def report_backup_post_complete(self):
        # Post-job finished successfully.
        self._report_backup_post("success", SubJobStatusEnum.COMPLETED.value)

    def report_backup_post_faild(self):
        # Post-job failed. Name kept as-is ("faild" is a legacy typo callers rely on).
        self._report_backup_post("failed", SubJobStatusEnum.FAILED.value)

    def report_backup_post_running(self):
        # Heartbeat while the post-job is still in progress.
        self._report_backup_post("running", SubJobStatusEnum.RUNNING.value)
