#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
#

import subprocess
import re
import os
import time
import stat
import math
import json
import threading

from adds.service.backup.adds_sqlite_service import ADSqliteService
from common.common_models import SubJobDetails, LogDetail
from common.number_const import NumberConst
from common.const import SubJobStatusEnum, SubJobPriorityEnum, RepositoryDataTypeEnum
from adds.comm.common import execute_cmd

from adds.comm.log import log
from adds.schemas.adds_schemas import ResultInfo, SubJob
from adds.service.adds_service_base import ADDSServiceBase
from adds.service.backup.parse_backup_param import BackupParam
from adds.comm.const import SubJobType, LogLevel, ADDSLabel, ADDSCode, ADDSParamConstant
from adds.comm.utils import check_powershell_result, report_job_details_by_rpc, get_powershell_path, convert_copy_id, \
    get_ad_domain, is_normal_mode, CommandResult, get_previous_task_info, scan_dir_size, get_decoding_mode


class ADDSBackupService(ADDSServiceBase):
    """Backup service for Windows Active Directory Domain Services (AD DS).

    Drives system-state backup through Windows Server Backup (`wbadmin`)
    invoked via PowerShell, plus an optional per-object metadata export,
    and reports progress/results back over RPC.
    """

    def __init__(self, req_id: str, job_id: str, sub_id: str):
        """Resolve job parameters, repository paths and wbadmin command lines."""
        super().__init__(req_id, job_id, sub_id)
        self.param = BackupParam(self.req_id)
        # Absolute path of powershell.exe; every external command goes through it.
        self.ps_loc = get_powershell_path()
        # Console output encoding used when decoding wbadmin output.
        self.encoding_mode = get_decoding_mode()
        self.data_repository_path = self.param.get_backup_path(RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        self.cache_repository_path = self.param.get_backup_path(RepositoryDataTypeEnum.CACHE_REPOSITORY.value)
        # wbadmin writes the system-state image directly into the data repository.
        self.backup_cmd = f"wbadmin start systemstatebackup -backupTarget:{self.data_repository_path} -quiet"
        self.abort_command = "wbadmin stop job -quiet"
        # Whether the optional AD-object (metadata) backup sub-job is enabled.
        self.object_flag = self.param.get_object_backup_flag()
        self.states_command = "wbadmin get status"
        # Matches "(N)": count of system-state files found during the scan phase.
        self.search_pattern = r"\((\d+)\)"
        self.search_completed_pattern = r'The search for system state files is complete.'
        # Matches the leading percentage in wbadmin progress lines.
        self.progress_pattern = r'(\d+).*?%'
        self.task_running_flag = False
        self.task_completed_flag = False
        # Set True to ask the keep_job_alive thread to stop.
        self.keep_job_alive_flag = False
        self.json_path = self.data_repository_path + "/" + "object.json"
        self.canonical_name_file = self.data_repository_path + "/" + "CanonicalNames.txt"
        self.distinguishedname_file = self.data_repository_path + "/" + "DistinguishedName.txt"
        self.meta_repository_path = self.param.get_repository_path(RepositoryDataTypeEnum.META_REPOSITORY.value)
        self.parent_id_map = {}
        # Set by is_backup_complete() when SYSVOL is found in the wbadmin log.
        self.sysvol_contain_flag = False

    def allow_backup_in_local_node(self):
        """Always permit the backup on the local node and acknowledge via RPC."""
        log.info(f"allow_backup_in_local_node success, job id:{self.job_id}.")
        self.report_result(ResultInfo().dict(by_alias=True))
        return True

    def check_backup_job_type(self):
        """Accept any backup job type and acknowledge with a default result."""
        log.info(f"check_backup_job_type success, job id:{self.job_id}.")
        self.report_result(ResultInfo().dict(by_alias=True))
        return True

    def backup_prerequisite(self):
        """No-op prerequisite step: immediately report a default (OK) result."""
        result = ResultInfo()
        self.report_result(result.dict(by_alias=True))
        return True

    def backup_prerequisite_progress(self):
        """Verify that the AD-Domain-Services and Windows-Server-Backup
        features are installed on this host.

        Reports FAILED with a feature-specific error code for the first
        failing check, otherwise reports COMPLETED.

        Returns:
            bool: True when both Windows features are installed.
        """
        log.info(f"backup_prerequisite_progress start, job id:{self.job_id}.")
        # (feature name, powershell query, error code reported on failure)
        checks = (
            ("AD-Domain-Services",
             "Get-WindowsFeature AD-Domain-Services | Select-Object Name, InstallState",
             ADDSCode.AD_SERVICE_NOT_INSTALLED.value),
            ("Windows-Server-Backup",
             "Get-WindowsFeature Windows-Server-Backup | Select-Object Name, InstallState",
             ADDSCode.BACKUP_SERVER_NOT_INSTALLED.value),
        )
        for feature, command, err_code in checks:
            if check_powershell_result(command, "Installed"):
                continue
            log.error(f"{feature} not Installed, job id:{self.job_id}.")
            log_detail = LogDetail(logInfo=ADDSLabel.ADDS_PREREQUISITE_CHECK_FAILED,
                                   logDetail=err_code, logLevel=LogLevel.ERROR.value)
            # Consistency fix: the original AD-Domain-Services failure report
            # omitted subTaskId, unlike every other progress report here.
            progress = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                     logDetail=[log_detail],
                                     taskStatus=SubJobStatusEnum.FAILED.value)
            self.report_result(progress.dict(by_alias=True))
            return False
        progress = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                 taskStatus=SubJobStatusEnum.COMPLETED.value)
        self.report_result(progress.dict(by_alias=True))
        return True

    def backup_gen_sub_job(self):
        """Produce the business sub-jobs for this backup task and report them."""
        log.info(f"backup generate sub job start, job id:{self.job_id}.")
        sub_jobs = [
            SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                   jobName="backupSystemState", ignoreFailed=False),
            # NOTE(review): jobPriority is passed as the enum member rather than
            # `.value`, unlike other enum usages in this file — confirm the
            # schema coerces it as intended.
            SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                   jobName="queryCopy", jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4),
        ]
        if self.object_flag:
            # Object-level backup is optional and only scheduled when enabled.
            sub_jobs.append(SubJob(jobId=self.job_id, jobType=SubJobType.BUSINESS_SUB_JOB.value,
                                   jobName="backupADObject", ignoreFailed=False))
        self.report_result([job.dict(by_alias=True) for job in sub_jobs])
        return True

    def backup(self):
        """Dispatch the current sub-job (system-state or AD-object backup).

        Returns:
            bool: True on success (or when the sub-job name is unknown and
            deliberately ignored), False on failure.
        """
        log.info(f"start backup job,job_id: {self.job_id}.")
        # 4 means running
        if not self.get_dfsr_state():
            log.error(f"DFSR service state is abnormal")
            self.report_job_failed(label=ADDSLabel.ADDS_DFSR_SERVICE_ABNORMAL)
            return False
        if not is_normal_mode():
            log.error("windows is not in normal mode,backup task will failed!")
            self.report_job_failed(label=ADDSLabel.ADDS_BACKUP_COMPUTER_NOT_IN_NORMAL_MODE)
            return False
        sub_job_name = self.param.get_sub_job_name()
        if sub_job_name == "backupSystemState":
            if self.is_other_task_running():
                if not self.check_backup_flag():
                    self.report_job_failed(label=ADDSLabel.ADDS_COMPUTER_OTHER_TASK_RUNNING)
                    return False
                if self.task_completed_flag:
                    # A previous run of this very job already finished.
                    self.report_job_complete()
                    return True
            return self._run_with_keep_alive(self.backup_system_state)
        if sub_job_name == "backupADObject":
            if self.check_backup_object_flag():
                log.info("backup adObject is already running")
                self.report_complete()
                return True
            return self._run_with_keep_alive(self._backup_ad_object_safely)
        log.warn("backup task name is invalid.")
        return True

    def _run_with_keep_alive(self, task):
        """Run *task* while a keep-alive thread reports RUNNING periodically.

        Fixes two issues of the former inline version: the stop flag is now
        cleared BEFORE the thread starts (a stale True flag could make the
        thread exit immediately), and the thread is stopped and joined in a
        ``finally`` block so an exception inside *task* can no longer leave
        the keep-alive thread spinning forever.
        """
        self.keep_job_alive_flag = False
        alive_thread = threading.Thread(target=self.keep_job_alive)
        alive_thread.start()
        try:
            return task()
        finally:
            self.keep_job_alive_flag = True
            alive_thread.join()

    def _backup_ad_object_safely(self):
        """Run the AD-object backup, converting unexpected errors to False."""
        try:
            return self.backup_ad_object()
        except Exception as e:
            log.error(f"Unexpected error: {e}", exc_info=True)
            return False

    def is_other_task_running(self):
        """Return True when Get-WBJob reports another wbadmin job Running."""
        code, out, _ = execute_cmd([self.ps_loc, "Get-WBJob"], timeout=30, cmd_array_flag=True)
        running = code == "0" and "Running" in out.strip()
        if running:
            log.warn("other backup or recovery tasks are running on the computer.")
        return running

    def check_backup_flag(self):
        file_path = self.cache_repository_path + f"/backup_flag_{self.job_id}"
        if not os.path.exists(file_path):
            log.error(f"backup flag file is not exists. file path: {file_path}")
            return False
        with open(file_path, 'r') as f_read:
            info = f_read.read().strip()
        if info == "Running":
            self.task_running_flag = True
            return True
        if info == "Completed":
            self.task_completed_flag = True
            return True
        return False

    def backup_system_state(self):
        """Run `wbadmin start systemstatebackup` and track it to completion.

        Launches the backup (unless one is already running for this job),
        then polls `wbadmin get status`, reporting scan counts and percent
        progress at most once per minute.

        Returns:
            bool: True when the status command exits 0 and the SYSVOL
            component was found in the wbadmin log.
        """
        self.report_backup_start()
        log.info(f"job_id: {self.job_id}. ps_loc:{self.ps_loc}, backup_command:{self.backup_cmd}")
        # Fix: `process` was previously unbound when task_running_flag was
        # True, raising NameError at the `process is not None` check below.
        process = None
        if not self.task_running_flag:
            self.write_backup_flag_file("Running")
            process = subprocess.Popen([self.ps_loc, self.backup_cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            # wait backup task running
            time.sleep(30)
        states_process = subprocess.Popen([self.ps_loc, self.states_command], stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE, encoding=self.encoding_mode)

        report_time = int(time.time())
        # Report immediately on the first matching line.
        report_flag = True
        scan_number = 0
        while True:
            output = states_process.stdout.readline()
            if output == '' and states_process.poll() is not None:
                break
            if not output.strip():
                continue
            output_str = output.strip()
            log.info(f"output_str:{output_str}")
            scan_match = re.search(self.search_pattern, output_str)
            if scan_match:
                # Still in the "searching for system state files" phase.
                scan_number = int(scan_match.group(1))
                current_time = int(time.time())
                if current_time - report_time >= NumberConst.SIXTY or report_flag:
                    self.report_scan_progress(scan_number)
                    report_time = current_time
                    report_flag = False
            elif re.search(self.progress_pattern, output_str):
                progress_match = re.search(self.progress_pattern, output_str)
                progress_info = int(progress_match.group(1))
                if not report_flag:
                    # First percentage line: close out the scan phase.
                    self.report_scan_progress(scan_number)
                    self.report_scan_completed()
                    report_flag = True
                current_time = int(time.time())
                if current_time - report_time >= NumberConst.SIXTY:
                    self.report_progress(progress_info)
                    report_time = current_time
            self.is_backup_complete(output_str)
        states_process.communicate(timeout=NumberConst.THIRTY)
        if process is not None:
            process.terminate()
        if states_process.returncode != 0:
            self.get_err_info(states_process.returncode)
            return False
        if not self.sysvol_contain_flag:
            # is_backup_complete() already reported the missing-SYSVOL error.
            return False
        self.write_backup_flag_file("Completed")
        self.report_job_complete()
        return True

    def get_err_info(self, code):
        """Report failure using details from the previous wbadmin task, if any."""
        task_info = get_previous_task_info()
        log.error(f"the previous task info is : {task_info}")
        task_err_code = task_info.get("DetailedHResult", "0")
        task_err_description = task_info.get("ErrorDescription", "")
        log.error(f"the previous task DetailedHResult is {task_err_code}, ErrorDescription is {task_err_description}")
        if task_err_code == "0":
            # No system-level detail available; fall back to the raw exit code.
            self.report_job_failed(code)
        else:
            self.report_job_failed_with_system_error(task_err_code, task_err_description)

    def write_backup_flag_file(self, info):
        file_path = self.cache_repository_path + f"/backup_flag_{self.job_id}"
        mode = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR
        flag = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        try:
            with os.fdopen(os.open(file_path, flag, mode), 'w', encoding='utf-8') as f:
                f.write(info)
        except Exception as err:
            log.error(f"error occur in write backup flag file: {err}", exc_info=True)
            return False
        return True

    def check_backup_object_flag(self):
        """Return True when the AD-object flag file for this job exists."""
        object_flag_path = self.cache_repository_path + f"/backup_object_flag_{self.job_id}"
        exists = os.path.exists(object_flag_path)
        if exists:
            log.info("check_backup_object_flag is exists")
        else:
            log.info("check_backup_object_flag is not exists")
        return exists

    def write_backup_object_flag_file(self):
        file_path = self.cache_repository_path + f"/backup_object_flag_{self.job_id}"
        mode = stat.S_IWUSR | stat.S_IRUSR | stat.S_IXUSR
        flag = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
        try:
            with os.fdopen(os.open(file_path, flag, mode), 'w', encoding='utf-8') as f:
                f.write("backup_object")
        except Exception as err:
            log.error(f"error occur in write backup object flag file: {err}", exc_info=True)
            return False
        return True

    def backup_ad_object(self):
        """Export all AD object DNs and persist their metadata in batches.

        Dumps every object's DistinguishedName (sorted by whenCreated) to a
        file via PowerShell, then walks the list in batches of 20000 and
        stores each batch's metadata into sqlite via save_ad_struct_info().

        Returns:
            bool: True on success (including an empty directory), False when
            the export or any batch fails.
        """
        log.info("Start Backup Active Directory Object.")
        self.write_backup_object_flag_file()
        batch_size = 20000
        cmd = "Get-ADObject -Filter * -Properties whenCreated,DistinguishedName | Sort-Object whenCreated | " \
              "Select-Object -ExpandProperty DistinguishedName| " \
              f"Out-File -FilePath {self.distinguishedname_file} -Encoding utf8"
        process = subprocess.Popen([self.ps_loc, cmd], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        # Drain stdout line by line: on some hosts the query outlives the
        # keep-alive window, and a full pipe would block the child.
        while True:
            output = process.stdout.readline()
            if output == b'' and process.poll() is not None:
                log.info("search DistinguishedName completed")
                break
            log.info(f"output of backup object: {output}")
        process.communicate(timeout=NumberConst.SIXTY)
        cmd_code = process.returncode
        log.info("get info completed!")
        if cmd_code != 0:
            log.error(f"search DistinguishedName failed")
            self.report_failed()
            return False
        with open(self.distinguishedname_file, 'r', encoding='utf-8-sig') as f_read:
            # Fix: an empty export used to yield [''] (splitting an empty
            # string), so the emptiness check below could never trigger and a
            # blank id was processed. Drop blank lines up front.
            id_list = [line for line in f_read.read().strip().split("\n") if line.strip()]
        if not id_list:
            log.info("ADObject is empty.")
            self.report_complete()
            return True
        count = math.ceil(len(id_list) / batch_size)
        for i in range(count):
            start = i * batch_size
            end = min((i + 1) * batch_size - 1, len(id_list) - 1)
            # Double single quotes so the DN is safe inside a PowerShell
            # single-quoted literal.
            start_id = id_list[start].replace("'", "''")
            end_id = id_list[end].replace("'", "''")
            if not self.save_ad_struct_info(start_id, end_id):
                log.error(f"ERROR:write sqlite info Failed!")
                self.report_failed()
                return False
        self.report_complete()
        return True

    def save_ad_struct_info(self, start_id, end_id):
        """Export AD objects created between two DNs and store them in sqlite.

        Args:
            start_id: DistinguishedName of the first object of the batch,
                single quotes already doubled for PowerShell quoting.
            end_id: DistinguishedName of the last object of the batch.

        Returns:
            bool: True on success (including "nothing exported"), False when
            the PowerShell pipeline fails.
        """
        log.info("save sqlite info.")
        self.report_running()
        # Select every object whose whenCreated lies between the two anchors
        # and dump the batch to json_path as compressed JSON.
        get_cmd = (
                    f"$s_time=(Get-ADObject -Identity '{start_id}' -Properties whenCreated).whenCreated\n"
                    f"$e_time=(Get-ADObject -Identity '{end_id}' -Properties whenCreated).whenCreated\n"
                    f"Get-ADObject -Filter * -Properties * | "
                    f"Where-Object {{ $_.whenCreated -ge $s_time -and $_.whenCreated -le $e_time }} | "
                    f"ConvertTo-Json -Compress | Out-File -FilePath '{self.json_path}' -Encoding utf8"
                )
        code, out, err = execute_cmd([self.ps_loc, get_cmd], encoding="utf-8", timeout=180, cmd_array_flag=True)
        if code == "0":
            if not os.path.exists(self.json_path):
                # Empty batch: ConvertTo-Json received no input, no file written.
                return True
            # NOTE(review): assumes ConvertTo-Json -Compress emits one JSON
            # document per line; a multi-object batch serializes to a JSON
            # array, so a parsed line may be a list rather than a dict —
            # confirm write_metadata_to_sqlite_file handles that shape.
            with open(self.json_path, 'r', encoding='utf-8-sig') as f_read:
                param_dict_list = [json.loads(line) for line in f_read]
            for param_dict in param_dict_list:
                ADSqliteService.write_metadata_to_sqlite_file(self.meta_repository_path, param_dict, self.parent_id_map)
            return True
        else:
            log.error(f"ERROR occured in save ad struct info:{err}")
            self.report_failed()
            return False

    def calculate_progress(self, is_found_stage=False, progress=0):
        current_progress = 30
        if not is_found_stage:
            current_progress = 30 + int(progress * 0.7)
        return current_progress

    def report_backup_start(self):
        """Report the 5% RUNNING status that marks the start of the backup."""
        log.info(f"report_backup_start,job_id:{self.job_id}")
        detail = LogDetail(logInfo=ADDSLabel.START_BACKUP_LABEL, logInfoParam=[self.sub_id],
                           logLevel=LogLevel.INFO.value)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id,
                                progress=NumberConst.FIVE, logDetail=[detail],
                                taskStatus=SubJobStatusEnum.RUNNING.value)
        # NOTE(review): every other report_* method passes self.job_id as the
        # second rpc argument; this one passes self.sub_id — confirm intended.
        report_job_details_by_rpc(self.req_id, self.sub_id, details.dict(by_alias=True))

    def report_progress(self, progress):
        """Report wbadmin's backup percentage while the job is RUNNING."""
        log.info(f"report_backup_process,job_id:{self.job_id}")
        detail = LogDetail(logInfo=ADDSLabel.ADDS_BACKUP_RUNNING_LABEL,
                           logInfoParam=[str(progress)], logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, logDetail=[detail],
                                taskStatus=SubJobStatusEnum.RUNNING.value, progress=NumberConst.FIFTY)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_scan_progress(self, number):
        """Report how many system-state files the scan has found so far."""
        log.info(f"report_backup_scan_process,job_id:{self.job_id}")
        detail = LogDetail(logInfo=ADDSLabel.ADDS_SYSTEM_TASK_SCAN,
                           logInfoParam=[str(number)], logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, logDetail=[detail],
                                taskStatus=SubJobStatusEnum.RUNNING.value, progress=NumberConst.FIFTY)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_scan_completed(self):
        """Report that the system-state file scan phase has finished."""
        log.info(f"report_backup_scan_completed,job_id:{self.job_id}")
        detail = LogDetail(logInfo=ADDSLabel.ADDS_SYSTEM_TASK_SCAN_COMPLETED, logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, logDetail=[detail],
                                taskStatus=SubJobStatusEnum.RUNNING.value, progress=NumberConst.FIFTY)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_running(self):
        """Heartbeat: report RUNNING at 50% with no log detail attached."""
        log.info(f"report_backup_job_is_running,job_id:{self.job_id}, sub_id:{self.sub_id}")
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id,
                                taskStatus=SubJobStatusEnum.RUNNING.value, progress=NumberConst.FIFTY)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_complete(self):
        """Report the current sub-job as COMPLETED at 100%."""
        detail = LogDetail(logInfo=ADDSLabel.PLUGIN_BACKUP_SUBJOB_SUCCESS_LABEL,
                           logInfoParam=[self.sub_id], logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, logDetail=[detail],
                                taskStatus=SubJobStatusEnum.COMPLETED.value, progress=NumberConst.HUNDRED)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_failed(self):
        """Report the current sub-job as FAILED at 100%."""
        detail = LogDetail(logInfo=ADDSLabel.EXECUTE_BACKUP_FAILED_LABEL,
                           logInfoParam=[self.sub_id], logLevel=LogLevel.ERROR)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, logDetail=[detail],
                                taskStatus=SubJobStatusEnum.FAILED.value, progress=NumberConst.HUNDRED)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_job_complete(self):
        """Report COMPLETED together with the scanned backup data size (KB)."""
        log.info(f"Report backup job complete, job: {self.job_id}, subJob: {self.sub_id}")
        data_path = os.path.join(self.data_repository_path, ADDSParamConstant.ADDS_DIR)
        data_size = scan_dir_size(data_path)
        log.info(f"Backup size is: {data_size} kb, data path is {data_path}")
        detail = LogDetail(logInfo=ADDSLabel.PLUGIN_BACKUP_SUBJOB_SUCCESS_LABEL,
                           logInfoParam=[self.sub_id], logLevel=LogLevel.INFO)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                logDetail=[detail], dataSize=data_size,
                                taskStatus=SubJobStatusEnum.COMPLETED.value)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_job_failed(self, error_code=200, label=ADDSLabel.EXECUTE_BACKUP_FAILED_LABEL):
        """Report FAILED with *label*; *error_code* only appears in the log."""
        log.error(f"Report backup failed, job_id: {self.job_id}, sub_job_id: {self.sub_id}, error code: {error_code}")
        detail = LogDetail(logInfo=label, logInfoParam=[self.sub_id], logLevel=LogLevel.ERROR)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                logDetail=[detail],
                                taskStatus=SubJobStatusEnum.FAILED.value)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def report_job_failed_with_system_error(self, result_code="0", err_description=""):
        """Report FAILED carrying the system error code/description wbadmin gave."""
        error_code = 200
        label = "adds_backup_subjob_failed_with_system_error_label"
        log.error(f"Report backup failed, job_id: {self.job_id}, sub_job_id: {self.sub_id}, error code: {error_code}")
        log.error(f"code:\"{result_code}\",description:\"{err_description}\"")
        detail = LogDetail(logInfo=label, logInfoParam=[result_code, err_description], logLevel=LogLevel.ERROR)
        details = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                                logDetail=[detail],
                                taskStatus=SubJobStatusEnum.FAILED.value)
        report_job_details_by_rpc(self.req_id, self.job_id, details.dict(by_alias=True))

    def backup_post_job(self):
        """Remove this job's temporary flag and name files, then report OK."""
        log.info(f"backup_post_job start, job id:{self.job_id}.")
        leftovers = (
            self.cache_repository_path + f"/backup_flag_{self.job_id}",
            self.cache_repository_path + f"/backup_object_flag_{self.job_id}",
            self.canonical_name_file,
            self.distinguishedname_file,
        )
        for path in leftovers:
            if os.path.exists(path):
                os.remove(path)
        self.report_result(ResultInfo().dict(by_alias=True))
        return True

    def backup_post_job_progress(self):
        """Report the post-job stage as COMPLETED at 100%."""
        log.info(f"backup_post_job_progress success, job id:{self.job_id}.")
        done = SubJobDetails(taskId=self.job_id, subTaskId=self.sub_id, progress=100,
                             taskStatus=SubJobStatusEnum.COMPLETED.value)
        self.report_result(done.dict(by_alias=True))
        return True

    def query_backup_copy(self):
        """Assemble and report the copy info (data repository + share name)."""
        log.info(f"start to query backup copy, job_id:{self.job_id}")
        copy_id = self.param.get_copy_id()
        target_id = convert_copy_id(copy_id)
        # Keep only the DATA repository entry for the copy.
        repositories = []
        for repository in self.param.get_repositories():
            if int(repository['repositoryType']) == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                # Coerce remotePath through formatting, as the original did
                # (a no-op for plain strings).
                repository['remotePath'] = f"{repository['remotePath']}"
                repositories.append(repository)
                break
        # Dedicated short share name, used later during restore.
        share_name = f"adds_{target_id}"
        parent_path = get_ad_domain()
        copy_info = {
            'id': copy_id,
            'repositories': repositories,
            'extendInfo': {
                'backupTarget': self.data_repository_path,
                'share_name': share_name,
                'parent_path': parent_path,
            },
        }
        self.report_result(copy_info)
        return True

    def abort_backup_job(self):
        """Run `wbadmin stop job -quiet` to abort the running backup.

        Returns:
            bool: True when the abort command exits with 0.
        """
        log.info(f"start to abort backup job, job_id:{self.job_id}")
        response = ResultInfo()
        process = subprocess.Popen([self.ps_loc, self.abort_command], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        # Fix: communicate() instead of wait() — with stdout=PIPE, wait() can
        # deadlock once the child fills the pipe buffer.
        process.communicate()
        cmd_code = process.returncode
        log.debug(f"abort command exec result:{cmd_code}")
        if cmd_code != 0:
            response.code = 200
            # Fix: report the serialized dict, as every other report_result
            # call in this class does, instead of the raw model object.
            self.report_result(response.dict(by_alias=True))
            return False
        return True

    def keep_job_alive(self):
        """Report RUNNING every ~120s until keep_job_alive_flag becomes True."""
        log.info(f"Keep job alive for {self.job_id}")
        tick_count = NumberConst.ZERO
        while not self.keep_job_alive_flag:
            tick_count += 1
            # 0.1s ticks so the thread can be joined quickly; 1200 ticks = 120s.
            if tick_count == NumberConst.ONE_THOUSAND_TWO_HUNDRED:
                self.report_running()
                tick_count = NumberConst.ZERO
            time.sleep(NumberConst.ZERO_POINT_ONE)
        log.info(f"jobKeepAlive thread shut down.")

    def get_dfsr_state(self):
        """Return True when the DFSR Windows service reports status Running."""
        query_command = f'Get-Service -Name DFSR | Select-Object -ExpandProperty Status'
        process = None
        try:
            # Fix: stderr must be piped — without it communicate() returns
            # None for stderr and the `stderr.strip()` below raised
            # AttributeError whenever the command failed.
            process = subprocess.Popen([self.ps_loc, query_command],
                                       stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE, text=True)
            stdout, stderr = process.communicate(timeout=NumberConst.THIRTY)
            # Check whether the command itself succeeded.
            if process.returncode != 0:
                log.error(f"Failed to get DFSR status. Error: {stderr.strip()}")
                return False
            status = stdout.strip()
            log.info(f"DFSR service status: {status}")
            return status == 'Running'
        except subprocess.TimeoutExpired as e:
            # Per the subprocess docs: kill the child after a communicate()
            # timeout, otherwise it keeps running in the background.
            process.kill()
            process.communicate()
            log.error(f"An error occurred: {e}")
            return False
        except Exception as e:
            log.error(f"An error occurred: {e}")
            return False

    def is_backup_complete(self, output_str):
        """Inspect one wbadmin status line; when it names the backup log file,
        verify that the SYSVOL component is listed in that log.

        Sets ``self.sysvol_contain_flag`` via is_contain_key(); reports the
        job as failed when SYSVOL is missing or the log cannot be read.
        """
        keyword_pattern = r"SYSVOL"
        # Path of the per-run log file wbadmin prints at completion.
        log_pattern = r"C:\\Windows\\Logs\\WindowsServerBackup\\Backup-[\d-]+_[\d-]+\.log"
        match_log = re.search(log_pattern, output_str)
        if match_log:
            log_file_path = match_log.group(0)
            log.info(f"Log file path: {log_file_path}")
        else:
            # Not the completion line; nothing to check yet.
            return
        try:
            # Windows Server Backup logs are UTF-16 little-endian; the
            # codec name "UTF-16 LE" normalizes to Python's utf_16_le.
            with open(log_file_path, "r", encoding="UTF-16 LE") as file_handle:
                self.is_contain_key(keyword_pattern, file_handle)
                if not self.sysvol_contain_flag:
                    log.error(f"can't find the SYSVOL component in this backup")
                    self.report_job_failed(label=ADDSLabel.ADDS_SYSVOL_COMPONENT_MISSING)
                return
        except Exception as e:
            log.error(f"Error reading file: {e}")
            self.report_job_failed()
            return

    def is_contain_key(self, keyword_pattern, file_handle):
        lines = file_handle.readlines()
        for line in reversed(lines):
            line = "".join(line.strip())
            match_key = re.search(keyword_pattern, line)
            if match_key:
                log.info(f"find the SYSVOL component in this backup")
                self.sysvol_contain_flag = True
                return
            