import glob
import json
import os
import re
import shutil
import stat
import threading
import time
import datetime
from getpass import getpass
import subprocess
import pymysql
import winrm

from common.cleaner import clear
from common.common import output_execution_result_ex, output_result_file, execute_cmd, invoke_rpc_tool_interface, \
    execute_cmd_list, touch_file, ismount_with_timeout, read_tmp_json_file
from common.common_models import SubJobModel, SubJobDetails, CopyInfoRepModel, Copy, ReportCopyInfoModel, LogDetail
from common.const import ParamConstant, SubJobPolicyEnum, SubJobPriorityEnum, BackupTypeEnum, ExecuteResultEnum, \
    RepositoryDataTypeEnum, SubJobStatusEnum
from common.util.exec_utils import su_exec_rm_cmd

from exchange.common.const import ExchangeSubJobName, SubJobType, ExchangeCode, ActionResponse, ErrorCode, \
    ExchangeBackupLevel, ExchangeQueryStatus, ExchangeCmd, copyMetaFileName, CMDResult, RpcParamKey, \
    LastCopyType, SubJobStatusForSqlite, ObclientStatus, ExchangeReportLabel, LogLevel, ExchangeCmd
from exchange.common.exchange_backup_exception import LogDetailException
from exchange.common.exchange_common import remove_dir, get_env_variable, exec_rc_tool_cmd, report_job_details, \
    get_dir_size, init_sqlite_file, wait_or_lock_sqlite, update_sqlite_sub_job_status, get_agent_id, \
    check_special_characters, str_to_float, check_mount
from exchange.common.exchange_exception import ErrCodeException
from exchange.common.exchange_sqlite_service import ExchangeSqliteService
from exchange.logger import log


class BackUp:

    def __init__(self, pid, job_id, sub_job_id, data, json_param):
        """Build a backup job context from the framework-supplied parameters.

        Args:
            pid: request id used to look up env-variable credentials.
            job_id: main job id.
            sub_job_id: current sub job id.
            data: raw stdin payload handed over by the framework.
            json_param: parsed job parameter dict; must be non-empty.

        Raises:
            Exception: when json_param is empty/None or the group name
                contains illegal special characters.
        """
        if not json_param:
            log.error("Parse params obj is null.")
            raise Exception("Parse params obj is null.")
        self._json_param = json_param
        self._pid = pid
        self._job_id = job_id
        self._sub_job_id = sub_job_id
        self._std_in = data

        self._protect_env = self._json_param.get("job", {}).get("protectEnv", {})
        self._protect_object = self._json_param.get("job", {}).get("protectObject", {})
        self._repositories = self._json_param.get("job", {}).get("repositories", [])
        # Mount paths of the repositories this job reads/writes.
        self._cache_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.CACHE_REPOSITORY)
        self._data_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.DATA_REPOSITORY)
        self._meta_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.META_REPOSITORY)
        self._log_area = self.get_repository_path(json_param, RepositoryDataTypeEnum.LOG_REPOSITORY)

        self._backup_type = self._json_param.get("job", {}).get("jobParam", {}).get("backupType", "")

        # NOTE(review): assumes job.copy always has at least one entry; an
        # empty list raises IndexError here -- confirm with the caller.
        self._copy_id = self._json_param.get("job", {}).get("copy", [])[0].get("id", "")

        self._query_time_interval = 3
        self._group_name, self._group_id = self.get_group_name_and_id(self._protect_env)
        if not check_special_characters(self._group_name):
            log.error(f'the group_name is Illegal {self._group_name}')
            raise Exception(f'the group_name is Illegal {self._group_name}')
        # Fixed mount point /<group_name>/<group_id> used throughout the job.
        self._persistent_mount = os.path.join("/", self._group_name, self._group_id)

        # For an Exchange backup, mailbox_name_list, mailbox_id_list and
        # mailbox_name_id_list all refer to the mailboxes being protected.
        self._mailbox_name_list = []  # mailbox names
        self._mailbox_id_list = []  # mailbox ids
        self._mailbox_name_id_list = []  # combined mailbox name + id
        self._backup_id = 0
        self._incarnation_id = 0
        self._backup_time = ''
        self._max_next_time = ''
        self._data_size = 0

        self._job_status = SubJobStatusEnum.RUNNING

    @staticmethod
    def get_enter_database_ip_port(protect_env):
        group_info_str = protect_env.get("extendInfo", {}).get("groupInfo", "")
        group_info = json.loads(group_info_str)
        mailbox_list = group_info.get("mailboxes", [])
        mailbox_info_dict = mailbox_list[0]
        ip = mailbox_info_dict.get("ip", "")
        port = int(mailbox_info_dict.get("port", ""))
        return ip, port

    @staticmethod
    def get_group_name_and_id(protect_object):
        group_info_str = protect_object.get("extendInfo", {}).get("groupInfo", "")
        group_info = json.loads(group_info_str)
        group_id = group_info.get("group_id", "")
        group_name = group_info.get("group_name", "")
        return group_name, group_id

    @staticmethod
    def get_repository_path(json_param, repository_type):
        repositories = json_param.get("job", {}).get("repositories", [])
        repositories_path = ""
        for repository in repositories:
            if repository['repositoryType'] == repository_type:
                repositories_path = repository["path"][0]
                break
        return repositories_path

    @staticmethod
    def set_error_response(response):
        """Mark *response* as failed by stamping the generic FAILED code on both fields."""
        failed_code = ExchangeCode.FAILED.value
        response.code = failed_code
        response.body_err = failed_code

    @staticmethod
    def backup_exchange_cmd(cmd, mailbox_id_list=None, bs_key=0, mailbox_name_list="", backup_destination=""):
        """Build the PowerShell command string for the given ExchangeCmd key.

        Args:
            cmd: ExchangeCmd member selecting the command template.
            mailbox_id_list: mailbox identities spliced into the queries;
                defaults to ["NONE", "NONE"] placeholders.
            bs_key: backup-set key used by the time / old-backup-set queries.
            mailbox_name_list: server/mailbox names for the status query.
            backup_destination: target path for the wbadmin full backup.

        Returns:
            The formatted command string, or None when cmd has no entry.

        NOTE(review): every dict value is an f-string evaluated eagerly, so
        all templates are formatted on every call regardless of `cmd`; the
        defaults exist to keep the unused templates valid.
        """
        if mailbox_id_list is None:
            mailbox_id_list = ["NONE", "NONE"]
        exchange_ps_cmd_dict = {
            ExchangeCmd.BACKUP_STATUS: f'Get-MailboxDatabase -Server {",".join(mailbox_name_list)} -Status | fl Name,*FullBackup',
            ExchangeCmd.GROUP_FULL_BACKUP: 'Start-MailboxDatabaseBackup -Full',
            ExchangeCmd.GROUP_INCRE_BACKUP: 'Start-MailboxDatabaseBackup -Incremental',
            ExchangeCmd.MAILBOX_FULL_BACKUP: f'wbadmin start backup -backupTarget:{backup_destination} '
                                             '-vssFull -allCritical -quiet',
            ExchangeCmd.QUERY_INCARNATION_ID: 'Get-MailboxDatabaseCopyStatus | Select-Object Incarnation',
            ExchangeCmd.QUERY_LOG_MAX_NEXT_TIME: 'Get-MailboxDatabaseCopyStatus | Select-Object MaxNextTime',
            ExchangeCmd.QUERY_BACKUP_TIME: f'Get-MailboxDatabaseCopyStatus -Identity {",".join(mailbox_id_list)} | '
                                           f'Select-Object -ExpandProperty CompletionTime | Where-Object {{"$_.BS_KEY -eq {bs_key}"}}',
            ExchangeCmd.QUERY_DATABASE_FOR_DISPLAY: f'Get-MailboxDatabase | Where-Object {{$_.mailboxId -in {",".join(mailbox_id_list)}}} | '
                                                    f'Select-Object mailboxName, DatabaseName',
            ExchangeCmd.QUERY_TABLE_FOR_DISPLAY: f'Get-MailboxDatabase | Where-Object {{$_.mailboxId -in {",".join(mailbox_id_list)}}} | '
                                                 f'Select-Object mailboxName, DatabaseName, TableName',
            ExchangeCmd.QUERY_OLD_BACKUP_SET: f'Get-MailboxDatabaseBackup -Level Group -Status Completed | '
                                              f'Where-Object {{$_.BsKey -lt {bs_key}}} | Select-Object -ExpandProperty BsKey'
        }
        ps_cmd = exchange_ps_cmd_dict.get(cmd)
        log.info(f'ps_cmd : {ps_cmd}')
        return ps_cmd

    @staticmethod
    def exec_exchange_cmd(ip, port, cmd_str, pid):
        """Run one PowerShell command over the WinRM session.

        Returns:
            (True, stdout_text) on success; (False, exception) on any failure.
        """
        # Obtain the Exchange connection session.
        session = BackUp.get_exchange_session(ip, port, pid)
        try:
            run_result = session.run_ps(cmd_str)
            decoded_output = run_result.std_out.decode().strip()
            # A non-zero status code means the remote command failed.
            if run_result.status_code != 0:
                raise Exception(f"PowerShell command execution failed. Error: {decoded_output}")
            return True, decoded_output
        except Exception as ex:
            log.error(f"execute cmd {cmd_str} failed!, ex is {ex}")
            return False, ex

    @staticmethod
    def get_exchange_session(ip, port, pid):
        """Create a WinRM session to the Exchange server's PowerShell endpoint.

        Credentials are read from environment variables keyed by *pid*.
        `port` is currently unused; the session targets the default HTTP
        PowerShell URL on *ip*.

        Raises:
            ErrCodeException: ERROR_AUTH when the session cannot be created.
        """
        # Fetch the authentication info.
        user = get_env_variable(f'job_protectEnv'
                                f'_auth_authKey_{pid}')
        db_pwd = get_env_variable(f'job_protectEnv_auth_authPwd_{pid}')

        try:
            # Obtain the credential, mimicking Get-Credential.
            username = user
            password = db_pwd

            # Create the PowerShell session with Kerberos authentication.
            # Fully qualified domain name (FQDN) of the Exchange server.
            server_fqdn = ip  # assumes the IP is the server FQDN; adjust if needed

            # Assemble the PowerShell session URL.
            url = f'http://{server_fqdn}/PowerShell/'

            # Create the WinRM session.
            session = winrm.Session(url, auth=(username, password), transport='kerberos')

            # Return this session object for running PowerShell commands.
            return session

        except Exception as except_str:
            log.error(f"Connect to Exchange Mailbox: {ip} service failed!")
            raise ErrCodeException(ErrorCode.ERROR_AUTH, "Check connectivity: auth info error!") from except_str

        finally:
            # Scrub the sensitive password string; runs even on the success
            # path, after the session object has been created from it.
            clear(db_pwd)

    @staticmethod
    def exec_exchange_cmd_with_expect(ip, port, cmd_str, expect_output, pid):
        """Poll *cmd_str* every 10s until every result row equals *expect_output*.

        Returns:
            (True, output) once the expectation is met;
            (False, err) on connection/execution failure or when a FAILED
            status appears in the output.

        NOTE(review): get_exchange_session returns a winrm.Session, which has
        no cursor()/close() API -- this body looks written against a DB-style
        connection (cf. the pymysql import at file top). Confirm the intended
        session type before relying on this method.
        """
        log.info(f"exec_exchange_sql_with_expect starts, sql_str is {cmd_str}, expect_output is {expect_output}")
        output = ''
        try:
            conn = BackUp.get_exchange_session(ip, port, pid)
            cur = conn.cursor()
        except Exception as err:
            log.error(f"execute sql {cmd_str} failed!")
            return False, err
        while not BackUp.check_sql_output(output, expect_output):
            try:
                cur.execute(cmd_str)
                output = cur.fetchall()
            except Exception as err:
                log.error(f"execute sql {cmd_str} failed!")
                cur.close()
                conn.close()
                return False, err
            # Any FAILED status in the rows aborts the wait immediately.
            if ExchangeQueryStatus.FAILED in output:
                log.error(f"execute sql {cmd_str} failed, output STATUS is {output} ")
                cur.close()
                conn.close()
                return False, Exception("Status FAILED in output")
            log.info(f"now the output is {output} and expect output is {expect_output}")
            time.sleep(10)
        cur.close()
        conn.close()
        return True, output

    @staticmethod
    def check_sql_output(output, expect_output):
        if len(set(output)) == 1 and output[0][0] == expect_output:
            return True
        else:
            return False

    @staticmethod
    def exec_multi_process_cmd(cmd_list):
        threads = []
        for cmd in cmd_list:
            copy_thread = threading.Thread(target=execute_cmd, args=(cmd,))  # 调用函数,引入线程参数
            copy_thread.start()  # 开始执行
            threads.append(copy_thread)
        for copy_thread in threads:
            copy_thread.join()

    @staticmethod
    def check_backup_job_level(protect_object):
        """Map the protected object's subType to a backup level: group level
        for "Exchange-group", mailbox level for everything else."""
        if protect_object.get("subType", "") == "Exchange-group":
            return ExchangeBackupLevel.BACKUP_GROUP_LEVEL
        return ExchangeBackupLevel.BACKUP_MAILBOX_LEVEL

    @staticmethod
    def calculate_speed(speed_file, copy_path):
        """Compute incremental backup speed since the last sample.

        Reads the previous {data_size, time} sample from *speed_file*,
        measures the current size of *copy_path*, persists the new sample,
        and returns (speed_bytes_per_sec, size_delta_bytes).

        Returns:
            (False, 0) when the speed file has been removed;
            (0, size_delta) when no time elapsed between samples;
            (0, 0) on a calculation error.
        """
        if not os.path.exists(speed_file):
            log.info("speed file has been removed, return False")
            return False, 0
        log.info("RUNNING and calculating")
        with open(speed_file, "r", encoding='UTF-8') as f_content:
            speed_info = json.loads(f_content.read())

        # BUGFIX: the defaults were the `int` type object itself, which made
        # the int(...) conversions below raise TypeError when a key was missing.
        data_size_old = speed_info.get('data_size', 0)
        time_old = speed_info.get('time', 0)

        data_size_new = get_dir_size(copy_path)
        time_new = int(time.time())

        log.info(f"time_new and time_old is {time_new, time_old}")
        log.info(f"data_size_new and data_size_old is {data_size_new, data_size_old}")
        data_size_diff = int(data_size_new) - int(data_size_old)
        time_diff = time_new - int(time_old)
        # Persist the fresh sample for the next poll.
        speed_info = {
            'data_size': data_size_new,
            'time': time_new
        }
        output_execution_result_ex(speed_file, speed_info)
        if not time_diff:
            log.info(f"query_size_and_speed, time_diff is {time_diff}")
            return 0, data_size_diff
        try:
            return data_size_diff / time_diff, data_size_diff
        except Exception:
            log.error("Error while calculating speed!")
            return 0, 0

    @staticmethod
    def get_local_ip(json_param):
        """Return the endpoint of the cluster node whose id matches this
        agent's id, or None when no node matches."""
        this_agent = get_agent_id()
        for node in json_param['job']['protectEnv']['nodes']:
            if node['id'] == this_agent:
                return node['endpoint']
        return None

    @staticmethod
    def exe_mount(data_remote_path, data_repo, mount_point):
        """Try to NFS-mount *data_remote_path* from each remote host in turn.

        Stops at the first host whose mount succeeds.

        Returns:
            The exit code of the last mount attempt, or None when the repo
            lists no remote hosts (treated as failure by callers, which
            compare against CMDResult.SUCCESS).
        """
        # BUGFIX: return_code was unbound (NameError) when remoteHost was
        # missing or empty; initialize it so callers see a failure instead.
        return_code = None
        remote_hosts = data_repo.get("remoteHost") or []
        for remote_host in remote_hosts:
            remote_ip = remote_host.get("ip")
            mount_cmd_str = f"sudo mount -tnfs4 -o rw,nfsvers=4.1,sync,lookupcache=positive,hard,timeo=600," \
                            f"wsize=1048576,rsize=1048576,namlen=255 {remote_ip}:{data_remote_path} {mount_point}"
            return_code, out_info, err_info = execute_cmd(mount_cmd_str)
            if return_code == CMDResult.SUCCESS:
                break
            else:
                log.error(f"The execute mount cmd failed! ERROR_INFO : {err_info}")
        return return_code

    def exec_mount_job(self):
        """Ensure the data repository is NFS-mounted at /<group_name>/<group_id>.

        Reuses an existing valid mount, otherwise unmounts stale state,
        creates the mount point, mounts the data repo, fixes permissions and
        re-verifies the mount.

        Returns:
            True on success.

        Raises:
            LogDetailException: when mkdir or mount fails.

        NOTE(review): if no DATA repository is present, data_repo stays None
        and the .get() below raises AttributeError -- confirm upstream always
        supplies one.
        """
        log.info(f"exec_mount_job {self._job_id}")
        mount_point = os.path.join("/", self._group_name, self._group_id)
        data_repo = None
        for repository in self._repositories:
            if repository.get("repositoryType", "") == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                data_repo = repository
        data_remote_path = data_repo.get("remotePath", "")
        # If the mount point already has the data repo's remotePath mounted:
        # return success directly.
        if check_mount(mount_point) and self.deal_mount(mount_point):
            return True
        # Check the mount point state to detect a server-side unmount.
        return_code, out_info, err_info = execute_cmd(f'cd {mount_point}')
        if return_code != CMDResult.SUCCESS:
            # Unmount once and record the state, used to re-enable ob backup
            # after a restart.
            execute_cmd(f'umount -l {mount_point}')
            log.info("backup dest was unmounted from X8000, execute umount command here")
            reopen_log_file = os.path.join(self._meta_area, f"reopen_log_{self._job_id}")
            touch_file(reopen_log_file)
            log.info(f"create reopen_log_file here")
        local_ip = self.get_local_ip(self._json_param)
        log.warning(f"mount point has not been mounted yet")
        # Not mounted: create the mount point (mkdir -p /{group_name}/{group_id})
        # and chown it below.
        return_code, out_info, err_info = execute_cmd(f'mkdir -p {mount_point}')
        log.info(f"mkdir return_code, out_info, err_info is {return_code, out_info, err_info}")
        if return_code != CMDResult.SUCCESS or not os.path.exists(mount_point):
            log.error(f"The execute mkdir_local_dir_path cmd failed! ERROR_INFO : {err_info}")
            err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_MKDIR_MOUNT_POINT_FAIL_LABEL,
                                       logInfoParam=[local_ip], logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=err_log_detail)
        # Run the mount command.
        return_code = self.exe_mount(data_remote_path, data_repo, mount_point)
        if return_code != CMDResult.SUCCESS:
            err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_MOUNT_FAIL_LABEL, logInfoParam=[local_ip],
                                       logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=err_log_detail)
        # Adjust permissions; some inner directories cannot be changed, so
        # errors here are expected and do not affect the job.
        execute_cmd(f'chmod -R 750 {os.path.join("/", self._group_name)}')
        execute_cmd(f'chown -R admin:admin {os.path.join("/", self._group_name)}')
        # NOTE(review): the original comment said mount info is written to
        # /etc/fstab, but no code here does so -- confirm intent.
        # Wait 10s, otherwise the mount check below may fail.
        time.sleep(10)
        # Re-check that the mount succeeded (freshly mounted, so a hung stale
        # mount is unlikely and ismount will not block).
        if not os.path.ismount(mount_point):
            log.error("fail to mount x8000 to mailbox")
            err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_MOUNT_FAIL_LABEL, logInfoParam=[local_ip],
                                       logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=err_log_detail)
        return True

    def deal_mount(self, mount_point):
        """Decide whether an existing mount at *mount_point* can be kept.

        Returns True when the mount belongs to this protected env or cannot
        safely be redone; returns False after unmounting a stale mount so the
        caller performs a fresh mount.
        """
        log.info(f"mount point has already been mounted")
        rc, mounts_out, mounts_err = execute_cmd_list(["mount", f"grep {mount_point}"])
        if rc != CMDResult.SUCCESS:
            # Best effort: cannot inspect the mount table, keep the mount.
            log.error(f"Fail to get mount info {mounts_err}")
            return True
        if self._protect_env.get("id", "") in mounts_out:
            # The mounted filesystem already matches this environment.
            return True
        # Stale mount from another environment: lazily unmount it.
        umount_rc, _, umount_err = execute_cmd(f"umount -l {mount_point}")
        if umount_rc != CMDResult.SUCCESS:
            log.error(f"Fail to umount {umount_err}")
            return True
        try:
            shutil.rmtree(os.path.join("/", self._group_name))
        except Exception as err:
            log.error(f"Fail to remove mount path for resource, {err}")
        return False

    def pre_check_before_sub_job(self, priority):
        """Serialize sub-job execution across nodes via a per-job sqlite lock.

        Returns:
            True when another node already completed the sub job;
            False when it failed on another node;
            otherwise the sqlite connection this node now holds.
        """
        sqlite_file_name = os.path.join(self._meta_area, 'sqlite_file', f'{self._job_id}')

        def _try_lock():
            return wait_or_lock_sqlite(sqlite_file_name=sqlite_file_name,
                                       timeout=20,
                                       sub_job_priority=priority.value)

        ret = _try_lock()
        while ret == SubJobStatusForSqlite.DOING.value:
            time.sleep(10)
            ret = _try_lock()
            log.info("waiting other node execute this sub job")
        if ret == SubJobStatusForSqlite.SUCCESS.value:
            return True
        if ret == SubJobStatusForSqlite.FAILED.value:
            return False
        return ret

    def set_backup_dest(self, ip, port, backup_dest_expected):
        """Point the system backup_dest at *backup_dest_expected*.

        Raises:
            LogDetailException: when the ALTER SYSTEM command fails.
        """
        log.warn("start to set backup dest")
        # Report warning code ErrorCode.WARN_SET_BACKUP_DEST
        alter_stmt = F"ALTER SYSTEM SET backup_dest='file://{backup_dest_expected}';"
        ok, cmd_output = self.exec_exchange_cmd(ip, port, alter_stmt, self._pid)
        if not ok:
            detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_SET_BACKUP_DEST_FAIL_LABEL,
                               logInfoParam=[str(cmd_output)], logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=detail)
        log.info(f"reset backup dest success")
        return True

    def exec_data_backup_sub_job(self):
        """Execute the data-backup sub job on this node.

        Coordinates with other nodes through the sqlite lock, runs the backup
        command while a daemon thread reports progress, waits for the backup
        status query to report SUCCESS, then reports the copy information.

        Returns:
            (True, data_size, speed) on success; (ret, 0, 0) when another
            node already handled the job or this node hands it back for retry.

        Raises:
            LogDetailException: when all nodes are offline or the backup/status
                check fails.
        """
        log.info("start to exec_data_backup_sub_job")
        ret = self.pre_check_before_sub_job(SubJobPriorityEnum.JOB_PRIORITY_2)
        if isinstance(ret, bool):
            # True/False means another node already finished/failed the job.
            return ret, 0, 0
        conn = ret
        ip, port = self.get_enter_database_ip_port(self._protect_env)
        if not self.check_mailbox_connection(ip, port, self._pid):
            # This node cannot reach the mailbox; hand the job back for retry.
            if not update_sqlite_sub_job_status(conn=conn, updated_status=SubJobStatusForSqlite.RETRY.value,
                                                priority=SubJobPriorityEnum.JOB_PRIORITY_2.value):
                err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_ALL_OBCLIENT_OFFLINE_LABEL,
                                           logInfoParam=[], logLevel=LogLevel.ERROR)
                raise LogDetailException(log_detail=err_log_detail)
            log.debug("local node fail to connect mailbox, update sqlite status to RETRY")
            return True, 0, 0
        # Report backup speed from a daemon thread while the backup runs.
        self._job_status = SubJobStatusEnum.RUNNING
        time_start = int((time.time()))
        log.info("start to upload speed")
        data_path = self.get_data_path()
        progress_thread = threading.Thread(name='pre_progress', target=self.upload_backup_progress,
                                           args=(data_path,))
        progress_thread.daemon = True
        progress_thread.start()

        # Record the databases/tables covered by this backup for display.
        self.sqlite_for_display(ip, port)
        log.info("start thread for creating sqlite")
        # Run the actual backup command.
        self.exec_data_backup_cmd(ip, port, conn)

        cmd_str = self.backup_exchange_cmd(cmd=ExchangeCmd.BACKUP_STATUS,
                                           mailbox_id_list=self._mailbox_id_list)
        expect_output = ExchangeQueryStatus.SUCCESS
        time_end = int((time.time()))

        ret, output = self.exec_exchange_cmd_with_expect(ip, port, cmd_str, expect_output, self._pid)
        if not ret:
            log.error("exec sql and the output of query is not SUCCESS")
            update_sqlite_sub_job_status(conn=conn, updated_status=SubJobStatusForSqlite.FAILED.value,
                                         priority=SubJobPriorityEnum.JOB_PRIORITY_2.value)
            err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_EXEC_BACKUP_SUB_JOB_FAIL_LABEL,
                                       logInfoParam=[str(output)], logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=err_log_detail)
        update_sqlite_sub_job_status(conn=conn, updated_status=SubJobStatusForSqlite.SUCCESS.value,
                                     priority=SubJobPriorityEnum.JOB_PRIORITY_2.value)

        self._job_status = SubJobStatusEnum.COMPLETED
        # The progress thread exits once _job_status leaves RUNNING.
        log.info("end to upload speed")
        progress_thread.join()
        # Compute copy size and average speed (bytes per second).
        data_size = self._data_size
        elapsed = time_end - time_start
        # BUGFIX: was (time_end - time_start) / data_size -- the ratio was
        # inverted and divided by zero when data_size was 0.
        speed = data_size / elapsed if elapsed else 0
        # Report copy info.
        self.upload_data_copy(ip, port)
        return True, data_size, speed

    def get_data_path(self):
        """Return the directory whose size is tracked for progress reporting:
        <persistent_mount>/group for group-level jobs, <data_area>/<copy_id>
        for mailbox-level jobs."""
        level = self.check_backup_job_level(self._protect_object)
        if level == ExchangeBackupLevel.BACKUP_GROUP_LEVEL:
            return os.path.join(self._persistent_mount, "group")
        if level == ExchangeBackupLevel.BACKUP_MAILBOX_LEVEL:
            return os.path.join(self._data_area, self._copy_id)
        return self._persistent_mount

    def get_backup_size(self, backup_destination):
        # 使用 du 命令获取 Linux 系统中备份目标目录的大小（单位：字节）
        cmd = f"du -sb {backup_destination}"  # '-s' 是总结大小，'-b' 以字节为单位
        try:
            result = subprocess.run(cmd, capture_output=True, text=True, check=True, shell=True)
            output = result.stdout.strip()
            size_in_bytes = int(output.split()[0])  # 获取字节数
            return size_in_bytes
        except subprocess.CalledProcessError as e:
            log.error(f"Error getting backup size: {e}")
            return 0

    def upload_data_copy(self, ip, port):
        """Wait until log archiving catches up with the backup completion
        time, then clear superseded backup sets and report the copy."""
        self._incarnation_id = self.query_incarnation_id(ip, port)
        self._backup_time = self.query_backup_time(ip, port)
        while True:
            min_max_next_time = min(self.query_log_max_next_time().values())
            if min_max_next_time >= self._backup_time:
                break
            # Poll until the log max_next_time reaches the completion time.
            log.info(f"wait log archive")
            log.info(f"now max_next_time {min_max_next_time}, completion time is {self._backup_time}")
            time.sleep(30)
        log.info(f"min_max_next_time is {min_max_next_time}, backup_time is {self._backup_time}")
        # For a full backup, previous backup sets are cleared here.
        self.clear_backup_set(ip, port)
        self.report_group_info()

    def clear_backup_set(self, ip, port):
        """Delete superseded backup sets after a group-level full backup.

        No-op for mailbox-level jobs or non-full backups. For each backup set
        older than the current one (_backup_id), the DELETE is retried every
        15s (up to 100 attempts) while a previous clear task is still running.
        """
        backup_job_level = self.check_backup_job_level(self._protect_object)
        if backup_job_level == ExchangeBackupLevel.BACKUP_MAILBOX_LEVEL:
            return
        if not self._backup_type == BackupTypeEnum.FULL_BACKUP:
            return
        # Query the ids of backup sets older than the current one.
        sql_str = self.backup_exchange_cmd(cmd=ExchangeCmd.QUERY_OLD_BACKUP_SET, bs_key=self._backup_id)
        ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)
        if not ret or not output:
            log.warn("query old backup set ids failed")
            return
        for bs_key in output:
            backup_set_id = bs_key[0]
            clear_cnt = 1
            clear_sql = f"ALTER SYSTEM DELETE BACKUPSET {backup_set_id}"
            clear_ret, output = self.exec_exchange_cmd(ip, port, clear_sql, self._pid)
            time.sleep(15)
            # Retry while a previous clear task blocks this DELETE.
            while not clear_ret:
                if clear_cnt > 100:
                    log.error("clear backup set over 100 times")
                    return
                clear_cnt += 1
                log.warn("wait last clear task finish")
                clear_ret, output = self.exec_exchange_cmd(ip, port, clear_sql, self._pid)
                time.sleep(15)
        return

    def exec_data_backup_cmd(self, ip, port, conn):
        """Run the backup command for the current backup level.

        Group-level backup is not implemented yet (returns False). For a
        mailbox-level backup, mounts the data repository via CIFS, launches a
        wbadmin full backup, verifies the status, records the backup id and
        the backup size.

        Raises:
            LogDetailException: when the backup command or the status query
                fails (the sqlite sub-job status is set to FAILED first).

        NOTE(review): the CIFS mount below places the plaintext password on
        the mount command line, where it is visible in the process list --
        consider a credentials file. Also `ret`/`backup_destination` are only
        bound on the mailbox-level path; safe today because
        check_backup_job_level returns exactly one of the two levels.
        """
        backup_job_level = self.check_backup_job_level(self._protect_object)
        if backup_job_level == ExchangeBackupLevel.BACKUP_GROUP_LEVEL:
            # if self._backup_type == BackupTypeEnum.INCRE_BACKUP:
            #     sql_str = self.backup_exchange_cmd(ExchangeCmd.GROUP_INCRE_BACKUP)
            # else:
            #     sql_str = self.backup_exchange_cmd(ExchangeCmd.GROUP_FULL_BACKUP)
            # ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)
            log.error("TODO Feat")
            return False
        elif backup_job_level == ExchangeBackupLevel.BACKUP_MAILBOX_LEVEL:
            mailbox_name_list_str = ','.join(self._mailbox_name_list)
            backup_destination = os.path.join(self._data_area, self._copy_id)
            mount_point = os.path.join("/", self._group_name, self._group_id)
            user = get_env_variable(f'job_protectEnv'
                                    f'_auth_authKey_{self._pid}')
            pwd = get_env_variable(f'job_protectEnv_auth_authPwd_{self._pid}')
            cmd_str_list = [
                f"mkdir -p {mount_point}",  # create the mount point directory
                f"chown -R admin:admin {mount_point}",  # set the mount point ownership
                 f"mount //{ip}/{backup_destination} {mount_point} -o username={user},password={pwd},vers=3.0"  # mount using the IP address variable
                # mount the Windows folder onto Linux
            ]
            execute_cmd_list(cmd_str_list)
            sql_str = self.backup_exchange_cmd(ExchangeCmd.MAILBOX_FULL_BACKUP,
                                               mailbox_name_list=mailbox_name_list_str,
                                               backup_destination=f'file://{backup_destination}')
            ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)

        if not ret:
            log.error("fail to exec backup sql statement")
            update_sqlite_sub_job_status(conn=conn, updated_status=SubJobStatusForSqlite.FAILED.value,
                                         priority=SubJobPriorityEnum.JOB_PRIORITY_2.value)
            err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_EXEC_BACKUP_SUB_JOB_FAIL_LABEL,
                                       logInfoParam=[str(output)], logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=err_log_detail)
        time.sleep(self._query_time_interval)  # sleep briefly; querying too soon returns nothing
        sql_str = f'Get-MailboxDatabase -Server {ip} -Status | fl Name,*FullBackup'
        ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)
        if not ret:
            log.error("fail to query Backup status")
            update_sqlite_sub_job_status(conn=conn, updated_status=SubJobStatusForSqlite.FAILED.value,
                                         priority=SubJobPriorityEnum.JOB_PRIORITY_2.value)
            err_log_detail = LogDetail(logInfo=ExchangeReportLabel.BACKUP_EXEC_BACKUP_SUB_JOB_FAIL_LABEL,
                                       logInfoParam=[str(output)], logLevel=LogLevel.ERROR)
            raise LogDetailException(log_detail=err_log_detail)
        self._backup_id = max(output)[-1]
        log.info(f'the backup_id for this backup is : {self._backup_id}')

        # Record the size of the produced backup files.
        self._data_size = self.get_backup_size(backup_destination)
        log.info(f"Backup size: {self._data_size} bytes")

        return True

    def report_group_info(self):
        """Persist copy metadata to the meta repository and report the copy.

        For a full backup, additionally records this copy id as the
        "lastCopyId" marker used by later incremental jobs.
        """
        if self._backup_type == BackupTypeEnum.FULL_BACKUP:
            last_copy_id_path = os.path.join(self._meta_area, "lastCopyId")
            last_copy_id_file = os.path.join(last_copy_id_path, "lastCopyId")
            log.info(f"current task copyId: {self._copy_id}")
            if not os.path.exists(last_copy_id_path):
                os.makedirs(last_copy_id_path)
            output_execution_result_ex(last_copy_id_file, self._copy_id)

        # Save the cluster info of this instance for use during restore.
        copy_info = {
            "group_info": self._protect_env.get("extendInfo", {}).get("groupInfo"),
            "backup_time": self._backup_time
        }
        log.info(f"upload backup_time is {self._backup_time}")
        copy_info_path = os.path.join(self._meta_area, self._copy_id)
        copy_info_file = os.path.join(copy_info_path, "copy_info")
        if not os.path.exists(copy_info_path):
            os.makedirs(copy_info_path)
        output_execution_result_ex(copy_info_file, copy_info)
        log.info("step2-6 end to sub_job_exec")
        self.report_copy_info()

    def query_backup_destination(self, ip, port):
        """Query the configured backup destination; return "" on any failure.

        NOTE(review): ExchangeCmd.QUERY_BACKUP_DESTINATION has no entry in
        backup_exchange_cmd's command table, so sql_str resolves to None --
        confirm the intended command mapping.
        """
        sql_str = BackUp.backup_exchange_cmd(ExchangeCmd.QUERY_BACKUP_DESTINATION)
        log.info("query_backup_destination")
        ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)
        if not ret:
            log.error("fail to query backup_destination")
            return ""
        if not output:
            return ""
        try:
            return output[0][0]
        except (KeyError, IndexError, TypeError):
            # BUGFIX: indexing a sequence raises IndexError/TypeError, never
            # KeyError -- the original handler could not fire.
            return ""

    def query_incarnation_id(self, ip, port):
        """Return the single incarnation id reported by the cluster, or False."""
        sql_str = BackUp.backup_exchange_cmd(ExchangeCmd.QUERY_INCARNATION_ID)
        ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)
        if not ret:
            # BUGFIX: the original ignored ret; on failure `output` is an
            # exception object and set(output) would raise TypeError.
            log.error("fail to query incarnation id")
            return False
        if len(set(output)) != 1:
            log.error(f"query_incarnation_id return {output}, not single incarnation id")
            return False
        try:
            return output[0][0]
        except (KeyError, IndexError, TypeError):
            # BUGFIX: indexing raises IndexError/TypeError, not KeyError.
            return False

    def query_backup_time(self, ip, port):
        """Return the backup completion time as a string (sub-second part
        stripped), or False when the query fails or rows disagree."""
        sql_str = BackUp.backup_exchange_cmd(ExchangeCmd.QUERY_BACKUP_TIME,
                                             bs_key=self._backup_id, mailbox_id_list=self._mailbox_id_list)
        ret, output = self.exec_exchange_cmd(ip, port, sql_str, self._pid)
        if not ret:
            # BUGFIX: the original ignored ret; on failure `output` is an
            # exception object and set(output) would raise TypeError.
            log.error("fail to query backup time")
            return False
        if len(set(output)) != 1:
            log.error(f"backup_backup_time is different: {output}")
            return False
        try:
            # Drop the fractional-seconds suffix, keeping "Y-m-d H:M:S".
            return str(output[0][0]).split('.')[0]
        except (KeyError, IndexError, TypeError):
            # BUGFIX: indexing raises IndexError/TypeError, not KeyError.
            return False

    def query_log_max_next_time(self):
        """Return {identity: max_next_time} for the tracked mailboxes, or {}
        when the query fails."""
        ps_cmd = BackUp.backup_exchange_cmd(ExchangeCmd.QUERY_LOG_MAX_NEXT_TIME,
                                            mailbox_id_list=self._mailbox_id_list)
        ip, port = self.get_enter_database_ip_port(self._protect_env)
        ok, output = self.exec_exchange_cmd(ip, port, ps_cmd, self._pid)
        if not ok:
            log.error("fail to get log max_next_time")
            return {}
        return dict(output)

    def get_last_copy_info(self, copy_type: int):
        """Query the previous copy of the given type via the RPC tool.

        Returns the RPC result dict, or {} when the call fails.
        """
        log.info("start get_last_copy_info")
        input_param = {
            RpcParamKey.APPLICATION: self._json_param.get("job", {}).get("protectObject"),
            RpcParamKey.TYPES: LastCopyType.last_copy_type_dict.get(copy_type),
            RpcParamKey.COPY_ID: "",
            RpcParamKey.JOB_ID: self._job_id
        }
        try:
            return invoke_rpc_tool_interface(self._job_id, RpcParamKey.QUERY_PREVIOUS_CPOY, input_param)
        except Exception as err_info:
            log.error(f"Get last copy info fail.{err_info}")
            return {}

    def report_copy_info(self):
        """Report the finished copy (copy id + backup epoch time) to the framework.

        Parses self._backup_time ("%Y-%m-%d %H:%M:%S") into an epoch timestamp
        and pushes it via the RPC tool. Raises on a malformed backup time.
        """
        parsed_time = datetime.datetime.strptime(self._backup_time, "%Y-%m-%d %H:%M:%S")
        copy = Copy(
            repositories=[],
            extendInfo={
                "copy_id": self._copy_id,
                "backup_time": parsed_time.timestamp(),
            },
        )
        report_model = ReportCopyInfoModel(copy=copy, jobId=self._job_id)
        invoke_rpc_tool_interface(self._job_id, RpcParamKey.REPORT_COPY_INFO,
                                  report_model.dict(by_alias=True))
        log.debug(f"Finish report copy_info!")

    def upload_backup_progress(self, data_path):
        """Track backup size/speed until the job leaves RUNNING state.

        Starts report_backup_progress in a daemon thread, seeds the speed file,
        then loops: computes speed from the speed file, writes the progress json
        consumed by the reporter thread, and sleeps. On exit it touches a flag
        file so the reporter thread can finish, then joins it.

        :param data_path: directory whose size is sampled to compute speed
        """
        log.info(f"query backup speed and size")
        report_progress_thread = threading.Thread(name='report_progress', target=self.report_backup_progress)
        report_progress_thread.daemon = True
        report_progress_thread.start()
        speed_file = os.path.join(self._cache_area, f'speed_{self._job_id}')
        time_init = int((time.time()))
        speed_info = {
            'data_size': int(get_dir_size(data_path)),
            'time': time_init
        }
        log.info(f'initial speed info is {speed_info}')
        output_execution_result_ex(speed_file, speed_info)
        process_file = os.path.join(self._cache_area, f"BackupProgress_{self._job_id}")
        while self._job_status == SubJobStatusEnum.RUNNING:
            log.info(f"progress RUNNING")
            # Fix: check for removal BEFORE reading the speed file. The original
            # checked after calculate_speed had already read it, so the guard
            # could not protect the read from a just-removed file.
            if not os.path.exists(speed_file):
                log.info("speed file has been removed, break")
                break
            speed, data_size_diff = self.calculate_speed(speed_file, data_path)
            log.info(f'speed, data_size_diff is {speed, data_size_diff}')
            sub_job_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                         taskStatus=SubJobStatusEnum.RUNNING.value,
                                         speed=speed, logDetail=[], progress=20)

            progress_dict = sub_job_dict.dict(by_alias=True)
            output_execution_result_ex(process_file, progress_dict)
            time.sleep(self._query_time_interval)
        # Signal the reporter thread that computation is over, then wait for it.
        touch_file(os.path.join(self._cache_area, f"BackupComputeOverFlag_{self._job_id}"))
        report_progress_thread.join()
        log.info("sub thread ends here")

    def report_backup_progress(self):
        """Periodically push the latest progress json to the agent.

        Runs until the compute-over flag file appears (created by
        upload_backup_progress), then removes both temporary files.
        """
        log.info("report start.")
        progress_dict = SubJobDetails(taskId=self._job_id, subTaskId=self._sub_job_id,
                                      taskStatus=SubJobStatusEnum.RUNNING.value, logDetail=[],
                                      progress=20).dict(by_alias=True)
        process_file = os.path.join(self._cache_area, f"BackupProgress_{self._job_id}")
        # 等上面的upload_backup_progress线程结束了，这边上报才能结束掉
        # (keep reporting until the producer thread drops the flag file)
        backup_compute_over_flag_file = os.path.join(self._cache_area, f"BackupComputeOverFlag_{self._job_id}")
        while not os.path.exists(backup_compute_over_flag_file):
            if os.path.exists(process_file):
                log.debug(f"process file exists.")
                try:
                    progress_dict = read_tmp_json_file(process_file)
                except Exception as ex:
                    # Keep reporting the last known progress on a bad/partial read.
                    log.error(ex, exc_info=True)
            report_job_details(self._job_id, progress_dict)
            time.sleep(self._query_time_interval)
        # Best-effort cleanup of both temp files; a failed removal is only warned.
        for tmp_file in (process_file, backup_compute_over_flag_file):
            if os.path.isfile(tmp_file) and not su_exec_rm_cmd(tmp_file):
                log.warn(f"Fail to remove {tmp_file}.")
        log.info("report end.")

    def check_mailbox_connection(self, ip, port, pid):
        """Return True when an exchange session can be opened, else False.

        :param ip: mailbox host ip
        :param port: mailbox port
        :param pid: request pid passed through to the session helper
        """
        try:
            self.get_exchange_session(ip, port, pid)
        except Exception:
            # Best-effort probe: any failure simply means "not connectable".
            return False
        return True

    def sqlite_for_display(self, mailbox_ip, mailbox_port):
        """Query databases/tables of the mailboxes and persist them to sqlite.

        The sqlite file (under the meta area) is what copy browsing displays.
        Any query or write failure is logged and the method returns quietly.

        :param mailbox_ip: mailbox database host ip
        :param mailbox_port: mailbox database port
        """
        sqlite_dir = os.path.join(self._meta_area)
        # 查出mailbox中所有能查到的database与table
        # (collect every database and table visible for the mailboxes)
        db_sql = self.backup_exchange_cmd(ExchangeCmd.QUERY_DATABASE_FOR_DISPLAY,
                                          mailbox_id_list=self._mailbox_id_list)
        table_sql = self.backup_exchange_cmd(ExchangeCmd.QUERY_TABLE_FOR_DISPLAY,
                                             mailbox_id_list=self._mailbox_id_list)
        ok_db, db_rows = self.exec_exchange_cmd(mailbox_ip, mailbox_port, db_sql, self._pid)
        ok_table, table_rows = self.exec_exchange_cmd(mailbox_ip, mailbox_port, table_sql, self._pid)
        log.info(f"result query from database is {db_rows, table_rows}")
        if not (ok_db and ok_table):
            log.error("query database structure failed")
            return
        try:
            # 将查出的database与table写入sqlite文件
            # (write the collected databases/tables into the sqlite file)
            ExchangeSqliteService.write_sqlite(sqlite_dir, self._mailbox_name_list, db_rows, table_rows)
        except Exception as exception:
            log.error(f'fail to write sqlite for display, error is {exception}')
            return
        log.info("sqlite_for_display succeeded")