#
# This file is a part of the open-eBackup project.
# This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0.
# If a copy of the MPL was not distributed with this file, You can obtain one at
# http://mozilla.org/MPL/2.0/.
#
# Copyright (c) [2024] Huawei Technologies Co.,Ltd.
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
#

import json
import os
import random
import shutil
import signal
import time
import uuid
from concurrent.futures import ThreadPoolExecutor, as_completed
from multiprocessing import cpu_count
from threading import Lock, Thread

import psutil

from clickhouse import log
from clickhouse.common.parse_param import JsonParam
from clickhouse.services.clickhouse_resource_service import ClickHouseResourceService

from clickhouse.client.clickhouse_client import ClickHouseClient
from clickhouse.common.clickhouse_common import get_agent_id, report_job_details, report, compute_size
from clickhouse.common.clickhouse_constants import JobResult, LogLevel, BackupType, AgentConstant, CommandReturnCode, \
    ErrorCode, TaskStage
from clickhouse.schemas.clickhouse_schemas import CommonBodyResponse, PermissionInfo, SubJob
from clickhouse.services.clickhouse_file_service import ClickHouseFileService
from clickhouse.services.clickhouse_sqlite_service import ClickHouseSqliteService
from common.common import output_result_file
from common.common_models import SubJobDetails, LogDetail
from common.const import SubJobStatusEnum, RepositoryDataTypeEnum, SubJobPriorityEnum
from common.util.backup import query_progress, backup_dirs
from common.util.exec_utils import exec_mkdir_cmd
from common.util.scanner_utils import scan_dir_size


class ClickHouseBackupService:
    """
    备份任务相关接口
    """

    @staticmethod
    def allow_backup_in_local_node(pid, job_id, sub_job_id):
        """
        Decide whether the backup main job may run on the local node.

        The business currently needs no real check: validate the request
        parameters and write a default success response.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'step 1: execute allow_backup_in_local_node, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def allow_backup_sub_job_in_local_node(pid, job_id, sub_job_id):
        """
        Decide whether a backup sub job may run on the local node.

        The business currently needs no real check (every sub job runs):
        validate the request parameters and write a default success response.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute allow_backup_sub_job_in_local_node, pid: {pid}, job_id: {job_id}, sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def check_backup_job_type(pid, job_id):
        """
        Check the backup type (runs on the master job).

        The business currently needs no real check: validate the request
        parameters and write a default success response.

        :param pid: request id
        :param job_id: main job id
        """
        log.info(f'step 2: execute check_backup_job_type, pid: {pid}, job_id:{job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def query_job_permission(pid, job_id):
        """
        Report the file-system permissions for the copy (runs on the master
        job). ClickHouse produces the backup files on the production host and
        copies them to the repository, so the default root permission set is
        returned.

        :param pid: request id
        :param job_id: main job id
        """
        log.info(f'step 3: execute query_job_permission, pid: {pid}, job_id: {job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, PermissionInfo().dict(by_alias=True))

    @staticmethod
    def backup_prerequisite(pid, job_id):
        """
        Prerequisite step of the backup (runs on the master job): freezes the
        tables to create snapshots and records copy metadata in the sqlite
        file on the meta repository.

        :param pid: request id
        :param job_id: main job id
        Output: CommonBodyResponse written to the result file
        """
        response = CommonBodyResponse()
        client = None
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            copy_id = file_content['job']['copy'][0]['id']
            # Make sure the <repository>/<copy_id> directories exist before writing.
            ClickHouseBackupService.make_repository_dir(copy_id, file_content['job']['repositories'])

            index, node = ClickHouseBackupService.query_local_node(file_content['job']['protectEnv']['nodes'])
            backup_tables = json.loads(node['extendInfo']['backupTables'])
            backup_tables_len = len(backup_tables)
            log_detail = LogDetail(logInfo="job_log_create_table_copy_start_label",
                                   logInfoParam=[str(backup_tables_len)], logLevel=LogLevel.INFO)
            report_job_details(pid, SubJobDetails(taskId=job_id, progress=0, logDetail=[log_detail],
                                                  taskStatus=SubJobStatusEnum.RUNNING.value))

            meta_path = ClickHouseBackupService.query_meta_path(file_content['job']['repositories'])
            # Write the database-level metadata into the sqlite file first.
            backup_database = node['extendInfo']['backupDatabase']
            ClickHouseSqliteService.write_metadata_to_sqlite_file(meta_path, copy_id, backup_database, None)

            # Empty database: nothing to freeze, report success immediately.
            if backup_tables_len == 0:
                ClickHouseBackupService.report_complete(job_id, pid)
                return

            env_prefix = 'job_protectEnv_nodes_' + str(index) + '_auth'
            client = ClickHouseClient(pid, node['extendInfo'], env_prefix, node['auth']['extendInfo'])
            cluster_name = client.query_system_clusters()[0].cluster
            resource_id = file_content['job']['protectObject']['id']
            resources = ClickHouseResourceService.query_table(client, backup_database)
            for i, table_name in enumerate(backup_tables):
                # Table was deleted between job creation and execution: warn and skip it.
                if backup_tables.get(table_name) == BackupType.TABLE_DOES_NOT_EXIST.value:
                    report(backup_tables_len, i, pid, job_id, log_info="plugin_clickhouse_has_deleted_warn_label",
                           log_level=LogLevel.WARN, task_status=SubJobStatusEnum.RUNNING.value,
                           logInfoParam=[str(i + 1), backup_database, table_name])
                    continue
                engine = ClickHouseBackupService.get_engine(resources, table_name)
                # Record the table-level copy metadata in the sqlite file.
                ClickHouseSqliteService.write_metadata_to_sqlite_file(meta_path, copy_id, backup_database, table_name)
                if client.freeze_table(cluster_name, backup_database, table_name, engine, resource_id + '_' + copy_id):
                    report(backup_tables_len, i, pid, job_id, log_info="job_log_create_table_copy_success_label",
                           log_level=LogLevel.INFO, task_status=SubJobStatusEnum.RUNNING.value,
                           logInfoParam=[str(i + 1), backup_database, table_name])
                else:
                    # Freeze failed: report the failure and stop the prerequisite step.
                    report(backup_tables_len, i, pid, job_id, log_info="job_log_create_table_copy_fail_label",
                           log_level=LogLevel.ERROR, task_status=SubJobStatusEnum.FAILED.value,
                           logInfoParam=[str(i + 1), backup_database, table_name])
                    return
            ClickHouseBackupService.report_complete(job_id, pid)
        except Exception as e:
            log.error(e, exc_info=True)
            ClickHouseBackupService.set_error_response(response)
            log_detail = LogDetail(logInfo="plugin_create_snapshot_fail_label", logLevel=LogLevel.ERROR)
            report_job_details(pid, SubJobDetails(taskId=job_id, progress=100, logDetail=[log_detail],
                                                  taskStatus=SubJobStatusEnum.FAILED.value))
        finally:
            # Always write the result file and clean up kerberos temp files.
            ClickHouseBackupService.finally_do(client, pid, response)

    @staticmethod
    def finally_do(client, pid, response):
        """Write the final result file, then remove the client's kerberos temp files."""
        output_result_file(pid, response.dict(by_alias=True))
        ClickHouseBackupService.remove_kerberos_info(client)

    @staticmethod
    def get_engine(resources, table_name):
        engine = None
        for resource in resources:
            if resource.name == table_name:
                engine = resource.extend_info['table_engine']
                break
        return engine

    @staticmethod
    def report_complete(job_id, pid):
        """Report the prerequisite snapshot step as fully completed."""
        detail = LogDetail(logInfo="plugin_create_snapshot_success_label", logLevel=LogLevel.INFO)
        job_detail = SubJobDetails(taskId=job_id, progress=100, logDetail=[detail],
                                   taskStatus=SubJobStatusEnum.COMPLETED.value)
        report_job_details(pid, job_detail)

    @staticmethod
    def remove_kerberos_info(client):
        if client is not None:
            client.remove_kerberos_files()

    @staticmethod
    def make_repository_dir(copy_id, repositories):
        """
        Create the <repository path>/<copy_id> directory for the meta and data
        repositories when it does not exist yet.

        :param copy_id: copy id, used as the directory name
        :param repositories: repository descriptions from the job parameters
        """
        wanted_types = (RepositoryDataTypeEnum.META_REPOSITORY.value,
                        RepositoryDataTypeEnum.DATA_REPOSITORY.value)
        for repository in repositories:
            if int(repository['repositoryType']) not in wanted_types:
                continue
            copy_id_path = f"{repository.get('path')[0]}/{copy_id}"
            if not os.path.exists(copy_id_path):
                exec_mkdir_cmd(copy_id_path)

    @staticmethod
    def backup_prerequisite_progress(pid, job_id, sub_job_id):
        """
        Progress of the prerequisite step; the business has nothing to track,
        so 100% completed is always reported.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute backup_prerequisite_job_progress, pid: {pid}, job_id: {job_id},  sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id, progress=100,
                                              taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))

    @staticmethod
    def backup_gen_sub_job(pid, job_id):
        """
        Generate sub jobs (runs on the master job): one backup sub job per
        cluster node carrying a randomly assigned data file system id in
        jobInfo, plus one low-priority 'queryCopy' sub job on the local agent
        required by the framework.

        :param pid: request id
        :param job_id: main job id
        Output: list of SubJob dicts written to the result file (empty on error)
        """
        log.info(f'step 5: execute backup_gen_sub_job, pid: {pid}, job_id: {job_id}')
        response = []
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            fs_ids = []
            for repository in file_content['job']['repositories']:
                # repositoryType arrives as a plain value, so compare against the
                # enum's value after an int() cast (same as make_repository_dir).
                # The previous enum-vs-raw comparison never matched, which left
                # fs_ids empty and made the indexing below fail.
                if int(repository['repositoryType']) == RepositoryDataTypeEnum.DATA_REPOSITORY.value:
                    fs_ids.append(repository['extendInfo']['fsId'])
            random.shuffle(fs_ids)

            nodes = file_content['job']['protectEnv']['nodes']
            # Round-robin the shuffled file systems over the nodes.
            for i, node in enumerate(nodes):
                agent_id = node['extendInfo']['agentId']
                job_info = f'{fs_ids[i % len(fs_ids)]}'
                response.append(SubJob(jobId=job_id, execNodeId=agent_id, jobName=node['id'], jobInfo=job_info)
                                .dict(by_alias=True))

            # Framework adaptation: split off a low-priority query-copy sub job.
            if nodes:
                local_agent_id = get_agent_id()
                response.append(SubJob(jobId=job_id, execNodeId=local_agent_id, jobName='queryCopy',
                                       jobPriority=SubJobPriorityEnum.JOB_PRIORITY_4.value).dict(by_alias=True))
        except Exception as e:
            log.error(e, exc_info=True)
            response.clear()
        output_result_file(pid, response)

    @staticmethod
    def backup(pid, job_id, sub_job_id):
        """
        Execute the backup sub job on this node: back up the create-database
        and create-table SQL, report the protected data size, then copy the
        frozen table data to the repository with a thread pool.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        Output: CommonBodyResponse written to the result file
        """
        response = CommonBodyResponse()
        client = None
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            index, node = ClickHouseBackupService.query_local_node(file_content['job']['protectEnv']['nodes'])
            env_prefix = 'job_protectEnv_nodes_' + str(index) + '_auth'
            client = ClickHouseClient(pid, node['extendInfo'], env_prefix, node['auth']['extendInfo'])
            databases = client.query_allow_backup_databases()

            backup_database = node['extendInfo']['backupDatabase']
            copy_id = file_content['job']['copy'][0]['id']
            # jobInfo carries the file system id assigned by backup_gen_sub_job.
            fs_id = file_content['subJob']['jobInfo']
            repositories = file_content['job']['repositories']
            des_paths = ClickHouseBackupService.query_des_paths(repositories, fs_id, copy_id, get_agent_id())
            tables = ClickHouseBackupService.backup_database(backup_database, client, databases, des_paths[0])

            backup_tables = json.loads(node['extendInfo']['backupTables'])
            backup_tables_len = len(backup_tables)
            # Empty database: nothing to copy, report the sub job as completed.
            if backup_tables_len == 0:
                report_job_details(pid, SubJobDetails(taskId=job_id, progress=100, subTaskId=sub_job_id,
                                                      taskStatus=SubJobStatusEnum.COMPLETED.value))
                return

            disks = client.query_system_disks()
            bak_dir_prefix = file_content['job']['protectObject']['id'] + '_' + copy_id
            # Back up the table structures and report the total protected data size.
            ClickHouseBackupService.backup_table(tables, backup_tables, node['extendInfo']['ip'], disks, job_id=job_id,
                                                 pid=pid, sub_job_id=sub_job_id, des_path=des_paths[0],
                                                 bak_dir_prefix=bak_dir_prefix, backup_database=backup_database)

            resources = ClickHouseResourceService.query_table(client, backup_database)

            # One-element lists act as mutable counters shared across threads.
            running_thread_num = [0]
            running_thread_num_lock = Lock()
            already_backup_num = [0]
            backup_data_lock = Lock()
            future_list = []
            ClickHouseBackupService.create_report_backup_progress_thread(already_backup_num, backup_tables_len, job_id,
                                                                         pid, sub_job_id)
            # Back up the table data with multiple threads.
            # NOTE(review): cpu_count() / 2 is a float; ThreadPoolExecutor accepts
            # it, but an integer division (//) would be cleaner — confirm intent.
            with ThreadPoolExecutor(max_workers=cpu_count() / 2) as pool:
                for _, table_name in enumerate(backup_tables):
                    table = {"name": table_name, "dbName": backup_database, "backupData": backup_tables.get(table_name),
                             "engine": ClickHouseBackupService.get_engine(resources, table_name)}
                    future_list.append(ClickHouseBackupService.
                                       backup_data(running_thread_num_lock, running_thread_num, backup_data_lock, pool,
                                                   sub_job_id=sub_job_id, tables=tables, databases=databases,
                                                   pid=pid, already_backup_num=already_backup_num, job_id=job_id,
                                                   backup_table=table, backup_tables_len=backup_tables_len,
                                                   disks=disks, bak_dir_prefix=bak_dir_prefix, des_paths=des_paths))

            # Propagate any per-table failure into the overall response.
            ClickHouseBackupService.deal_with_future_result(future_list, response)
        except Exception as e:
            log.error(e, exc_info=True)
            ClickHouseBackupService.set_error_response(response)
        finally:
            ClickHouseBackupService.finally_do(client, pid, response)
            ClickHouseBackupService.report_error_job_details(job_id, pid, response, sub_job_id)

    @staticmethod
    def report_error_job_details(job_id, pid, response, sub_job_id):
        """Report the sub job as failed when *response* carries a non-success code."""
        if response.code == CommandReturnCode.SUCCESS.value:
            return
        detail = LogDetail(logInfo="plugin_backup_subjob_fail_label", logInfoParam=[sub_job_id],
                           logLevel=LogLevel.ERROR)
        report_job_details(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id, progress=100,
                                              logDetail=[detail], taskStatus=SubJobStatusEnum.FAILED.value))

    @staticmethod
    def deal_with_future_result(future_list, response):
        """Mark *response* as failed as soon as any finished future reports an error."""
        for done_future in as_completed(future_list):
            if int(done_future.result()) == CommandReturnCode.SUCCESS.value:
                continue
            ClickHouseBackupService.set_error_response(response)
            return

    @staticmethod
    def set_error_response(response):
        """Fill *response* with the generic internal-error code pair."""
        response.code = CommandReturnCode.INTERNAL_ERROR.value
        response.body_err = ErrorCode.ERROR_INTERNAL.value

    @staticmethod
    def create_report_backup_progress_thread(already_backup_num, backup_tables_len, job_id, pid, sub_job_id):
        """
        Start a daemon thread that reports the backup progress every 30
        seconds until every table has been backed up.

        :param already_backup_num: one-element list used as a mutable shared counter
        :param backup_tables_len: total number of tables to back up
        :param job_id: main job id
        :param pid: request id
        :param sub_job_id: sub job id
        """

        def report_backup_progress(pid, job_id, sub_job_id, backup_tables_len, already_backup_num):
            # Nothing to back up: report 100% once and stop.
            # taskStatus uses .value for consistency with the rest of the file.
            if backup_tables_len == 0:
                report_job_details(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                                      taskStatus=SubJobStatusEnum.RUNNING.value, progress=100))
                return
            while already_backup_num[0] < backup_tables_len:
                # backup_tables_len is known to be > 0 here, so divide directly.
                task_progress = int(already_backup_num[0] / backup_tables_len * 100)
                report_job_details(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id,
                                                      taskStatus=SubJobStatusEnum.RUNNING.value,
                                                      progress=task_progress))
                time.sleep(30)

        # daemon=True replaces the deprecated Thread.setDaemon() call so the
        # reporter never keeps the process alive.
        progress_thread = Thread(target=report_backup_progress,
                                 args=(pid, job_id, sub_job_id, backup_tables_len, already_backup_num),
                                 daemon=True)
        progress_thread.start()

    @staticmethod
    def backup_database(backup_database, client, databases, des_path):
        """
        Back up the CREATE DATABASE statement for *backup_database* and return
        the tables of that database which may be backed up.

        :param backup_database: database name to back up
        :param client: clickhouse client
        :param databases: databases queried from clickhouse
        :param des_path: destination root on the repository
        :return: list of backup-capable table descriptions
        """
        # Persist the create-database SQL first, then list the tables.
        ClickHouseBackupService.backup_create_db_sql(des_path, databases, backup_database)
        return client.query_allow_backup_tables(backup_database)

    @staticmethod
    def backup_table(tables, backup_tables, endpoint_ip, disks, **kwargs):
        """
        Back up the table structures (CREATE TABLE SQL) and report the total
        size of the data that will be protected.

        :param tables: table descriptions queried from clickhouse
        :param backup_tables: mapping of table name -> backup type value
        :param endpoint_ip: ip of the local node, used in the size report label
        :param disks: clickhouse system disks
        :param kwargs: des_path, bak_dir_prefix, job_id, pid, sub_job_id, backup_database
        """
        des_path = kwargs.get('des_path')
        bak_dir_prefix = kwargs.get('bak_dir_prefix')
        job_id = kwargs.get('job_id')
        pid = kwargs.get('pid')
        sub_job_id = kwargs.get('sub_job_id')
        backup_database = kwargs.get('backup_database')
        total_size = 0
        # Back up the table structures first.
        for _, table_name in enumerate(backup_tables):
            # Skip tables already gone from the cluster.
            if backup_tables[table_name] == BackupType.TABLE_DOES_NOT_EXIST.value:
                continue
            # Persist the CREATE TABLE statement.
            ClickHouseBackupService.backup_create_table_sql(des_path, backup_database, tables, table_name)
            # Accumulate the data size for tables whose data is backed up too.
            if backup_tables[table_name] == BackupType.TABLE_AND_DATA.value:
                total_size += ClickHouseBackupService.get_backup_table_dir_size(disks, backup_database=backup_database,
                                                                                table_name=table_name,
                                                                                bak_dir_prefix=bak_dir_prefix,
                                                                                des_path=des_path, job_id=job_id)
        log.info(f'step 6: execute backup_table, total_size: {total_size}')
        # Only report the protected data size when it is non-zero.
        if total_size:
            log_detail = LogDetail(logInfo="job_log_protection_copy_data_size_label",
                                   logInfoParam=[endpoint_ip, compute_size(total_size, 2)], logLevel=LogLevel.INFO)
            report_job_details(pid,
                               SubJobDetails(taskId=job_id, progress=0, logDetail=[log_detail], subTaskId=sub_job_id,
                                             taskStatus=SubJobStatusEnum.RUNNING.value))

    @staticmethod
    def get_backup_table_dir_size(disks, **dbargs):
        """
        Compute the total size of the frozen (shadow) parts of one table and,
        as a side effect, create the destination directory on the repository
        for every disk that holds parts.

        :param disks: clickhouse system disks
        :param dbargs: bak_dir_prefix, backup_database, table_name, des_path, job_id
        :return: accumulated parts directory size (same unit as scan_dir_size)
        """
        total_size = 0
        # The freeze name uses '_' instead of '-' (see copy_data which does the same).
        bak_dir_prefix = dbargs.get('bak_dir_prefix').replace('-', '_')
        backup_database = dbargs.get('backup_database')
        table_name = dbargs.get('table_name')
        des_path = dbargs.get('des_path')
        for disk in disks:
            disk_name = disk.name
            # NOTE(review): assumes disk.path ends with '/' — confirm against the client.
            store_path = disk.path + f'shadow/{bak_dir_prefix}_{backup_database}_{table_name}/store'
            parts_path = ClickHouseFileService.get_parts_absolute_path(store_path)
            if parts_path is not None:
                table_data_des_path = f'{des_path}/shadow/{backup_database}/{table_name}/{disk_name}'
                if not os.path.exists(table_data_des_path):
                    exec_mkdir_cmd(table_data_des_path)
                parts_child_names = os.listdir(parts_path)
                for store_child_name in parts_child_names:
                    temp_path = os.path.join(parts_path, store_child_name)
                    # scan_dir_size returns a tuple; index 1 is the size.
                    total_size += scan_dir_size(dbargs.get('job_id'), temp_path)[1]
        return total_size

    @staticmethod
    def backup_data(running_thread_num_lock, running_thread_num, backup_data_lock, pool, **kwargs):
        """
        功能描述：备份数据
        参数：
        @running_thread_num_lock：修改正在执行的线程数的锁对象
        @running_thread_num：正在执行的线程数
        @pool：线程池
        @backup_data_lock：备份数据计数的锁对象
        @kwargs：入参map
        """
        future = None
        while True:
            running_thread_num_lock.acquire()
            if running_thread_num[0] < cpu_count() / 2:
                running_thread_num[0] = running_thread_num[0] + 1
                running_thread_num_lock.release()

                def backup_metadata_and_data_done_callback(future):
                    running_thread_num_lock.acquire()
                    running_thread_num[0] = running_thread_num[0] - 1
                    running_thread_num_lock.release()

                try:
                    future = pool.submit(ClickHouseBackupService.backup_table_data, backup_data_lock, **kwargs)
                    future.add_done_callback(backup_metadata_and_data_done_callback)
                except Exception as e:
                    log.error(e, exc_info=True)
                break
            else:
                running_thread_num_lock.release()
                time.sleep(AgentConstant.SLEEP_TIME)
        return future

    @staticmethod
    def backup_table_data(backup_data_lock, **kwargs):
        """
        Write one table's metadata file, copy its data when requested, and
        bump the shared backed-up counter; reports completion with the final
        data size when this was the last table.

        :param backup_data_lock: lock guarding the shared counter
        :param kwargs: job_id, pid, sub_job_id, already_backup_num,
                       backup_table, backup_tables_len, des_paths, ...
        :return: CommandReturnCode value describing success or failure
        """
        job_id = kwargs.get("job_id")
        already_backup_num = kwargs.get('already_backup_num')
        backup_table = kwargs.get('backup_table')
        backup_tables_len = kwargs.get('backup_tables_len')
        des_paths = kwargs.get('des_paths')
        table_name = backup_table.get('name')
        db_name = backup_table.get('dbName')
        bak_data_type = backup_table.get('backupData')
        meta_data = {"backup_data": bak_data_type, "engine": backup_table.get('engine')}
        ClickHouseFileService.write_file(f'{des_paths[0]}/metadata/{db_name}/{table_name}.meta',
                                         json.dumps(meta_data))
        try:
            # Copy the frozen data when the backup type includes table data.
            if bak_data_type == BackupType.TABLE_AND_DATA.value:
                ClickHouseBackupService.copy_data(db_name, table_name, **kwargs)
            with backup_data_lock:
                already_backup_num[0] += 1
            # Last table finished: report completion with the total data size.
            if backup_tables_len != 0 and already_backup_num[0] == backup_tables_len:
                task_progress = int(already_backup_num[0] / backup_tables_len * 100)
                total_size = scan_dir_size(job_id, des_paths[0])[1]
                report_job_details(kwargs.get('pid'),
                                   SubJobDetails(dataSize=total_size, taskId=job_id, progress=task_progress,
                                                 subTaskId=kwargs.get('sub_job_id'),
                                                 taskStatus=SubJobStatusEnum.COMPLETED.value))
            return CommandReturnCode.SUCCESS.value
        except Exception as e:
            log.error(e, exc_info=True)
            return CommandReturnCode.INTERNAL_ERROR.value

    @staticmethod
    def backup_create_db_sql(des_path, databases, t_db_name):
        """
        Persist the CREATE DATABASE statement of *t_db_name* under
        <des_path>/metadata/<t_db_name>.sql.

        :param des_path: destination root on the repository
        :param databases: database descriptions queried from clickhouse
        :param t_db_name: database name
        :raises Exception: when the database is not found in *databases*
        """
        db_engine = next((database.engine for database in databases if database.name == t_db_name), None)
        if db_engine is None:
            raise Exception(f"database {t_db_name} not exist")
        create_db_sql = f'create database if not exists {t_db_name} engine = {db_engine}'
        ClickHouseFileService.write_file(f'{des_path}/metadata/{t_db_name}.sql', create_db_sql)

    @staticmethod
    def backup_create_table_sql(des_path, t_db_name, tables, t_name):
        """
        Persist the CREATE TABLE statement of *t_name* under
        <des_path>/metadata/<t_db_name>/<t_name>.sql.

        :param des_path: destination root on the repository
        :param t_db_name: database name
        :param tables: table descriptions queried from clickhouse
        :param t_name: table name
        :raises Exception: when the table is not found in *tables*
        """
        create_table_sql = next((table.create_table_query for table in tables
                                 if t_name == table.name and t_db_name == table.database), None)
        if create_table_sql is None:
            raise Exception(f"table {t_name} not exist")
        ClickHouseFileService.write_file(f'{des_path}/metadata/{t_db_name}/{t_name}.sql', create_table_sql)

    @staticmethod
    def copy_data(t_db_name, t_name, **kwargs):
        """
        Copy the frozen (shadow) data parts of one table to the repository and
        delete the source parts afterwards.

        :param t_db_name: database name
        :param t_name: table name
        :param kwargs: des_paths (candidate destination roots), disks, bak_dir_prefix
        """
        des_paths = kwargs.get('des_paths')
        disks = kwargs.get('disks')
        bak_dir_prefix = kwargs.get('bak_dir_prefix')
        # The freeze name uses '_' instead of '-' (matches get_backup_table_dir_size).
        bak_dir_prefix = bak_dir_prefix.replace('-', '_')
        for disk in disks:
            disk_name = disk.name
            # NOTE(review): assumes disk.path ends with '/' — confirm against the client.
            store_path = disk.path + f'shadow/{bak_dir_prefix}_{t_db_name}_{t_name}/store'
            parts_path = ClickHouseFileService.get_parts_absolute_path(store_path)
            if parts_path is None:
                continue
            dirs = []
            parts_child_names = os.listdir(parts_path)
            for store_child_name in parts_child_names:
                temp_path = os.path.join(parts_path, store_child_name)
                dirs.append(temp_path)

            temp_job_id = str(uuid.uuid4())
            log.info(f"temp_job_id: {temp_job_id}, t_db_name:{t_db_name}, t_name: {t_name}, disk_name: {disk_name}.")
            # Spread load by picking a random destination file system per disk.
            table_data_des_path = f'{random.choice(des_paths)}/shadow/{t_db_name}/{t_name}/{disk_name}'
            if not os.path.exists(table_data_des_path):
                exec_mkdir_cmd(table_data_des_path)
            ClickHouseBackupService.so_copy_data(temp_job_id, dirs, table_data_des_path)
            # Delete the source parts once the copy has succeeded.
            shutil.rmtree(parts_path)

    @staticmethod
    def so_copy_data(temp_job_id, dirs, table_data_des_path):
        """
        Copy *dirs* to *table_data_des_path* through the backup tool and poll
        its progress every 2 seconds until it finishes.

        :param temp_job_id: transient job id used to track the copy
        :param dirs: absolute source part directories
        :param table_data_des_path: destination directory
        :raises Exception: when the copy cannot be started or fails
        """
        if not backup_dirs(temp_job_id, dirs, table_data_des_path, False):
            raise Exception(f"Failed to start backup, temp_job_id: {temp_job_id}.")
        while True:
            time.sleep(2)
            # Only the status is needed; progress and data size are unused here.
            status, _, _ = query_progress(temp_job_id)
            if status == 1:
                log.info(f"Backup completed, temp_job_id: {temp_job_id}.")
                return
            if status == 3:
                # Log the failure at error level (was log.info).
                log.error(f"Backup failed, jobId: {temp_job_id}.")
                raise Exception(f"Backup failed, jobId: {temp_job_id}.")

    @staticmethod
    def backup_post_job(pid, job_id):
        """
        Post job of the backup (runs on every node, advanceParams
        multiPostJob=true): removes the copy directory when the backup failed
        and cleans the frozen shadow files left on the production disks.

        :param pid: request id
        :param job_id: main job id
        Output: CommonBodyResponse written to the result file
        """
        log.info(f'step 7: execute backup_post_job, pid: {pid}, job_id: {job_id}')
        response = CommonBodyResponse()
        client = None
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            sub_job = file_content['subJob']
            # jobInfo carries the repository mount path for this node.
            mount_path = sub_job['jobInfo']
            copy_id = file_content['job']['copy'][0]['id']
            des_path = f'{mount_path}/{copy_id}'
            backup_job_result = file_content['backupJobResult']
            # Failed backup: drop the partially written copy directory.
            if backup_job_result != JobResult.SUCCESS.value:
                if os.path.exists(des_path):
                    shutil.rmtree(des_path)

            nodes = file_content['job']['protectEnv']['nodes']
            index, node = ClickHouseBackupService.query_local_node(nodes)
            app_extend_info = node['extendInfo']
            env_prefix = 'job_protectEnv_nodes_' + str(index) + '_auth'
            auth_extend_info = node['auth']['extendInfo']
            client = ClickHouseClient(pid, app_extend_info, env_prefix, auth_extend_info)
            disks = client.query_system_disks()
            resource_id = file_content['job']['protectObject']['id']
            # Shadow directories were created with '-' replaced by '_'.
            ClickHouseBackupService.remove_backup_file(disks, resource_id.replace('-', '_'))
        except Exception as e:
            log.error(e, exc_info=True)
            ClickHouseBackupService.set_error_response(response)
        finally:
            # Always write the result file and clean up kerberos temp files.
            ClickHouseBackupService.finally_do(client, pid, response)

    @staticmethod
    def remove_backup_file(disks, resource_id):
        for disk in disks:
            disk_shadow_path = disk.path + f'/shadow'
            if not os.path.exists(disk_shadow_path):
                continue
            shadow_child_names = os.listdir(disk_shadow_path)
            for shadow_child_name in shadow_child_names:
                if shadow_child_name.startswith(resource_id):
                    temp_path = os.path.join(disk_shadow_path, shadow_child_name)
                    log.info(f'remove_backup_file, temp_path: {temp_path}')
                    shutil.rmtree(temp_path)

    @staticmethod
    def backup_post_job_progress(pid, job_id, sub_job_id):
        """
        Progress of the post job; the business has nothing to track, so 100%
        completed is always reported.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        """
        log.info(f'execute backup_post_job_progress, pid: {pid}, job_id: {job_id},  sub_job_id: {sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, SubJobDetails(taskId=job_id, subTaskId=sub_job_id, progress=100,
                                              taskStatus=SubJobStatusEnum.COMPLETED.value).dict(by_alias=True))

    @staticmethod
    def abort_job(pid, job_id, sub_job_id):
        """
        Abort a running job (triggered from the UI; the post-job will NOT run).

        Scans all processes for the backup worker whose command line carries
        this job id in a backup stage, and SIGKILLs it.

        :param pid: request id
        :param job_id: main job id
        :param sub_job_id: sub job id
        :return: None (writes a CommonBodyResponse result file when no
                 matching process was found/killed)
        """
        log.info(f'execute to abort_job, job_id:{job_id},sub_job_id:{sub_job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        if not job_id:
            return
        abort_stages = (TaskStage.BACKUP_PREREQUISITE, TaskStage.BACKUP_GEN_SUB_JOB, TaskStage.BACKUP)
        for process in psutil.process_iter():
            try:
                cmd_lines = process.cmdline()
            except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
                # The process exited or became inaccessible while we were
                # scanning; skip it instead of crashing the whole abort.
                continue
            if len(cmd_lines) < 5:
                continue
            # cmd_lines[2] is the task stage, cmd_lines[4] the job id
            # (layout established by the plugin's worker command line).
            if str(job_id) == str(cmd_lines[4]) and str(cmd_lines[2]) in abort_stages:
                try:
                    os.kill(process.pid, signal.SIGKILL)
                except ProcessLookupError:
                    # Already gone between cmdline() and kill(): abort done.
                    pass
                return
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def pause_job(pid, job_id):
        """
        Handle a pause-job request triggered from the task flow (the post-job
        will still run). Nothing app-specific to do here, so simply
        acknowledge with an empty CommonBodyResponse.

        :param pid: request id
        :param job_id: main job id
        :return: None (writes a CommonBodyResponse result file)
        """
        log.info(f'execute to pause_job, pid: {pid}, job_id: {job_id}')
        JsonParam.parse_param_with_jsonschema(pid)
        output_result_file(pid, CommonBodyResponse().dict(by_alias=True))

    @staticmethod
    def query_backup_copy(pid, job_id):
        """
        Query the backup copy info. Invoked after a successful backup and
        before the post-job; never called when the backup failed.

        Builds the copy's repository list (meta/data repositories with the
        copy id appended to remotePath) plus node/copy extend info. On any
        failure an empty dict is reported.

        :param pid: request id
        :param job_id: main job id
        :return: None (writes the copy info result file)
        """
        log.info(f'execute to query_backup_copy, pid: {pid}, job_id: {job_id}')
        copy_info = {}
        try:
            file_content = JsonParam.parse_param_with_jsonschema(pid)
            job_info = file_content['job']
            nodes = job_info['protectEnv']['nodes']
            copy_id = job_info['copy'][0]['id']
            wanted_types = (RepositoryDataTypeEnum.META_REPOSITORY.value,
                            RepositoryDataTypeEnum.DATA_REPOSITORY.value)
            report_repositories = []
            for repository in job_info['repositories']:
                if int(repository['repositoryType']) in wanted_types:
                    # Copies live under <remotePath>/<copy_id> on these repos.
                    repository['remotePath'] = f"{repository['remotePath']}/{copy_id}"
                    report_repositories.append(repository)
            copy_info['repositories'] = report_repositories
            copy_info['extendInfo'] = {'nodes': nodes, 'copyId': copy_id}
        except Exception as e:
            log.error(e, exc_info=True)
        output_result_file(pid, copy_info)

    @staticmethod
    def query_local_node(nodes):
        """
        Find the node entry that corresponds to the local agent.

        :param nodes: node list (each with extendInfo.agentId)
        :return: tuple (index, node) of the matching entry, or (None, None)
                 when the local agent is not in the list
        """
        local_agent_id = get_agent_id()
        for index, candidate in enumerate(nodes):
            if candidate['extendInfo']['agentId'] == local_agent_id:
                return index, candidate
        return None, None

    @staticmethod
    def query_meta_path(repositories):
        """
        Find the mount path of the META repository.

        :param repositories: repository list
        :return: first path of the META repository, or None if absent
        """
        for repository in repositories:
            # Normalize with int(...) == Enum.value, consistent with
            # query_backup_copy: repositoryType may be delivered as a string,
            # and comparing a raw value to the enum *member* would never match.
            if int(repository['repositoryType']) == RepositoryDataTypeEnum.META_REPOSITORY.value:
                return repository.get("path")[0]
        return None

    @staticmethod
    def query_des_paths(repositories, fs_id, copy_id, exec_node_id):
        """
        Build the destination paths under the DATA repository of a given
        file system.

        :param repositories: repository list
        :param fs_id: file system id to match
        :param copy_id: copy id
        :param exec_node_id: executing agent id
        :return: list of '<path>/<copy_id>/<exec_node_id>' strings; empty
                 list when no matching data repository is found
        """
        des_paths = []
        for repository in repositories:
            # Normalize with int(...) == Enum.value, consistent with
            # query_backup_copy: repositoryType may be delivered as a string,
            # and comparing a raw value to the enum *member* would never match.
            if int(repository['repositoryType']) == RepositoryDataTypeEnum.DATA_REPOSITORY.value \
                    and fs_id == repository['extendInfo']['fsId']:
                des_paths = [f'{path}/{copy_id}/{exec_node_id}'
                             for path in repository.get("path")]
                break
        return des_paths
